def save_index(imdbid,index,label):
	"""Insert an (imdbid, pageid, label) row into the indexing table.

	Failures are logged and swallowed, never raised to the caller.
	"""
	row = (imdbid, index, label)
	try:
		connection = database.connect(addonCache)
		connection.cursor().execute("INSERT INTO indexing Values (?, ?, ?)", row)
		connection.commit()
	except BaseException as e:
		basic.log(u"localdb.save_index ##Error: %s" % str(e))
def save_cache(imdbid,tmdbid,label,originallabel,poster,fanart_image,year,info):
	"""Insert one movie metadata row into the cache table.

	Failures are logged and swallowed, never raised to the caller.
	"""
	row = (imdbid, tmdbid, label, originallabel, poster, fanart_image, year, info)
	try:
		connection = database.connect(addonCache)
		connection.cursor().execute("INSERT INTO cache Values (?, ?, ?, ?, ?, ?, ?, ?)", row)
		connection.commit()
	except BaseException as e:
		basic.log(u"localdb.save_cache ##Error: %s" % str(e))
Esempio n. 3
0
 def edit_quantiles(self,q=.01,quantile_range=False,v=False,write=True):
     """Compute per-namespace, per-revert-metric quantiles of edit counts.

     q: quantile step (0.01 -> 1%..100% percentiles).
     quantile_range, v: accepted but unused in this body -- NOTE(review):
     confirm callers/subclasses rely on them.
     write: also dump each quantile table as CSV under results/quantiles.
     Returns {namespace: {metric: {'quantiles': DataFrame, 'mean': float}}}.
     """
     basic.log('creating edit quantiles %s' % self.lang)
     f_out = basic.create_dir('results/quantiles')
     df = pd.read_csv(self.db_path)
     df = self.drop_dups(df)
     df.page_id = df.page_id.astype(int)
     # Optionally exclude pages with a single edit.
     if self.drop1:
         df = df.loc[(df['len'] > 1)]
     # Quantile grid: q, 2q, ..., 1.0 (rebinds the parameter q).
     q = np.arange(q,1+q,q)
     results = defaultdict(dict)
     for n in self.namespace:
         results[n] = defaultdict(dict)
         for r in self.revert:
             basic.log('%s %s %s' % (self.lang,n,r))
             if n == 'at':
                 # 'at' appears to mean all namespaces combined -- TODO confirm.
                 result = df[r].quantile(q=q)
                 mean = df[r].mean()
             else:
                 result = df.loc[(df['namespace'] == self.namespace.index(n)),r].quantile(q=q)
                 #qcut = pd.qcut(df.loc[(df['namespace'] == self.namespace.index(n)),r],q)
                 #print(qcut)
                 mean = df.loc[(df['namespace'] == self.namespace.index(n)),r].mean()
             result = result.to_frame()
             column = '%s_%s_%s' % (self.lang,n,r)
             result.columns = [column]
             results[n][r] = {'quantiles':result,'mean':mean}
             if write:
                 # Append two marker rows: the highest quantile whose value is
                 # below the mean, then the mean itself.
                 # NOTE(review): DataFrame.append was removed in pandas 2.0;
                 # this needs pd.concat on modern pandas.
                 result = result.append(DataFrame({column:result.loc[(result[column] < int(mean+1))].tail(1).index.values},index=['mean_quantile']))
                 result = result.append(DataFrame({column:mean},index=['mean_value']))
                 # index_label 'qauntiles' is a typo kept as-is: changing it
                 # would alter the produced CSV files.
                 result.to_csv('%s/%s_%s_%s.csv' % (f_out,self.lang,n,r),encoding='utf-8',index_label='qauntiles')
     return results
Esempio n. 4
0
    def play_loop(self, freq, vol):
        """Synthesize a sine tone at *freq* scaled by *vol* and stream it to
        the player process until self.stop is set."""
        log("soge: start play loop")
        if not self.player_proc:
            self.on_pulseplayer_start(None)

        self.playing = True
        self.stop = False

        # Whole number of sine periods fitting the configured duration.
        full_periods = int(freq * self.duration)
        samples_per_period = self.fs / freq
        total_samples = round(samples_per_period * full_periods)

        # Length of one buffer in seconds, used to pace the writes.
        buf_seconds = total_samples / self.fs

        wave = array('f', (float(sin(2 * k * pi / samples_per_period) * vol)
                           for k in range(total_samples)))

        # Prime the pipe with two buffers before entering the paced loop.
        self.player_proc.stdin.write(wave)
        self.player_proc.stdin.write(wave)

        while not self.stop:
            self.player_proc.stdin.write(wave)
            sleep(buf_seconds)

        self.stop = False
        self.playing = False
Esempio n. 5
0
def get_skin_colors(skin):
    """Return the color palette for *skin*.

    Loads skincolors.json, overlays the skin-specific entries on the
    defaults, then expands the template entries (lists of fragments that
    reference other palette values).  Returns {} on any error.
    """
    try:
        with open(path_addon + "resources/skins/Default/skincolors.json") as f:
            colors = json.loads(f.read())

        defcol = colors["default"]
        try:
            skicol = colors[skin]
            log("skin: {} defined".format(skin))
        except KeyError:
            log("skin: {} not defined".format(skin))
            skicol = {}

        # Skin-specific values override the defaults.
        defcol.update(skicol)

        # Expand templates in order: later ones may reference values that
        # earlier expansions produced (all go through **defcol).
        for key in ("button_tags", "button_textcolor", "button_radio",
                    "progress_bar", "background_img"):
            defcol[key] = "".join(defcol[key]).format(**defcol)

        return defcol

    except Exception as e:
        handle(e)

    return {}
Esempio n. 6
0
 def get_pa_object_list(self, target):
     """Fetch all PulseAudio objects of kind *target* and cache them on
     self under the pluralized attribute name, keyed by object index."""
     log("padb: get objects %s" % target)
     indexed = {obj.index: obj for obj in self.pc.get_list(target)}
     setattr(self, target + "s", indexed)
Esempio n. 7
0
def listmovies(url,index):
	basic.log(u"trakt.listmovies url: %s" % url)
	mainlist = []
	sendlist = [] 
	result = []
	threads = []
	order = 0
	if 'popular' in url: headers = { 'Content-Type': 'application/json', 'trakt-api-version': '2', 'trakt-api-key': links.link().trakt_apikey, 'page': index, 'limit': '25' }
	elif 'trending' in url: headers = { 'Content-Type': 'application/json', 'trakt-api-version': '2', 'trakt-api-key': links.link().trakt_apikey, 'page': index, 'limit': '25' }	
	print headers,url
	jsonpage = basic.open_url_headers(url,headers)
	print 'jsonpage %s' % jsonpage
	j = json.loads(jsonpage)
	for list in j:
		order += 1
		if 'trending' in url: sendlist.append([order,list['movie']['ids']['tmdb']])
		elif 'popular' in url: sendlist.append([order,list['ids']['tmdb']])
	chunks=[sendlist[x:x+5] for x in xrange(0, len(sendlist), 5)]
	for i in range(len(chunks)): threads.append(threading.Thread(name='listmovies'+str(i),target=tmdb.searchmovielist,args=(chunks[i],result, )))
	[i.start() for i in threads]
	[i.join() for i in threads]
	result = sorted(result, key=basic.getKey)
	for id,lists in result: mainlist.append(lists)
	basic.log(u"trakt.listmovies mainlist: %s" % mainlist)	
	return mainlist
Esempio n. 8
0
def contextMenu(**kwargs):
    """Run the context-menu dialog until the user picks an item or cancels.

    Returns the selected item index, or None when cancelled or after a
    bound function entry has been executed.
    """
    while True:
        selection = run_dialog(ContextGui, "ContextMenu.xml", **kwargs)
        log("contextMenu: selected: {}".format(selection))

        # give the close animation time to finish
        time.sleep(0.3)

        if selection["index"] is None:
            return None

        sel_type = selection["type"]

        if sel_type == "item":
            return selection["index"]

        if sel_type == "func":
            # Entry carries a callable; run it and close the menu.
            try:
                func = selection["index"]
                if func:
                    func()
                    return None
            except Exception as e:
                opthandle(e)

        if sel_type == "settings":
            # Force the item list to be rebuilt on the next iteration.
            kwargs["items"] = []

    return None
Esempio n. 9
0
def results(url, auth=True, post=None):
    """Perform a Trakt v2 API request and return the raw response body.

    post is JSON-encoded when given; user credentials are attached unless
    they are unset or auth is disabled.  Errors are logged, returns None.
    """
    try:
        headers = {
            'Content-Type': 'application/json',
            'trakt-api-key': links.link().trakt_apikey,
            'trakt-api-version': '2'
        }
        if not post == None:
            post = json.dumps(post)
        # Attach credentials only when both are configured and auth is on.
        if (links.link().trakt_user != ''
                and links.link().trakt_password != '' and auth != False):
            token = auth_token(links.link().trakt_user,
                               links.link().trakt_password)
            headers['trakt-user-login'] = links.link().trakt_user
            headers['trakt-user-token'] = token
        request = urllib2.Request(url, data=post, headers=headers)
        response = urllib2.urlopen(request, timeout=30)
        body = response.read()
        response.close()
        return body
    except BaseException as e:
        basic.log(u"trakt.results ##Error: %s" % str(e))
Esempio n. 10
0
	def get_ffreq_coef(self,filter_rate, sample_rate):
		"""Return (filter_freq, preamp, coefs) for the active room-correction
		spectrum / EQ-profile combination, or None when neither is set.

		coefs is a list with one entry per channel.
		"""
		# Fall back to the default profile when none has been chosen yet.
		if self.profile is None: self.set_profile_default()
		if self.cur_spec and self.profile:
			spec =  self.cur_spec.apply_profile(self.profile.spec)
			preamp = self.profile.preamp
			info = "room correction %s and profile %s" % (self.cur_spec.name , self.profile.name)
		elif self.profile:
			spec =  self.profile.spec
			preamp = self.profile.preamp
			info = "profile %s and no room correction" % (self.profile.name)
		elif self.cur_spec:
			spec =  self.cur_spec
			preamp = 1.0
			info = "room correction %s and no profile" % (self.cur_spec.name)
		else:
			log("no room correction and no profile have been selected")
			return None

		# Limit the filter range to the Nyquist frequency.
		spec = spec.set_filter_range(sample_rate // 2)

		# Recompute the cached filter frequencies when missing or when either
		# rate changed since the last call.
		if not self.filter_freq or filter_rate != self.filter_rate or sample_rate != self.sample_rate:
			self.calc_filter_freq(spec, filter_rate, sample_rate)

		# A plain Spectrum yields a single channel; other spec types
		# apparently return a list of per-channel coefficients already.
		if spec.__class__.__name__ == "Spectrum":
			coefs = [spec.get_coefs()]
		else: coefs = spec.get_coefs()

		log("%s, number of channels: %s" % (info, len(coefs)))

		return self.filter_freq, preamp, coefs
Esempio n. 11
0
File: localdb.py Progetto: teosan5/0
def save_cache(imdbid,
               tmdbid,
               label,
               originallabel,
               poster,
               fanart_image,
               year,
               info,
               an=None):
    """Insert one movie row into the cache table.

    When *an* is set and there is no poster, the label is stored in the
    imdbid column (these entries apparently lack a usable IMDB id --
    NOTE(review): confirm against callers).  Failures are logged, never
    raised.
    """
    try:
        dbcon = database.connect(addonCache)
        dbcur = dbcon.cursor()
        if an and poster == '':
            try:
                dbcur.execute(
                    "INSERT INTO cache Values (?, ?, ?, ?, ?, ?, ?, ?)",
                    (label, tmdbid, label, originallabel, poster, fanart_image,
                     year, info))
            except Exception as e:
                # Best effort (duplicates violate UNIQUE(imdbid)), but log
                # instead of the old bare 'except: pass' that hid every error.
                basic.log(u"localdb.save_cache ##Ignored: %s" % str(e))
        else:
            dbcur.execute("INSERT INTO cache Values (?, ?, ?, ?, ?, ?, ?, ?)",
                          (imdbid, tmdbid, label, originallabel, poster,
                           fanart_image, year, info))
        dbcon.commit()
    except BaseException as e:
        basic.log(u"localdb.save_cache ##Error: %s" % str(e))
Esempio n. 12
0
File: trakt.py Progetto: teosan5/0
def listmovies(url,index):
	basic.log(u"trakt.listmovies url: %s" % url)
	mainlist = []
	sendlist = [] 
	result = []
	threads = []
	order = 0
	if 'popular' in url: headers = { 'Content-Type': 'application/json', 'trakt-api-version': '2', 'trakt-api-key': links.link().trakt_apikey, 'page': index, 'limit': '25' }
	elif 'trending' in url: headers = { 'Content-Type': 'application/json', 'trakt-api-version': '2', 'trakt-api-key': links.link().trakt_apikey, 'page': index, 'limit': '25' }	
	print headers,url
	jsonpage = basic.open_url_headers(url,headers)
	print 'jsonpage %s' % jsonpage
	j = json.loads(jsonpage)
	for list in j:
		order += 1
		if 'trending' in url: sendlist.append([order,list['movie']['ids']['tmdb']])
		elif 'popular' in url: sendlist.append([order,list['ids']['tmdb']])
	chunks=[sendlist[x:x+5] for x in xrange(0, len(sendlist), 5)]
	for i in range(len(chunks)): threads.append(threading.Thread(name='listmovies'+str(i),target=tmdb.searchmovielist,args=(chunks[i],result, )))
	[i.start() for i in threads]
	[i.join() for i in threads]
	result = sorted(result, key=basic.getKey)
	for id,lists in result: mainlist.append(lists)
	basic.log(u"trakt.listmovies mainlist: %s" % mainlist)	
	return mainlist
Esempio n. 13
0
    def load(self, filename):
        """Load a spectrum set from *filename*.

        Files may come as numbered families ("name.1.ext" .. "name.9.ext");
        each existing member is loaded into self.speclist[i].  When no
        numbered files are found, the plain *filename* goes into slot 0.
        Returns self; self.count holds the number of loaded spectra.
        """
        path, fn = os.path.split(filename)
        name, ext = os.path.splitext(fn)
        # Second split peels off a numeric suffix: "name.3" -> ("name", ".3").
        name, nr = os.path.splitext(name)

        self.name = name

        cnt = 0

        try:
            # Raises ValueError when the suffix is not numeric, i.e. this is
            # not part of a numbered family -- handled below by the fallback.
            nr = int(nr[1:])

            for i in range(1, 10):
                fn = "%s/%s.%s%s" % (path, name, i, ext)
                if not os.path.exists(fn): continue
                log("load %s" % fn)

                self.speclist[i] = Spectrum().load(fn)
                self.filenames[i] = "%s.%s%s" % (name, i, ext)
                cnt = cnt + 1
        except ValueError:
            pass
        except Exception as e:
            handle(e)

        if not cnt:
            if os.path.exists(filename):
                self.speclist[0] = Spectrum().load(filename)
                # NOTE(review): after the loop above, 'fn' may hold the last
                # candidate path rather than filename's basename -- confirm
                # this is the intended value here.
                self.filenames[0] = fn
                cnt = 1

        self.count = cnt
        return self
Esempio n. 14
0
def listmovies(url):
    """Scrape IMDB title ids from *url* and resolve them to movie metadata
    via TMDB lookups running five-per-thread."""
    basic.log(u"imdb.listmovies url: %s" % url)
    mainlist = []
    result = []
    htmlpage = basic.open_url(url)
    found = re.findall('data-tconst="(.+?)"', htmlpage, re.DOTALL)
    # De-duplicate while keeping first-seen order, then number 1..N.
    unique_ids = sorted(set(found), key=lambda x: found.index(x))
    sendlist = [[pos + 1, imdb_id] for pos, imdb_id in enumerate(unique_ids)]

    chunks = [sendlist[x:x + 5] for x in xrange(0, len(sendlist), 5)]
    threads = [
        threading.Thread(name='listmovies' + str(i),
                         target=tmdb.searchmovielist,
                         args=(chunks[i], result))
        for i in range(len(chunks))
    ]
    [t.start() for t in threads]
    [t.join() for t in threads]
    for _, lists in sorted(result, key=basic.getKey):
        mainlist.append(lists)
    basic.log(u"imdb.listmovies mainlist: %s" % mainlist)
    return mainlist
Esempio n. 15
0
def runDialog(dialog, name, **kwargs):
	"""Render the skin template for *name*, run *dialog* modally, then
	remove the generated temp dialog file."""
	xml_name = "{}.xml".format(name)
	skin, color = get_valid_skin()
	files = file_struct(skin, xml_name)

	# Fill the template with the skin palette and write the temp dialog.
	with open(files["template"]) as f:
		template = f.read()
	write_dialog(files, localize(template.format(**color)))

	log("runDialog")

	ui = dialog(xml_name, path_tmp, "Default", "720p", **kwargs)
	ui.doModal()

	os.remove(files["tmp_dialog"])

	# wait for animation finished
	time.sleep(0.2)
Esempio n. 16
0
	def listen_loop(self, callback):
		"""Serve requests on the unix-domain socket until exit_str arrives.

		Each payload is passed to callback(conn, result); life_str requests
		are answered in place as a liveness check.  Per-request exceptions
		are reported via infhandle and the loop continues.
		"""
		log("socket: start socket loop")
		sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
		sock.settimeout(None)

		# Remove a stale socket file left over from a previous run.
		try: os.remove(self.sock_name)
		except OSError:	pass

		sock.bind(self.sock_name)

		while True:
			try:
				result, conn = self.get_from_socket(sock)

				#log("socket: {} receive '{}'".format(self.sock_name, result))

				if result == self.exit_str:
					conn.close()
					break
				if result == self.life_str:
					# Echo back so the client knows the server is alive.
					self._send(conn,self.life_str)
					continue

				callback(conn, result)

			except Exception as e: infhandle(e)
		log("socket: stop socket loop")

		# Clean up the socket file on shutdown.
		try: os.remove(self.sock_name)
		except OSError:	pass
Esempio n. 17
0
File: omdbapi.py Progetto: teosan5/0
def listmovies(url):
    """Build a movie list for an OMDB API results page, resolving ids five
    per worker thread."""
    basic.log(u"omdbapi.listmovies url: %s" % url)
    mainlist = []
    result = []
    jsonpage = basic.open_url(url)
    j = json.loads(jsonpage)
    # Number entries 1..N in page order.
    sendlist = [[pos + 1, entry['id']]
                for pos, entry in enumerate(j['results'])]
    chunks = [sendlist[x:x + 5] for x in xrange(0, len(sendlist), 5)]
    threads = [
        threading.Thread(name='listmovies' + str(i),
                         target=searchmovielist,
                         args=(chunks[i], result))
        for i in range(len(chunks))
    ]
    [t.start() for t in threads]
    [t.join() for t in threads]
    for _, lists in sorted(result, key=basic.getKey):
        mainlist.append(lists)
    basic.log(u"omdbapi.listmovies mainlist: %s" % mainlist)
    return mainlist
def listmovies(url):
    basic.log(u"rotten.listmovies url: %s" % url)
    mainlist = []
    sendlist = []
    result = []
    threads = []
    order = 0
    jsonpage = basic.open_url(url)
    print 'jsonpage %s' % jsonpage
    j = json.loads(jsonpage)
    for list in j['movies']:
        order += 1
        try:
            sendlist.append([order, 'tt' + list['alternate_ids']['imdb']])
        except:
            pass
    chunks = [sendlist[x:x + 5] for x in xrange(0, len(sendlist), 5)]
    for i in range(len(chunks)):
        threads.append(
            threading.Thread(name='listmovies' + str(i),
                             target=tmdb.searchmovielist,
                             args=(
                                 chunks[i],
                                 result,
                             )))
    [i.start() for i in threads]
    [i.join() for i in threads]
    result = sorted(result, key=basic.getKey)
    for id, lists in result:
        mainlist.append(lists)
    basic.log(u"rotten.listmovies mainlist: %s" % mainlist)
    return mainlist
Esempio n. 19
0
    def load_required_module(self, name):
        """Load PulseAudio module *name* unless it is already present."""
        if any(module.name == name for module in self.padb.modules.values()):
            log("pamm: %s already loaded" % name)
            return

        self.pc.load_module(name)
Esempio n. 20
0
def searchmovielist(list, result):
    """Resolve each (order, id) pair via searchmovie, appending successful
    lookups to *result* as [order, moviedata]."""
    basic.log(u"tmdb.searchmovielist list: %s" % list)
    for num, movie_id in list:
        data = searchmovie(movie_id)
        if data:
            result.append([num, data])
    basic.log(u"tmdb.searchmovielist result: %s" % result)
Esempio n. 21
0
def get_current_skin():
    """Ask Kodi via JSON-RPC for the active skin setting and return it
    lowercased with the first five characters (presumably 'skin.') dropped."""
    resp = executeJSONRPC(
        '{"jsonrpc":"2.0", "method":"Settings.GetSettingValue", "params":{ "setting":"lookandfeel.skin"}, "id":1}'
    )
    value = json.loads(resp)["result"]["value"]
    skin = value[5:].lower()
    log("skin: {}".format(skin))
    return skin
Esempio n. 22
0
    def on_volume_get(self):
        """Return the current volume of the output sink, or None when there
        is no sink or the query fails."""
        log("pamm: on_volume_get")

        sink = self.padb.output_sink
        if sink is None:
            return None
        try:
            return self.pc.get_sink_volume(sink.index)
        except Exception:
            return None
Esempio n. 23
0
    def on_eq_off_switch(self):
        """Disable the equalizer for the current output sink and re-route."""
        if not self.padb.output_sink or not self.padb.kodi_is_dynamic:
            return

        log("pamm: on_eq_off_switch")

        self.config.set("eq_enable", "off", self.padb.output_sink.name)
        self.adjust_routing()
Esempio n. 24
0
    def on_sink_change(self, index):
        """Persist the volume of our output sink when it is the sink that
        changed; otherwise just log the event."""
        sink = self.padb.output_sink
        if sink and sink.index == index:
            log("pamm: on_sink_change %s" % sink.name)
            volume = self.pc.get_sink_volume(sink.index)
            self.config.set("volume", volume, sink.name)
        else:
            log("pamm: on_sink_change %d" % index)
Esempio n. 25
0
    def on_sweep_play(self, count=1, channel=None, vol=1):
        """Kick off a frequency sweep on a background thread, stopping any
        tone currently playing first."""
        log("soge: on_sweep_play")
        vol = vol * 0.58        # headroom scaling
        count = int(count)
        if self.playing:
            self.on_tone_stop()
        count = max(count, 1)   # at least one sweep

        Thread(target=self.sweep_play_loop,
               args=(count, channel, vol)).start()
Esempio n. 26
0
	def on_left_right(self, fid, step):
		"""Move focus horizontally by *step* with wrap-around over columns
		0..2, clamping the row to the target column's maximum."""
		x, y = self.get_pos(fid)
		log("{} {}".format(x, y))
		x += step
		if x < 0:
			x = 2
		elif x > 2:
			x = 0
		y = min(y, self.maxy[x])
		self.setFocusId(self.get_cid(x, y))
Esempio n. 27
0
def delete_index():
	"""Empty the indexing table and reclaim the freed space."""
	try:
		connection = database.connect(addonCache)
		cursor = connection.cursor()
		for statement in ("DELETE FROM indexing", "VACUUM"):
			cursor.execute(statement)
		connection.commit()
	except BaseException as e:
		basic.log(u"localdb.delete_index ##Error: %s" % str(e))
Esempio n. 28
0
def create_tables():
	"""Create the indexing and cache tables on first run (idempotent)."""
	try:
		connection = database.connect(addonCache)
		cursor = connection.cursor()
		cursor.execute(
			"CREATE TABLE IF NOT EXISTS indexing "
			"(imdbid TEXT, pageid TEXT, label TEXT, UNIQUE(imdbid));")
		cursor.execute(
			"CREATE TABLE IF NOT EXISTS cache "
			"(imdbid TEXT, tmdbid TEXT, label TEXT, originallabel TEXT, "
			"poster TEXT, fanart_image TEXT, year TEXT, info TEXT, "
			"UNIQUE(imdbid));")
		connection.commit()
	except BaseException as e:
		basic.log(u"localdb.create_tables ##Error: %s" % str(e))
Esempio n. 29
0
    def on_pulse_connect(self):
        """Start pulse control, notify the database and module manager, and
        ask the Kodi side for current service/player state."""
        log("pact: start pulse control")
        self.pc.start()
        for listener in (self.padb, self.pamm):
            listener.on_pa_connect()

        SocketCom("kodi").call_func("up", "service", [])
        SocketCom("kodi").call_func("get", "player", [])
Esempio n. 30
0
File: localdb.py Progetto: teosan5/0
def save_index(imdbid, index, label):
    """Store an (imdbid, pageid, label) row; failures are only logged."""
    row = (imdbid, index, label)
    try:
        connection = database.connect(addonCache)
        connection.cursor().execute(
            "INSERT INTO indexing Values (?, ?, ?)", row)
        connection.commit()
    except BaseException as e:
        basic.log(u"localdb.save_index ##Error: %s" % str(e))
Esempio n. 31
0
File: localdb.py Progetto: teosan5/0
def delete_index():
    """Clear the indexing table and vacuum the database file."""
    try:
        connection = database.connect(addonCache)
        cursor = connection.cursor()
        for statement in ("DELETE FROM indexing", "VACUUM"):
            cursor.execute(statement)
        connection.commit()
    except BaseException as e:
        basic.log(u"localdb.delete_index ##Error: %s" % str(e))
Esempio n. 32
0
	def profile_load(self, name):
		"""Activate the EQ profile called *name*; unknown names fall back to
		the default profile.  Invalidates the cached filter frequencies."""
		if not self.profiles:
			self.profile_file_load()

		try:
			# Profile data is stored without its name; prepend it.
			self.profile = EqProfile([name] + self.profiles[name])
		except KeyError:
			log("cannot find %s, load default profile" % name)
			self.profile = EqProfile()
		except Exception as e:
			handle(e)

		# Force recomputation on the next coefficient request.
		self.filter_freq = None
Esempio n. 33
0
 def wait_user_action(self):
     """Block on the user pipe and return its content; on OSError the
     error is handled and exit_str is returned instead."""
     try:
         log("launcher: wait for user action")
         with open(self.ppath) as f:
             return f.read()
     except OSError as e:
         handle(e)
         return self.exit_str
Esempio n. 34
0
File: cnmg.py Progetto: teosan5/0
def listmovies(url, tip):
    """Build a movie list from a cinemagia page.

    tip == 'liste': scrape IMDB ids from a list page and resolve them via
    tmdb.searchmovielist.  tip == 'filme': scrape full metadata tuples from
    a movie grid and resolve them via omdbapi.searchmovielist.
    NOTE(review): any other *tip* value leaves 'target' unbound and raises
    NameError at the thread-creation loop -- confirm callers only pass
    these two values.
    """
    basic.log(u"cnmg.listmovies url: %s" % url)
    mainlist = []
    sendlist = []
    result = []
    threads = []
    order = 0
    if tip == 'liste':
        htmlpage = basic.open_url(url)
        # Outer regex isolates each list entry; inner one pulls the IMDB id
        # out of its imdb.com/title/ link.
        regex = '''<li class="list_item clearfix">(.+?)</li>'''
        regex2 = '''<a [^>]*href\s*=\s*"[^"]*imdb.com/title/(.*?)/"'''
        for lists in re.compile(regex, re.IGNORECASE | re.MULTILINE
                                | re.DOTALL).findall(htmlpage):
            for imdb_id in re.compile(regex2, re.DOTALL).findall(lists):
                order += 1
                sendlist.append([order, imdb_id])
        target = tmdb.searchmovielist
    elif tip == 'filme':
        # Browser-like headers plus a 'ps=30' cookie (page size, presumably).
        headers = {
            'User-Agent':
            'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:51.0) Gecko/20100101 Firefox/51.0',
            'Accept':
            'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Referer': url,
            'Cookie': 'ps=30'
        }
        htmlpage = basic.open_url_headers(url, headers)
        # Outer regex isolates each poster block; the inner one captures
        # image, title, year, director, actors, genre, rating, trailer and
        # description (several groups are optional).
        regex = '''<div class="poza">(.+?)</div>\n</li>'''
        regex2 = '''img src="(.+?)".+?<h2>.+?title.+?>(.+?)<.+?\((\d+)\).*(?:^$|<li>(.+?)</li>).*(?:^$|<li>(.+?)</li>).+?Gen.+?">(.+?)</ul>.+?(?:^$|\((.+?)\)).+?body".+?(?:^$|href="(.+?)".+?)(?:^$|<span>(.+?)</span>)'''
        for lists in re.compile(regex, re.IGNORECASE | re.MULTILINE
                                | re.DOTALL).findall(htmlpage):
            for imagine, nume, an, regia, actori, gen, nota, trailer, descriere in re.compile(
                    regex2,
                    re.IGNORECASE | re.MULTILINE | re.DOTALL).findall(lists):
                order += 1
                nume = nume.decode('utf-8')
                sendlist.append([
                    order, imagine, nume, an, regia, actori, gen, nota,
                    trailer, descriere
                ])
        target = omdbapi.searchmovielist
    # Resolve metadata five entries per worker thread.
    chunks = [sendlist[x:x + 5] for x in xrange(0, len(sendlist), 5)]
    for i in range(len(chunks)):
        threads.append(
            threading.Thread(name='listmovies' + str(i),
                             target=target,
                             args=(
                                 chunks[i],
                                 result,
                             )))
    [i.start() for i in threads]
    [i.join() for i in threads]
    # Restore original page order before returning.
    result = sorted(result, key=basic.getKey)
    for id, lists in result:
        mainlist.append(lists)
    basic.log(u"imdb.listmovies mainlist: %s" % mainlist)
    return mainlist
Esempio n. 35
0
def delete_cache():
	"""Empty the cache table, reclaim space and return the localized
	confirmation message (UTF-8 encoded)."""
	try:
		connection = database.connect(addonCache)
		cursor = connection.cursor()
		for statement in ("DELETE FROM cache", "VACUUM"):
			cursor.execute(statement)
		connection.commit()
		return language(30022).encode('utf-8')
	except BaseException as e:
		basic.log(u"localdb.delete_cache ##Error: %s" % str(e))
Esempio n. 36
0
    def on_volume_set(self, volume):
        """Apply *volume* (string- or number-typed) to the output sink."""
        level = float(volume)
        log("pamm: on_volume_set %f" % level)
        sink = self.padb.output_sink
        if sink is None:
            return None

        try:
            self.pc.set_sink_volume(sink.index, level)
        except Exception as e:
            handle(e)
Esempio n. 37
0
File: localdb.py Progetto: teosan5/0
def delete_cache():
    """Clear the cache table, vacuum the database and return the localized
    confirmation message (UTF-8 encoded)."""
    try:
        connection = database.connect(addonCache)
        cursor = connection.cursor()
        for statement in ("DELETE FROM cache", "VACUUM"):
            cursor.execute(statement)
        connection.commit()
        return language(30022).encode('utf-8')
    except BaseException as e:
        basic.log(u"localdb.delete_cache ##Error: %s" % str(e))
Esempio n. 38
0
	def call_func(self, func, target, args=None):
		"""Send a JSON-encoded (func, target, args) request to the server
		and return the decoded reply, or None on failure.

		args defaults to an empty list; the old mutable-default argument
		([]) is avoided as it is shared across calls.
		"""
		if args is None:
			args = []
		send_string = json.dumps([func, target, args])
		log("socket: call_func send '{}'".format(send_string))
		result = self.send_to_server(send_string)
		log("socket: call_func receive '{}'".format(result))

		if result is not None:
			try:
				return json.loads(result)
			except Exception as e:
				infhandle(e)
		return None
Esempio n. 39
0
def get_cache(id):
	"""Look up a cached movie row by IMDB ('tt...') or TMDB id.

	Returns the row tuple, or False when not cached; errors are logged.
	"""
	try:
		dbcon = database.connect(addonCache)
		dbcur = dbcon.cursor()
		# Parameterized queries: the previous string interpolation was open
		# to SQL injection and broke on ids containing quotes.
		if str(id).startswith('tt'):
			dbcur.execute("SELECT * FROM cache WHERE imdbid = ?", (str(id),))
		else:
			dbcur.execute("SELECT * FROM cache WHERE tmdbid = ?", (str(id),))
		found = dbcur.fetchone()
		if not found:
			return False
		return found
	except BaseException as e:
		basic.log(u"localdb.get_cache ##Error: %s" % str(e))
Esempio n. 40
0
def get_index(imdbid,index,label):
	"""Return True when the stored page index for *imdbid* differs from
	*index*; unknown ids are recorded via save_index and reported as
	unchanged (False).  Errors are logged and return None.
	"""
	try:
		dbcon = database.connect(addonCache)
		dbcur = dbcon.cursor()
		# Parameterized query instead of string interpolation (SQL
		# injection / quoting bugs).
		dbcur.execute("SELECT * FROM indexing WHERE imdbid = ?", (str(imdbid),))
		found = dbcur.fetchone()
		if not found:
			save_index(imdbid, index, label)
			return False
		if found[1] == index:
			return False
		return True
	except BaseException as e:
		basic.log(u"localdb.get_index ##Error: %s" % str(e))
Esempio n. 41
0
def auth_token(trakt_user, trakt_password):
	try:
		trakt_key = links.link().trakt_apikey
		headers = {'Content-Type': 'application/json', 'trakt-api-key': trakt_key, 'trakt-api-version': '2'}
		post = json.dumps({'login': trakt_user, 'password': trakt_password})
		print headers,post
		request = urllib2.Request('https://api.trakt.tv/auth/login', data=post, headers=headers)
		response = urllib2.urlopen(request, timeout=10)
		result = response.read()
		result = json.loads(result)
		auth = result['token']
		response.close()
		return auth
	except BaseException as e:
		basic.log(u"trakt.auth ##Error: %s" % str(e))
Esempio n. 42
0
 def edit_histogram(self,plot=True,v=False):
     """Write per-namespace, per-revert-metric edit-count histograms to CSV
     under results/histograms.

     plot, v: accepted but unused in this body -- NOTE(review): confirm
     callers/subclasses rely on them.
     """
     basic.log('creating edit histogram %s' % self.lang)
     f_out = basic.create_dir('results/histograms')
     df = pd.read_csv(self.db_path)
     df = self.drop_dups(df)
     # Optionally exclude pages with a single edit.
     if self.drop1:
         df = df.loc[(df['len'] > 1)]
     for n in self.namespace:
         for r in self.revert:
             basic.log('%s %s %s' % (self.lang,n,r))
             if n == 'at':
                 # 'at' appears to mean all namespaces combined -- TODO confirm.
                 result = df[r].value_counts()
             else:
                 result = df.loc[(df['namespace'] == self.namespace.index(n)),r].value_counts()
             result = result.sort_index(ascending=True)
             # NOTE(review): result is a Series here; assigning .columns sets
             # a plain attribute and does not rename the CSV column --
             # confirm whether a header named 'articles' was intended.
             result.columns = ['articles']
             result.to_csv('%s/%s_%s_%s.csv' % (f_out,self.lang,n,r),encoding='utf-8',index_label='edits')
Esempio n. 43
0
def results(url, auth=True, post=None):
	"""Perform a Trakt v2 API request and return the raw response body.

	post is JSON-encoded when given; credentials are attached unless they
	are unset or auth is disabled.  Errors are logged, returns None.
	"""
	try:
		headers = {'Content-Type': 'application/json', 'trakt-api-key': links.link().trakt_apikey, 'trakt-api-version': '2'}
		if not post == None: post = json.dumps(post)
		# Attach credentials only when both are configured and auth is on.
		if (links.link().trakt_user != '' and links.link().trakt_password != '' and auth != False):
			token = auth_token(links.link().trakt_user, links.link().trakt_password)
			headers['trakt-user-login'] = links.link().trakt_user
			headers['trakt-user-token'] = token
		request = urllib2.Request(url, data=post, headers=headers)
		response = urllib2.urlopen(request, timeout=30)
		body = response.read()
		response.close()
		return body
	except BaseException as e:
		basic.log(u"trakt.results ##Error: %s" % str(e))
Esempio n. 44
0
def listmovies(url):
	"""Build a movie list for an OMDB API results page, resolving ids five
	per worker thread."""
	basic.log(u"omdbapi.listmovies url: %s" % url)
	mainlist = []
	result = []
	jsonpage = basic.open_url(url)
	j = json.loads(jsonpage)
	# Number entries 1..N in page order.
	sendlist = [[pos + 1, entry['id']] for pos, entry in enumerate(j['results'])]
	chunks = [sendlist[x:x+5] for x in xrange(0, len(sendlist), 5)]
	threads = [threading.Thread(name='listmovies'+str(i), target=searchmovielist, args=(chunks[i], result)) for i in range(len(chunks))]
	[t.start() for t in threads]
	[t.join() for t in threads]
	for _, lists in sorted(result, key=basic.getKey):
		mainlist.append(lists)
	basic.log(u"omdbapi.listmovies mainlist: %s" % mainlist)
	return mainlist
Esempio n. 45
0
def listmovies(url):
	"""Scrape IMDB title ids from *url* and resolve them via TMDB lookups
	running five-per-thread."""
	basic.log(u"imdb.listmovies url: %s" % url)
	mainlist = []
	result = []
	htmlpage = basic.open_url(url)
	found = re.findall('data-tconst="(.+?)"',htmlpage, re.DOTALL)
	# Number the scraped ids 1..N in page order.
	sendlist = [[pos + 1, imdb_id] for pos, imdb_id in enumerate(found)]
	chunks = [sendlist[x:x+5] for x in xrange(0, len(sendlist), 5)]
	threads = [threading.Thread(name='listmovies'+str(i), target=tmdb.searchmovielist, args=(chunks[i], result)) for i in range(len(chunks))]
	[t.start() for t in threads]
	[t.join() for t in threads]
	for _, lists in sorted(result, key=basic.getKey):
		mainlist.append(lists)
	basic.log(u"imdb.listmovies mainlist: %s" % mainlist)
	return mainlist
def listmovies(url):
	basic.log(u"rotten.listmovies url: %s" % url)
	mainlist = []
	sendlist = [] 
	result = []
	threads = []
	order = 0
	jsonpage = basic.open_url(url)
	print 'jsonpage %s' % jsonpage
	j = json.loads(jsonpage)
	for list in j['movies']:
		order += 1
		try: sendlist.append([order,'tt'+list['alternate_ids']['imdb']])
		except: pass
	chunks=[sendlist[x:x+5] for x in xrange(0, len(sendlist), 5)]
	for i in range(len(chunks)): threads.append(threading.Thread(name='listmovies'+str(i),target=tmdb.searchmovielist,args=(chunks[i],result, )))
	[i.start() for i in threads]
	[i.join() for i in threads]
	result = sorted(result, key=basic.getKey)
	for id,lists in result: mainlist.append(lists)
	basic.log(u"rotten.listmovies mainlist: %s" % mainlist)	
	return mainlist
Esempio n. 47
0
def getlinks(url,results,order,Source=None):
	"""Collect IMDB title ids from the anchor tags of an HTML page.

	url     -- page to scrape.
	results -- shared list, mutated in place: [order, imdb_id] pairs are
	           appended (and the list is also returned on success).
	order   -- starting sequence number for the appended pairs.
	Source  -- 'IMDB' scrapes relative /title/.../?ref_=..._ov_tt
	           recommendation links; anything else scrapes absolute
	           http://.../title/ links.

	On any error the exception is logged and None is returned
	(results keeps whatever was appended before the failure).
	"""
	basic.log(u"imdb.getlinks url: %s" % url)
	try:
		html_page = basic.open_url(url)
		if html_page:
			soup = BeautifulSoup(html_page)
			if Source == 'IMDB':
				# Relative recommendation links: /title/ttXXXXXXX/?ref_=..._ov_tt
				for link in soup.findAll('a', attrs={'href': re.compile("^/title/.+?/\?ref_=.+?_ov_tt")}):
					# Strip the query string and path noise ('/', 'awards',
					# 'videogallery') so only the ttXXXXXXX id remains.
					if '?' in link.get('href'): cleanlink = link.get('href').split("?")[0].split("title")[1].replace('/','').replace('awards','').replace('videogallery','')
					else: cleanlink = link.get('href').split("title")[1].replace('/','').replace('awards','').replace('videogallery','')
					results.append([order, cleanlink])
					order += 1			
			else:
				# Absolute links of the form http://<host>/title/ttXXXXXXX/...
				for link in soup.findAll('a', attrs={'href': re.compile("^http://.+?/title/")}):
					if '?' in link.get('href'): cleanlink = link.get('href').split("?")[0].split("/title/")[1].replace('/','').replace('awards','').replace('videogallery','')
					else: cleanlink = link.get('href').split("title")[1].replace('/','').replace('awards','').replace('videogallery','')
					results.append([order, cleanlink])
					order += 1
			basic.log(u"imdb.getlinks results: %s" % results)
			return results
	except BaseException as e: basic.log(u"imdb.getlinks ERROR: %s - %s" % (str(url),str(e)))
Esempio n. 48
0
 def edit_statistics(self,statistics,v=False):
     """Compute per-namespace edit statistics and write them as one CSV row.

     statistics -- ordered list of statistic names; recognised values are
                   'total', 'var', 'std', 'mean', 'median', 'total_ratio',
                   'mean_ratio' and 'missing_talk'.  Order matters: the
                   ratio statistics read 'total'/'mean' results computed
                   earlier in the same pass.
     v          -- unused flag, kept for interface compatibility.

     Writes results/basic_stats/edits[_drop1]_<lang>.csv (header plus a
     single row for self.lang) and returns the nested dict
     result[lang][namespace][revert][statistic].
     """
     f_out = basic.create_dir('results/basic_stats')
     if self.drop1:
         f = open('%s/edits_drop1_%s.csv' % (f_out,self.lang),'w')
     else:
         f = open('%s/edits_%s.csv' % (f_out,self.lang),'w')
     # Header: one quoted column per (namespace, statistic, revert) combo,
     # in the same nesting order as the value loop below.
     header = '"lang"'
     for n in self.namespace:
         for r in self.revert:
             for s in statistics:    
                 header = header + ((',"%s_%s_%s"') % (n,s,r))
     header = header + '\n'
     f.write(header)
     result = defaultdict(dict)
     f.write('"%s"' % self.lang)
     result[self.lang] = defaultdict(dict)
     df = pd.read_csv(self.db_path)
     df = self.drop_dups(df)
     if self.drop1:
         # Optionally ignore pages with a single edit.
         df = df.loc[(df['len'] > 1)]
     for n in self.namespace:
         result[self.lang][n] = defaultdict(dict)
         for r in self.revert:
             result[self.lang][n][r] = defaultdict(dict)
             basic.log('%s %s %s' % (self.lang,n,r))
             for s in statistics:
                 # 'at' aggregates over every namespace; any other n
                 # restricts to rows whose namespace column equals the
                 # position of n in self.namespace.
                 if s == 'total':
                     if n == 'at':
                         result[self.lang][n][r][s] = df[r].sum()
                     else:
                         result[self.lang][n][r][s] = df.loc[(df['namespace'] == self.namespace.index(n)),r].sum()
                 elif s == 'var':
                     if n == 'at':
                         result[self.lang][n][r][s] = df[r].var()
                     else:
                         result[self.lang][n][r][s] = df.loc[(df['namespace'] == self.namespace.index(n)),r].var()
                 elif s == 'std':
                     if n == 'at':
                         result[self.lang][n][r][s] = df[r].std()
                     else:
                         result[self.lang][n][r][s] = df.loc[(df['namespace'] == self.namespace.index(n)),r].std()
                 elif s == 'mean':
                     if n == 'at':
                         result[self.lang][n][r][s] = df[r].mean()
                     else:
                         result[self.lang][n][r][s] = df.loc[(df['namespace'] == self.namespace.index(n)),r].mean()
                 elif s == 'median':
                     if n == 'at':
                         result[self.lang][n][r][s] = df[r].median()
                     else:
                         result[self.lang][n][r][s] = df.loc[(df['namespace'] == self.namespace.index(n)),r].median()
                 elif s == 'total_ratio':
                     # article/talk totals ratio; only emitted for the 't'
                     # namespace (relies on 'a' and 't' totals already set).
                     if n == 't':
                         result[self.lang][n][r][s] = float(result[self.lang]['a'][r]['total'])/result[self.lang]['t'][r]['total']
                 elif s == 'mean_ratio':
                     # Only for the last namespace in self.namespace.
                     if self.namespace.index(n) == (len(self.namespace)-1):
                         result[self.lang][n][r][s] = float(result[self.lang]['a'][r]['mean'])/result[self.lang]['t'][r]['mean']
                 elif s == 'missing_talk':
                     if self.namespace.index(n) == (len(self.namespace)-1):
                         result[self.lang][n][r][s] = len(df.loc[(df['linked_id'] == 'NONE')])
                 
                 # NOTE(review): for (n, s) combinations not filled above
                 # (e.g. 'total_ratio' outside namespace 't') this writes
                 # the repr of an empty dict into the cell -- presumably
                 # tolerated downstream, but worth confirming.
                 f.write(',%s' % result[self.lang][n][r][s])
     f.write('\n')
     f.close()
     return result
Esempio n. 49
0
 def drop_dups(self,df):
     """Remove every page_id that occurs more than once in *df*.

     keep=False drops *all* rows of a duplicated page_id, not just the
     extra occurrences.  Logs the number of distinct duplicated ids.
     """
     # Series.duplicated() replaces the long-deprecated
     # Index.get_duplicates() (removed in pandas 0.25); nunique() of the
     # duplicated values gives the same distinct-duplicate count.
     dup_count = df['page_id'][df['page_id'].duplicated()].nunique()
     basic.log('dropped %s duplicates' % dup_count)
     return df.drop_duplicates(subset='page_id',keep=False)
Esempio n. 50
0
 def edit_ratio_histogram(self):
     """Write article/talk edit-ratio histograms for self.lang.

     For every revert column in self.revert, pairs each article
     (namespace 0) with its talk page (namespace 1, joined on linked_id),
     computes the integer ratio of article edits to talk edits, and
     writes the value counts to results/ratio_histograms/<lang>_<r>.csv.
     """
     basic.log('creating edit histogram %s' % self.lang)
     f_out = basic.create_dir('results/ratio_histograms')
     df = pd.read_csv(self.db_path)
     df.page_id = df.page_id.astype(float)
     # BUG FIX: `df['linked_id'] != None` is an elementwise comparison
     # against the None object and is True for every row (including NaN),
     # so nothing was ever filtered and astype(float) could fail on
     # missing ids.  Drop real nulls and the 'NONE' sentinel this class
     # uses for pages without a linked talk page.
     df = df.loc[df['linked_id'].notnull() & (df['linked_id'] != 'NONE')]
     df.linked_id = df.linked_id.astype(float)
     df = self.drop_dups(df)
     # (A second, redundant duplicate-drop pass followed here; after
     # keep=False there is nothing left for it to find, so it is gone.)
     if self.drop1:
         df = df.loc[(df['len'] > 1)]
     for r in self.revert:
         basic.log('%s %s' % (self.lang,r))
         basic.log('%s pages' % len(df))
         # Index articles by their own page_id and talk pages by the
         # article they link to, so the division aligns article/talk rows.
         n0 = df.loc[(df['namespace'] == 0)].set_index('page_id',drop=False)
         n1 = df.loc[(df['namespace'] == 1)].set_index('linked_id',drop=False)
         basic.log('%s articles' % len(n0))
         basic.log('%s talk' % len(n1))
         # fill_value=-1 makes unmatched rows come out negative; those are
         # discarded by the `>= 0` filter below.
         ratio = n0[r].divide(n1[r],axis='index',fill_value=-1).to_frame()
         ratio.columns = ['ratio']
         ratio.ratio = ratio.ratio.astype(int)
         ratio = n0.join(ratio).set_index('page_id')
         ratio = ratio.loc[ratio['ratio'] >= 0]
         basic.log('%s ratios' % len(ratio))
         result = ratio['ratio'].value_counts().to_frame()
         result = result.sort_index(ascending=True)
         result.columns = ['pages']
         result.to_csv('%s/%s_%s.csv' % (f_out,self.lang,r),encoding='utf-8',index_label='edit_ratio')
Esempio n. 51
0
def searchmovie(id,cache=True):
	"""Look a movie up on OMDBapi by IMDB id and build a Kodi-style info dict.

	id    -- IMDB "tt..." id.
	cache -- when true (and the "cachesites" setting is enabled) the local
	         cache is consulted first and the fresh result stored after.

	Returns {"label", "originallabel", "poster", "fanart_image", "imdbid",
	"year", "info"}.
	"""
	basic.log(u"omdbapi.searchmovie id: %s" % id)
	listcast = []
	genre = ''
	title = ''
	plot = ''
	tagline = ''
	director = ''
	writer = ''
	poster = ''
	fanart = ''
	year = ''
	dur = 0
	if cache:
		if getSetting("cachesites") == 'true':
			cached = localdb.get_cache(id)
			if cached:
				response = { "label": cached[2], "originallabel": cached[3], "poster": cached[4], "fanart_image": cached[5], "imdbid": cached[0], "year": cached[6], "info": json.loads(cached[7]) }
				return response
	jsonpage = basic.open_url(links.link().omdbapi_info % (id))
	jdef = json.loads(jsonpage)
	title = jdef['Title']
	poster = jdef['Poster']
	fanart = poster
	genre = jdef['Genre']
	plot = jdef['Plot']
	tagline = plot
	# OMDB's Year can be a range like "2001-2003"; keep the first number.
	try: year = re.findall('(\d+)', jdef['Year'], re.DOTALL)[0]
	except: year = jdef['Year']
	listcast = jdef['Actors'].split(', ')
	director = jdef['Director']
	writer = jdef['Writer']
	# Runtime is usually "NNN min", occasionally "N h".
	duration = re.findall('(\d+) min', jdef['Runtime'], re.DOTALL)
	if duration: dur = int(duration[0])
	else: 
		duration = re.findall('(\d) h', jdef['Runtime'], re.DOTALL)
		if duration: dur = int(duration[0])*60
	info = {
			"genre": genre, 
			"year": year,
			"rating": jdef['imdbRating'], 
			"cast": listcast,
			"castandrole": listcast,
			"director": director,
			"plot": plot,
			"plotoutline": plot,
			"title": title,
			"originaltitle": title,
			"duration": dur,
			"studio": '',
			"tagline": tagline,
			"writer": writer,
			"premiered": '',
			"code": id,
			"credits": '',
			"votes": jdef['imdbVotes'],
			"trailer": ''
			}		
	response = {
		"label": '%s (%s)' % (title,year),
		"originallabel": '%s (%s)' % (title,year),
		"poster": poster,
		"fanart_image": fanart,
		"imdbid": id,
		"year": year,
		"info": info
		}
	if cache:
		# BUG FIX: the cache write used the undefined name `originaltitle`
		# (NameError whenever caching was enabled).  OMDB has no separate
		# original title, so use `title`, matching "originallabel" above.
		if getSetting("cachesites") == 'true': localdb.save_cache(id,'','%s (%s)' % (title,year),'%s (%s)' % (title,year),poster,fanart,year,json.dumps(info))
	return response
Esempio n. 52
0
def searchmovie(id):
    """Look a movie up on TMDB and build a Kodi-style info dict.

    id -- TMDB numeric id or IMDB "tt..." id.

    Checks the local cache first (when the "cachesites" setting is on),
    falls back to omdbapi for IMDB ids when TMDB returns no usable JSON,
    and back-fills any still-missing fields from OMDB at the end.
    Returns the response dict, or False when the movie cannot be resolved.
    """
    basic.log(u"tmdb.searchmovie id: %s" % id)
    listgenre = []
    listcast = []
    listcastr = []
    genre = ""
    title = ""
    plot = ""
    tagline = ""
    director = ""
    writer = ""
    story_credit = ""
    poster = ""
    fanart = ""
    temptitle = ""
    originaltitle = ""
    if getSetting("cachesites") == "true":
        cached = localdb.get_cache(id)
        if cached:
            response = {
                "label": cached[2],
                "originallabel": cached[3],
                "poster": cached[4],
                "fanart_image": cached[5],
                "imdbid": cached[0],
                "year": cached[6],
                "info": json.loads(cached[7]),
            }
            return response
    jsonpage = basic.open_url(links.link().tmdb_info_default % (id))
    try:
        jdef = json.loads(jsonpage)
    except:
        # TMDB returned no usable JSON; for IMDB ids fall back to OMDBapi.
        if "tt" in str(id):
            try:
                jdef = omdbapi.searchmovie(str(id))
                return jdef
            except:
                return False
        else:
            return False
    if LANG != "en":
        # Best effort: overlay localized title/genres/plot/artwork; any
        # failure leaves the English defaults untouched.
        try:
            jsonpage = basic.open_url(links.link().tmdb_info % (id, LANG))
            j = json.loads(jsonpage)
            temptitle = j["title"].encode("ascii", "ignore").replace(" ", "")
            if temptitle != "":
                title = j["title"]
            for g in j["genres"]:
                listgenre.append(g["name"])
            genre = ", ".join(listgenre)
            try:
                plot = j["overview"]
            except:
                pass
            try:
                tagline = j["tagline"]
            except:
                pass
            # Keep the *raw* artwork paths; the base URL is applied exactly
            # once further down.  (The old code first built full URLs here
            # and then overwrote them with raw paths, so an exception in
            # between could leave a URL that got the base applied twice.)
            fanart = j["backdrop_path"]
            poster = j["poster_path"]
        except:
            pass
    temptitle = jdef["title"].encode("ascii", "ignore").replace(" ", "")
    if temptitle != "":
        if not title:
            title = jdef["title"]
    # Use the original title when present, otherwise the plain title.
    # (A dead branch that pre-assigned originaltitle from temptitle and was
    # always overwritten here has been removed.)
    temporiginaltitle = jdef["original_title"].encode("ascii", "ignore")
    if temporiginaltitle == "":
        originaltitle = jdef["title"]
    else:
        originaltitle = jdef["original_title"]
    if not poster:
        poster = jdef["poster_path"]
    if not fanart:
        fanart = jdef["backdrop_path"]
    if not fanart:
        fanart = poster
    if fanart:
        fanart = links.link().tmdb_backdropbase % (fanart)
    if poster:
        poster = links.link().tmdb_posterbase % (poster)
    if genre == "":
        for g in jdef["genres"]:
            listgenre.append(g["name"])
        genre = ", ".join(listgenre)
    if not plot:
        plot = jdef["overview"]
    if not tagline:
        tagline = jdef["tagline"]
    try:
        trailer = links.link().youtube_plugin % (jdef["trailers"]["youtube"][0]["source"])
    except:
        trailer = ""
    try:
        year = jdef["release_date"].split("-")[0]
    except:
        year = ""
    try:
        studio = jdef["production_companies"][0]["name"]
    except:
        studio = ""
    for listc in jdef["credits"]["cast"]:
        listcastr.append(listc["name"] + "|" + listc["character"])
        listcast.append(listc["name"])
    # BUG FIX: the Director and Story scans previously had `break` at loop
    # level, so only the very first crew entry was ever inspected.  Scan
    # until the wanted job is found, as the writer scan already did.
    for crew in jdef["credits"]["crew"]:
        if crew["job"] == "Director":
            director = crew["name"]
            break
    for crew in jdef["credits"]["crew"]:
        if crew["job"] == "Story":
            story_credit = crew["name"]
            break
    for crew in jdef["credits"]["crew"]:
        if crew["job"] in ("Writer", "Novel", "Screenplay"):
            writer = crew["name"]
            break
    duration = jdef["runtime"]
    # BUG FIX: `not poster or duration == 0 and jdef["imdb_id"]` bound
    # `and` tighter than `or`, so the OMDB fallback could run with no IMDB
    # id and crash.  Require the id explicitly.
    if (not poster or duration == 0) and jdef["imdb_id"]:
        altsearch = omdbapi.searchmovie(jdef["imdb_id"], False)
        if not poster:
            poster = altsearch["poster"]
        if not fanart:
            fanart = poster
        if not plot:
            plot = altsearch["info"]["plot"]
        if not tagline:
            tagline = altsearch["info"]["plot"]
        if not listcast:
            listcast = altsearch["info"]["cast"]
            listcastr = []
        if not duration:
            duration = altsearch["info"]["duration"]
        if not writer:
            writer = altsearch["info"]["writer"]
        if not director:
            director = altsearch["info"]["director"]
        if not genre:
            genre = altsearch["info"]["genre"]
    info = {
        "genre": genre,
        "year": year,
        "rating": jdef["vote_average"],
        "cast": listcast,
        "castandrole": listcastr,
        "director": director,
        "plot": plot,
        "plotoutline": plot,
        "title": title,
        "originaltitle": originaltitle,
        "duration": duration,
        "studio": studio,
        "tagline": tagline,
        "writer": writer,
        "premiered": jdef["release_date"],
        "code": jdef["imdb_id"],
        "credits": story_credit,
        "votes": jdef["vote_count"],
        "trailer": trailer,
    }
    response = {
        "label": "%s (%s)" % (title, year),
        "originallabel": "%s (%s)" % (originaltitle, year),
        "poster": poster,
        "fanart_image": fanart,
        "imdbid": jdef["imdb_id"],
        "year": year,
        "info": info,
    }
    if getSetting("cachesites") == "true":
        # Cache under the TMDB numeric id even when the lookup came in as
        # an IMDB "tt..." id.
        if not str(id).startswith("tt"):
            tmdbid = id
        else:
            tmdbid = jdef["id"]
        localdb.save_cache(
            jdef["imdb_id"],
            tmdbid,
            "%s (%s)" % (title, year),
            "%s (%s)" % (originaltitle, year),
            poster,
            fanart,
            year,
            json.dumps(info),
        )
    return response