def tmovies_tv(name, movie_title):
    """Scrape TwoMovies episode host links for every SxxExx tag in *name*.

    *movie_title* is 'Title (YYYY)'; the year suffix is stripped to build
    the site's underscore slug. Returns a list of source dicts
    ({'url', 'host', 'direct'}) run through main_scrape.apply_urlresolver,
    or [] after logging on any error.
    """
    try:
        sources = []
        episode_base = base_url + 'watch_episode/'
        # if 'House' in movie_title:
        #     movie_title = movie_title.replace('House','DR House')
        title_part = movie_title[:-6]
        year_part = movie_title[-6:].replace('(', '').replace(')', '')
        slug = title_part.replace(" ", "_").replace(":", "").replace("-", "")
        direct_slug = slug[:-1]
        for sea, epi in re.compile('S(.+?)E(.+?) ').findall(name):
            ep_url = episode_base + direct_slug + '/' + sea + '/' + epi + '/'
            html = OPEN_URLTM(ep_url)
            labels = dom_parser.parse_dom(html, 'a', {'class': "norm vlink"})
            hrefs = dom_parser.parse_dom(html, 'a', {'class': "norm vlink"},
                                         ret='href')
            for host, url in zip(labels, hrefs):
                host = host.replace('www.', '')
                # host = tools.get_hostname(host)
                sources.append({'url': url, 'host': host, 'direct': False})
        sources = main_scrape.apply_urlresolver(sources)
        return sources
    except Exception as e:
        log_utils.log('Error [%s] %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='TwoMovies',
                        msg='(error) %s %s' % (str(e), ''),
                        duration=5000, sound=None)
        return []
def get_sources(video):
    """Collect hoster dicts for *video* by decoding the base64 stream ids
    embedded in each 'elemento' list item on the video's page.

    Only http(s) stream URLs are kept; a 'label' key is attached when the
    item carries a <span class="d"> label. Returns a list of hoster dicts.
    """
    source_url = urlparse.urljoin(base_url, video)
    hosters = []
    if source_url and source_url != FORCE_NO_MATCH:
        page_url = urlparse.urljoin(base_url, source_url)
        html = _http_get(page_url, cache_limit=.25)
        for item in dom_parser.parse_dom(html, 'li', {'class': 'elemento'}):
            encoded = re.search('href="[^"]*/load-episode/#([^"]+)', item)
            if not encoded:
                continue
            stream_url = base64.decodestring(encoded.group(1))
            if not stream_url.startswith('http'):
                continue
            label = dom_parser.parse_dom(item, 'span', {'class': 'd'})
            host = urlparse.urlparse(stream_url).hostname
            quality = get_quality(video, host, QUALITIES.HIGH)
            hoster = {'multi-part': False, 'host': host, 'quality': quality,
                      'views': None, 'rating': None, 'url': stream_url,
                      'direct': False}
            if label:
                hoster['label'] = label[0]
            hosters.append(hoster)
    return hosters
def iwatchon(name):
    """Search merdb for the movie encoded in *name* ('Title (YYYY)') and
    return a list of {'url', 'linkname'} source dicts, skipping ad and
    sponsor/host placeholder links.
    """
    sources = []
    search_base = 'http://www.merdb.link/?search='
    movie_name = name[:-6]
    short_title = name[:-7]
    movie_year = name[-6:].replace('(', '').replace(')', '')
    query = movie_name.replace(" ", "+")[:-1]
    url_slug = movie_name.replace(" ", "_") + movie_year
    listing = OPEN_URL(search_base + query)
    box_re = re.compile(
        '<div class="main_list_box"><a href="(.+?)" title="(.+?)"><img')
    for href, box_title in box_re.findall(listing):
        if url_slug not in href and short_title != box_title:
            continue
        page = OPEN_URL(base_url + href)
        vidlinks = dom_parser.parse_dom(page, 'span',
                                        {'class': "movie_version_link"})
        linknames = dom_parser.parse_dom(page, 'span',
                                         {'class': "version_host"})
        for host_name, vidlink in zip(linknames, vidlinks):
            for linkurl in re.compile('<a href="(.+?)"').findall(vidlink):
                if ("ads.php" in linkurl or "Sponsor" in host_name
                        or "Host" in host_name):
                    continue
                host_name = host_name.replace("'", "")
                sources.append({'url': base_url + linkurl,
                                'linkname': tools.get_hostname(host_name)})
    return sources
def search(video_type, title, year):
    """Search the site's /movie/search/ endpoint.

    Skips series entries (mli-eps marker) and returns
    {'title', 'year', 'url'} dicts whose year matches *year* whenever
    both sides provide one.
    """
    html = _http_get(urlparse.urljoin(base_url, '/movie/search/') + title,
                     cache_limit=1)
    results = []
    for item in dom_parser.parse_dom(html, 'div', {'class': 'ml-item'}):
        title_frag = dom_parser.parse_dom(item, 'span', {'class': 'mli-info'})
        url_match = re.search('href="([^"]+)', item, re.DOTALL)
        year_match = re.search('class="jt-info">(\d{4})<', item)
        has_episodes = dom_parser.parse_dom(item, 'span', {'class': 'mli-eps'})
        if not (title_frag and url_match) or has_episodes:
            continue
        match_title = re.sub('</?h2>', '', title_frag[0])
        match_title = re.sub('\s+\d{4}$', '', match_title)
        url = urlparse.urljoin(url_match.group(1), 'watching.html')
        match_year = year_match.group(1) if year_match else ''
        if not year or not match_year or year == match_year:
            results.append({'title': match_title, 'year': match_year,
                            'url': _pathify_url(url)})
    return results
def __get_ep_pl_url(link_id, html):
    """Build the PLAYLIST_URL2 playlist URL for *link_id* from the
    movie-id / player-token attributes of the page's media-player div.

    Returns None implicitly when either attribute is missing.
    """
    player_attrs = {'id': 'media-player'}
    movie_id = dom_parser.parse_dom(html, 'div', player_attrs, 'movie-id')
    token = dom_parser.parse_dom(html, 'div', player_attrs, 'player-token')
    if movie_id and token:
        return PLAYLIST_URL2 % (movie_id[0], token[0], link_id)
def search(video_type, title, year):
    """Search the WordPress-style '/?s=' endpoint.

    Returns {'url', 'title', 'year'} dicts; a result is kept when either
    side lacks a year or the years agree.
    """
    results = []
    query_url = urlparse.urljoin(base_url, '/?s=') + urllib.quote_plus(title)
    html = get_url(query_url)
    for item in dom_parser.parse_dom(html, 'div', {'class': 'item'}):
        hrefs = dom_parser.parse_dom(item, 'a', ret='href')
        titles = dom_parser.parse_dom(item, 'span', {'class': 'tt'})
        years = dom_parser.parse_dom(item, 'span', {'class': 'year'})
        if not (hrefs and titles):
            continue
        match_year = years[0] if years else ''
        if not year or not match_year or year == match_year:
            results.append({'url': _pathify_url(hrefs[0]),
                            'title': titles[0],
                            'year': match_year})
    return results
def iwatchon(name):
    """Look up *name* ('Title (YYYY)') on merdb and gather host links.

    Returns a list of {'url', 'linkname'} dicts for every non-ad,
    non-sponsor link found on matching movie pages.
    """
    sources = []
    base_search = 'http://www.merdb.link/?search='
    title_part = name[:-6]
    short_title = name[:-7]
    year_part = name[-6:].replace('(', '').replace(')', '')
    query = title_part.replace(" ", "+")[:-1]
    slug = title_part.replace(" ", "_") + year_part
    listing = OPEN_URL(base_search + query)
    boxes = re.compile(
        '<div class="main_list_box"><a href="(.+?)" title="(.+?)"><img'
    ).findall(listing)
    for href, box_title in boxes:
        if slug in href or short_title == box_title:
            page = OPEN_URL(base_url + href)
            version_links = dom_parser.parse_dom(
                page, 'span', {'class': "movie_version_link"})
            version_hosts = dom_parser.parse_dom(
                page, 'span', {'class': "version_host"})
            for host_label, block in zip(version_hosts, version_links):
                for inner_href in re.compile('<a href="(.+?)"').findall(block):
                    usable = ("ads.php" not in inner_href
                              and "Sponsor" not in host_label
                              and "Host" not in host_label)
                    if usable:
                        host_label = host_label.replace("'", "")
                        sources.append(
                            {'url': base_url + inner_href,
                             'linkname': tools.get_hostname(host_label)})
    return sources
def TVLINKPAGE(url, name):
    """Open a TwoMovies episode page and add a Kodi directory entry
    (mode 'tvlinkpageb') for each host link found on it.
    """
    movie_name = name[:-6].decode('UTF-8', 'ignore')
    year = name[-6:]
    dlfoldername = name
    # Load the site cookie first when the user has an account configured.
    if settings.getSetting('tmovies_account') == 'true':
        COOKIELOADER(url)
    html = OPEN_URL(url)
    labels = dom_parser.parse_dom(html, 'a', {'class': "norm vlink"})
    hrefs = dom_parser.parse_dom(html, 'a', {'class': "norm vlink"},
                                 ret='href')
    for label, href in zip(labels, hrefs):
        main.addDir(label, href, 'tvlinkpageb', '', '', '', len(href))
def merdb(name):
    """Scrape MerDB for sources matching *name* ('Title (YYYY)').

    Returns apply_urlresolver-filtered source dicts; on any exception,
    logs it (and optionally notifies the user) and returns [].
    """
    try:
        sources = []
        title_part = name[:-6]
        short_title = name[:-7]
        year_part = name[-6:].replace('(', '').replace(')', '')
        query = title_part.replace(" ", "+")[:-1]
        slug = title_part.replace(" ", "_") + year_part
        listing = OPEN_URL(base_url + '?search=' + query)
        #dp.update(80)
        box_re = re.compile(
            '<div class="main_list_box"><a href="(.+?)" title="(.+?)"><img')
        for href, box_title in box_re.findall(listing):
            if slug not in href and short_title != box_title:
                continue
            page = OPEN_URL(base_url + href)
            vidlinks = dom_parser.parse_dom(
                page, 'span', {'class': "movie_version_link"})
            hosts = dom_parser.parse_dom(
                page, 'span', {'class': "version_host"})
            for host_label, block in zip(hosts, vidlinks):
                #dp.update(80)
                for link in re.compile('<a href="(.+?)"').findall(block):
                    if ("ads.php" in link or "Sponsor" in host_label
                            or "Host" in host_label):
                        continue
                    host_label = host_label.replace("'", "")
                    sources.append({'hostname': 'MerDB', 'views': None,
                                    'url': base_url + link,
                                    'host': host_label, 'direct': False})
        #dp.close()
        return main_scrape.apply_urlresolver(sources)
    except Exception as e:
        log_utils.log('Error [%s] %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='MerDb', msg='(error) %s %s' % (str(e), ''),
                        duration=5000, sound=None)
        return []
def get_sources(suf_url): source_url = suf_url hosters = [] sources = {} if source_url and source_url != FORCE_NO_MATCH: url = urlparse.urljoin(base_url, source_url) #print "URL IS = "+url html = get_url(url) for server_list in dom_parser.parse_dom(html, 'ul', {'class': 'episodes'}): for hash_id in dom_parser.parse_dom(server_list, 'a', ret='data-id'): now = time.localtime() url = urlparse.urljoin(base_url, hash_url) #/ajax/film/episode?hash_id=%s&f=&p=%s url = url % (hash_id, now.tm_hour + now.tm_min) #print "CRAZY URL IS = "+url html =_http_get(url, headers=XHR, cache_limit=.5) #print "HTML IS = "+html if html: try: #print "I DID JSON" js_result = json.loads(html) #print js_result except ValueError: print 'Invalid JSON returned: %s: %s' % (html) log_utils.log('Invalid JSON returned: %s' % (html), log_utils.LOGWARNING) else: if 'videoUrlHash' in js_result and 'grabber' in js_result: # print "ITS IN THERE" query = {'flash': 1, 'json': 1, 's': now.tm_min, 'link': js_result['videoUrlHash'], '_': int(time.time())} query['link'] = query['link'].replace('\/', '/') grab_url = js_result['grabber'].replace('\/', '/') grab_url += '?' + urllib.urlencode(query) html =get_url(grab_url) #print "NEW HTML IS = "+html if html: try: js_result = json.loads(html) except ValueError: print 'Invalid JSON returned: %s: %s' % (html) else: for result in js_result: if 'label' in result: quality = _height_get_quality(result['label']) else: quality = _gv_get_quality(result['file']) sources[result['file']] = quality for source in sources: hoster = {'hostname':'9Movies','multi-part': False, 'host': _get_direct_hostname(source), 'quality': sources[source], 'view': None, 'rating': None, 'url': source, 'direct': True} hosters.append(hoster) hosters = main_scrape.apply_urlresolver(hosters) return hosters
def tmovies(name):
    """Collect TwoMovies host links for the movie in *name* ('Title (YYYY)'),
    trying the plain-title slug first and then the title+year slug.

    Returns resolver-filtered source dicts, or [] after logging on error.
    """
    try:
        sources = []
        title_part = name[:-6]
        year_part = name[-6:].replace('(', '').replace(')', '')
        slug = title_part.replace(" ", "_").replace(":", "").replace("-", "")
        candidate_urls = (base_url + 'watch_movie/' + slug[:-1],
                          base_url + 'watch_movie/' + slug + year_part)
        for movie_url in candidate_urls:
            html = OPEN_URLTM(movie_url)
            labels = dom_parser.parse_dom(html, 'a', {'class': "norm vlink"})
            hrefs = dom_parser.parse_dom(html, 'a', {'class': "norm vlink"},
                                         ret='href')
            for host, href in zip(labels, hrefs):
                # host = tools.get_hostname(host)
                sources.append({'url': href,
                                'host': host.replace('www.', ''),
                                'direct': False})
        return main_scrape.apply_urlresolver(sources)
    except Exception as e:
        log_utils.log('Error [%s] %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='TwoMovies',
                        msg='(error) %s %s' % (str(e), ''),
                        duration=5000, sound=None)
        return []
def get_sources(suf_url, pre_url):
    """Build 123Movies hoster dicts for the page at *suf_url*.

    Fetches the SL_URL server list for the page's movie-id, resolves each
    loadEpisode entry through the JSON playlist (link types 12/13/14) or
    the XML media playlist, then wraps every collected source URL.
    """
    hosters = []
    source_url = suf_url
    if source_url and source_url != FORCE_NO_MATCH:
        page_url = urlparse.urljoin(base_url, source_url)
        page_html = _http_get(page_url, cache_limit=.5)
        movie_id = dom_parser.parse_dom(page_html, 'div',
                                        {'id': 'media-player'}, 'movie-id')
        if movie_id:
            list_url = urlparse.urljoin(base_url, SL_URL % (movie_id[0]))
            html = _http_get(list_url, cache_limit=.5)
            sources = {}
            ep_re = ('loadEpisode\(\s*(\d+)\s*,\s*(\d+)\s*\)'
                     '.*?class="btn-eps[^>]*>([^<]+)')
            for match in re.finditer(ep_re, html, re.DOTALL):
                link_type, link_id, q_str = match.groups()
                if link_type in ['12', '13', '14']:
                    json_url = urlparse.urljoin(base_url,
                                                PLAYLIST_URL1 % (link_id))
                    sources.update(__get_link_from_json(json_url, q_str))
                else:
                    media_url = __get_ep_pl_url(link_type, page_html)
                    if media_url:
                        xml_url = urlparse.urljoin(base_url, media_url)
                        xml = _http_get(xml_url, cache_limit=.5)
                        sources.update(__get_links_from_xml(xml, pre_url))
            for source in sources:
                direct = sources[source]['direct']
                if direct:
                    host = _get_direct_hostname(source)
                else:
                    host = urlparse.urlparse(source).hostname
                hosters.append({'hostname': '123Movies', 'multi-part': False,
                                'host': host,
                                'quality': sources[source]['quality'],
                                'views': None, 'rating': None,
                                'url': source, 'direct': direct})
            hosters = main_scrape.apply_urlresolver(hosters)
    return hosters
def search(video_type, title, year):
    """Search the '/search?keyword=' endpoint for *title*.

    Episode entries are skipped; the site exposes no release year, so
    'year' is always '' in the returned {'title', 'year', 'url'} dicts.
    """
    keyword_url = urlparse.urljoin(
        base_url, '/search?keyword=%s' % (urllib.quote_plus(title)))
    html = get_url(keyword_url)
    results = []
    match_year = ''
    movie_lists = dom_parser.parse_dom(html, 'ul', {'class': 'movie-list'})
    if movie_lists:
        for item in dom_parser.parse_dom(movie_lists[0], 'li'):
            if dom_parser.parse_dom(item, 'div',
                                    {'class': '[^"]*episode[^"]*'}):
                continue
            found = re.search('href="([^"]+).*?title="([^"]+)', item)
            if found:
                match_url, match_title = found.groups()
                if not year or not match_year or year == match_year:
                    results.append({'title': match_title, 'year': '',
                                    'url': _pathify_url(match_url)})
    return results
def tmovies_tv(name, movie_title):
    """Gather TwoMovies episode sources for each SxxExx pattern in *name*.

    Builds the site slug from *movie_title* ('Title (YYYY)'), fetches the
    episode page per season/episode pair, and returns the collected
    sources after apply_urlresolver filtering ([] on any error).
    """
    try:
        # if 'House' in movie_title:
        #     movie_title = movie_title.replace('House','DR House')
        base_slug = (movie_title[:-6].replace(" ", "_")
                     .replace(":", "").replace("-", ""))[:-1]
        episode_base = base_url + 'watch_episode/'
        collected = []
        for season, episode in re.findall('S(.+?)E(.+?) ', name):
            page = OPEN_URLTM('%s%s/%s/%s/' % (episode_base, base_slug,
                                               season, episode))
            labels = dom_parser.parse_dom(page, 'a', {'class': "norm vlink"})
            links = dom_parser.parse_dom(page, 'a', {'class': "norm vlink"},
                                         ret='href')
            for label, link in zip(labels, links):
                collected.append({'url': link,
                                  'host': label.replace('www.', ''),
                                  'direct': False})
        return main_scrape.apply_urlresolver(collected)
    except Exception as e:
        log_utils.log('Error [%s] %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='TwoMovies',
                        msg='(error) %s %s' % (str(e), ''),
                        duration=5000, sound=None)
        return []
def merdb(name):
    """MerDB scraper: return resolver-filtered source dicts for *name*
    ('Title (YYYY)'), or [] after logging if anything goes wrong.
    """
    try:
        found = []
        base_title = name[:-6]
        alt_title = name[:-7]
        yr = name[-6:].replace('(', '').replace(')', '')
        search_term = base_title.replace(" ", "+")[:-1]
        slug = base_title.replace(" ", "_") + yr
        page = OPEN_URL(base_url + '?search=' + search_term)
        box_re = re.compile(
            '<div class="main_list_box"><a href="(.+?)" title="(.+?)"><img')
        for link, link_title in box_re.findall(page):
            if slug in link or alt_title == link_title:
                detail = OPEN_URL(base_url + link)
                version_spans = dom_parser.parse_dom(
                    detail, 'span', {'class': "movie_version_link"})
                host_spans = dom_parser.parse_dom(
                    detail, 'span', {'class': "version_host"})
                for host_text, span in zip(host_spans, version_spans):
                    for href in re.compile('<a href="(.+?)"').findall(span):
                        usable = ("ads.php" not in href
                                  and "Sponsor" not in host_text
                                  and "Host" not in host_text)
                        if usable:
                            host_text = host_text.replace("'", "")
                            found.append({'hostname': 'MerDB',
                                          'views': None,
                                          'url': base_url + href,
                                          'host': host_text,
                                          'direct': False})
        return main_scrape.apply_urlresolver(found)
    except Exception as e:
        log_utils.log('Error [%s] %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='MerDb', msg='(error) %s %s' % (str(e), ''),
                        duration=5000, sound=None)
        return []
def tmovies(name):
    """TwoMovies movie scraper: harvest host links from the direct and
    the year-suffixed movie page, then run them through the URL resolver.
    Returns [] after logging on any error.
    """
    try:
        results = []

        def harvest(page_url):
            # Append every 'norm vlink' anchor on page_url as a source dict.
            html = OPEN_URLTM(page_url)
            texts = dom_parser.parse_dom(html, 'a', {'class': "norm vlink"})
            hrefs = dom_parser.parse_dom(html, 'a', {'class': "norm vlink"},
                                         ret='href')
            for text, href in zip(texts, hrefs):
                results.append({'url': href,
                                'host': text.replace('www.', ''),
                                'direct': False})

        stem = name[:-6].replace(" ", "_").replace(":", "").replace("-", "")
        yr = name[-6:].replace('(', '').replace(')', '')
        harvest(base_url + 'watch_movie/' + stem[:-1])
        harvest(base_url + 'watch_movie/' + stem + yr)
        return main_scrape.apply_urlresolver(results)
    except Exception as e:
        log_utils.log('Error [%s] %s' % (str(e), ''), xbmc.LOGERROR)
        if kodi.get_setting('error_notify') == "true":
            kodi.notify(header='TwoMovies',
                        msg='(error) %s %s' % (str(e), ''),
                        duration=5000, sound=None)
        return []
def search(video_type, title, year):
    """Movie search via '/movie/search/<title>'.

    Skips series entries and returns {'title', 'year', 'url'} dicts whose
    year matches *year* when both sides provide one.
    """
    html = _http_get(urlparse.urljoin(base_url, '/movie/search/') + title,
                     cache_limit=1)
    found = []
    for entry in dom_parser.parse_dom(html, 'div', {'class': 'ml-item'}):
        info = dom_parser.parse_dom(entry, 'span', {'class': 'mli-info'})
        href = re.search('href="([^"]+)', entry, re.DOTALL)
        yr = re.search('class="jt-info">(\d{4})<', entry)
        episodes = dom_parser.parse_dom(entry, 'span', {'class': 'mli-eps'})
        if info and href and not episodes:
            clean_title = re.sub('\s+\d{4}$', '',
                                 re.sub('</?h2>', '', info[0]))
            watch_url = urlparse.urljoin(href.group(1), 'watching.html')
            entry_year = yr.group(1) if yr else ''
            if not year or not entry_year or year == entry_year:
                found.append({'title': clean_title, 'year': entry_year,
                              'url': _pathify_url(watch_url)})
    return found
def search(video_type, title, year):
    """Keyword search via '/search?keyword='.

    Items flagged as episodes are dropped; the year comparison is vacuous
    here (the listing carries no year), so 'year' is always ''.
    """
    search_url = urlparse.urljoin(
        base_url, '/search?keyword=%s' % (urllib.quote_plus(title)))
    page = get_url(search_url)
    hits = []
    match_year = ''
    containers = dom_parser.parse_dom(page, 'ul', {'class': 'movie-list'})
    if containers:
        for entry in dom_parser.parse_dom(containers[0], 'li'):
            is_episode = dom_parser.parse_dom(
                entry, 'div', {'class': '[^"]*episode[^"]*'})
            if is_episode:
                continue
            parsed = re.search('href="([^"]+).*?title="([^"]+)', entry)
            if not parsed:
                continue
            hit_url, hit_title = parsed.groups()
            if not year or not match_year or year == match_year:
                hits.append({'title': hit_title, 'year': '',
                             'url': _pathify_url(hit_url)})
    return hits
def get_sources(suf_url, pre_url):
    """123Movies source resolver for the page at *suf_url*.

    Reads the media-player movie-id, pulls its server list, and resolves
    every loadEpisode entry: link types 12/13/14 go through the JSON
    playlist helper, all others through the XML playlist helper.
    """
    hosters = []
    if suf_url and suf_url != FORCE_NO_MATCH:
        page_html = _http_get(urlparse.urljoin(base_url, suf_url),
                              cache_limit=.5)
        ids = dom_parser.parse_dom(page_html, 'div',
                                   {'id': 'media-player'}, 'movie-id')
        if ids:
            servers_html = _http_get(
                urlparse.urljoin(base_url, SL_URL % (ids[0])),
                cache_limit=.5)
            found = {}
            pattern = ('loadEpisode\(\s*(\d+)\s*,\s*(\d+)\s*\)'
                       '.*?class="btn-eps[^>]*>([^<]+)')
            for hit in re.finditer(pattern, servers_html, re.DOTALL):
                kind, ep_id, q_str = hit.groups()
                if kind in ['12', '13', '14']:
                    found.update(__get_link_from_json(
                        urlparse.urljoin(base_url, PLAYLIST_URL1 % (ep_id)),
                        q_str))
                else:
                    pl_url = __get_ep_pl_url(kind, page_html)
                    if pl_url:
                        xml = _http_get(urlparse.urljoin(base_url, pl_url),
                                        cache_limit=.5)
                        found.update(__get_links_from_xml(xml, pre_url))
            for src in found:
                if found[src]['direct']:
                    host = _get_direct_hostname(src)
                else:
                    host = urlparse.urlparse(src).hostname
                hosters.append({'hostname': '123Movies',
                                'multi-part': False, 'host': host,
                                'quality': found[src]['quality'],
                                'views': None, 'rating': None, 'url': src,
                                'direct': found[src]['direct']})
            hosters = main_scrape.apply_urlresolver(hosters)
    return hosters
def get_sources(suf_url): source_url = suf_url hosters = [] sources = {} if source_url and source_url != FORCE_NO_MATCH: url = urlparse.urljoin(base_url, source_url) #print "URL IS = "+url html = get_url(url) for server_list in dom_parser.parse_dom(html, 'ul', {'class': 'episodes'}): for hash_id in dom_parser.parse_dom(server_list, 'a', ret='data-id'): now = time.localtime() url = urlparse.urljoin( base_url, hash_url) #/ajax/film/episode?hash_id=%s&f=&p=%s url = url % (hash_id, now.tm_hour + now.tm_min) #print "CRAZY URL IS = "+url html = _http_get(url, headers=XHR, cache_limit=.5) #print "HTML IS = "+html if html: try: #print "I DID JSON" js_result = json.loads(html) #print js_result except ValueError: print 'Invalid JSON returned: %s: %s' % (html) log_utils.log('Invalid JSON returned: %s' % (html), log_utils.LOGWARNING) else: if 'videoUrlHash' in js_result and 'grabber' in js_result: # print "ITS IN THERE" query = { 'flash': 1, 'json': 1, 's': now.tm_min, 'link': js_result['videoUrlHash'], '_': int(time.time()) } query['link'] = query['link'].replace('\/', '/') grab_url = js_result['grabber'].replace('\/', '/') grab_url += '?' + urllib.urlencode(query) html = get_url(grab_url) #print "NEW HTML IS = "+html if html: try: js_result = json.loads(html) except ValueError: print 'Invalid JSON returned: %s: %s' % ( html) else: for result in js_result: if 'label' in result: quality = _height_get_quality( result['label']) else: quality = _gv_get_quality( result['file']) sources[result['file']] = quality for source in sources: hoster = { 'multi-part': False, 'host': _get_direct_hostname(source), 'quality': sources[source], 'view': None, 'rating': None, 'url': source, 'direct': True } hosters.append(hoster) hosters = main_scrape.apply_urlresolver(hosters) return hosters
def __get_ep_pl_url(link_id, html):
    """Return the PLAYLIST_URL2 URL for *link_id*, built from the
    media-player div's movie-id and player-token attributes; None
    (implicit) when the page lacks either attribute.
    """
    ids = dom_parser.parse_dom(html, 'div', {'id': 'media-player'},
                               'movie-id')
    tokens = dom_parser.parse_dom(html, 'div', {'id': 'media-player'},
                                  'player-token')
    if not ids or not tokens:
        return None
    return PLAYLIST_URL2 % (ids[0], tokens[0], link_id)