Example #1
def LISTADO_rss(url):  # MODE 1   
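    # Fetch the RSS feed and add one directory entry per <item>,
    # using its <title> and the audio/mpeg enclosure URL.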
    req = urllib2.Request(url)
    # req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
    response = urllib2.urlopen(req)
    link = response.read()
    response.close()
    matches = dataparser.etiqueta_maestra(link, "<item>(.*?)</item>")
    for item in matches:
        NOMBRE = dataparser.subetiqueta(item, "<title>(.*?)</title>")
        LINK = dataparser.subetiqueta(item, '<enclosure url="(.+?)" length=".+?" type="audio/mpeg" />')
        addDir(NOMBRE, LINK, 20, "")
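Every example here leans on a dataparser helper module that is not shown. Below is a minimal sketch of what etiqueta_maestra ("master tag") and subetiqueta ("sub-tag") plausibly look like, assuming they are thin wrappers around re.findall; the bodies are a reconstruction for illustration, not the original module:

import re

def etiqueta_maestra(texto, patron):
    # Hypothetical reconstruction: return every match of the capturing
    # group in patron, e.g. all <item>...</item> blocks.
    return re.compile(patron, re.DOTALL).findall(texto)

def subetiqueta(texto, patron):
    # Hypothetical reconstruction: return the first match of the capturing
    # group, or "" when nothing matches (consistent with the `if url == "":`
    # checks seen in the examples).
    resultados = re.compile(patron, re.DOTALL).findall(texto)
    return resultados[0] if resultados else ""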
Example #2
def database_reader():
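    # List every <item> in the local database file; when tag filtering is
    # enabled, only items whose <tags> contain all enabled categories are added.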

    opciones = open(database, "r+")
    contenido = opciones.read()

    matches = dataparser.etiqueta_maestra(contenido, "<item>(.*?)</item>")
    for item in matches:
        NAME = dataparser.subetiqueta(item, "<name>(.*?)</name>")
        THUMB = dataparser.subetiqueta(item, "<thumb>(.*?)</thumb>")
        LINK = dataparser.subetiqueta(item, "<link>(.*?)</link>")
        id = dataparser.subetiqueta(item, "<id>(.*?)</id>")
        title_sc = dataparser.subetiqueta(item, "<title>(.*?)</title>")
        tags = dataparser.subetiqueta(item, "<tags>(.*?)</tags>")

        if Config.getSetting("engine") == '1':
            torrent = "plugin://plugin.video.pulsar/play?uri=" + str(LINK)
        else:
            torrent = "plugin://plugin.video.kmediatorrent/play/" + str(LINK)

        if Config.getSetting("enable_tags") == 'true':

            if Config.getSetting("anal") == 'true':
                tag1 = "Anal"
            else:
                tag1 = ""

            if Config.getSetting("blackhair") == 'true':
                tag2 = "Black Hair"
            else:
                tag2 = ""

            if Config.getSetting("blonde") == 'true':
                tag3 = "Blonde"
            else:
                tag3 = ""

            if Config.getSetting("b*****b") == 'true':
                tag4 = "B*****b"
            else:
                tag4 = ""

            if Config.getSetting("brunette") == 'true':
                tag5 = "Brunette"
            else:
                tag5 = ""

            if Config.getSetting("dbpen") == 'true':
                tag6 = "Double Penetration"
            else:
                tag6 = ""

            if Config.getSetting("ebony") == 'true':
                tag7 = "Ebony"
            else:
                tag7 = ""

            if Config.getSetting("handjob") == 'true':
                tag8 = "Handjob"
            else:
                tag8 = ""

            if Config.getSetting("htits") == 'true':
                tag9 = "Huge T**s"
            else:
                tag9 = ""

            if Config.getSetting("lesbian") == 'true':
                tag10 = "Lesbian"
            else:
                tag10 = ""

            if Config.getSetting("milf") == 'true':
                tag11 = "MILF"
            else:
                tag11 = ""

            if Config.getSetting("naturalt") == 'true':
                tag12 = "Natural T**s"
            else:
                tag12 = ""

            if Config.getSetting("redhead") == 'true':
                tag13 = "Red Head"
            else:
                tag13 = ""

            if Config.getSetting("sextoys") == 'true':
                tag14 = "Sex Toys"
            else:
                tag14 = ""

            if Config.getSetting("squirt") == 'true':
                tag15 = "Squirt"
            else:
                tag15 = ""

            if Config.getSetting("tittyfuck") == 'true':
                tag16 = "Tittyfuck"
            else:
                tag16 = ""

            # A disabled category is an empty string, and "" is a substring of
            # any value, so only the enabled categories actually filter items.
            enabled = [tag1, tag2, tag3, tag4, tag5, tag6, tag7, tag8, tag9,
                       tag10, tag11, tag12, tag13, tag14, tag15, tag16]
            if all(tag in str(tags) for tag in enabled):
                addLink(NAME, torrent, THUMB)
        else:
            """
            Config.setSetting("anal", 'false')
            Config.setSetting("blackhair", 'false')
            Config.setSetting("blonde", 'false')
            Config.setSetting("b*****b", 'false')
            Config.setSetting("brunette", 'false')
            Config.setSetting("dbpen", 'false')
            Config.setSetting("ebony", 'false')
            Config.setSetting("handjob", 'false')
            Config.setSetting("htits", 'false')
            Config.setSetting("lesbian", 'false')
            Config.setSetting("milf", 'false')
            Config.setSetting("naturalt", 'false')
            Config.setSetting("redhead", 'false')
            Config.setSetting("sextoys", 'false')
            Config.setSetting("squirt", 'false')
            Config.setSetting("tittyfuck", 'false')
            """
            addLink(NAME, torrent, THUMB)
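addDir and addLink are likewise external helpers. In a Kodi (XBMC) add-on of this era they are typically small wrappers around xbmcgui and xbmcplugin, roughly as sketched below; this is an assumed, conventional implementation, not this add-on's actual code:

import sys
import urllib
import xbmcgui
import xbmcplugin

def addDir(name, url, mode, thumb):
    # Folder-style entry that routes back into the plugin with a mode number.
    link = '%s?url=%s&mode=%d' % (sys.argv[0], urllib.quote_plus(url), mode)
    li = xbmcgui.ListItem(name, iconImage=thumb, thumbnailImage=thumb)
    xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=link,
                                listitem=li, isFolder=True)

def addLink(name, url, thumb):
    # Playable entry that points straight at the resolved URL.
    li = xbmcgui.ListItem(name, iconImage=thumb, thumbnailImage=thumb)
    li.setProperty('IsPlayable', 'true')
    xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url,
                                listitem=li, isFolder=False)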
Example #3
def mixer():
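    # Rebuild the local database: match each offline <item> against file2 by
    # <size>, confirm the name appears in the title, and write merged entries.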

    # Keep only the first line of the work file, discarding the rest.
    lines = open(file).readlines()
    print "LINEA: " + lines[0]
    open(output, 'wb').write(lines[0])
    move(output, file)

    f = open(database2, "w+")

    opciones = open(offline, "r+")
    contenido = opciones.read()

    NOM_list = []
    SIZE_list = []
    THUMB_list = []
    CATZZ_list = []
    SECC_list = []

    matches = dataparser.etiqueta_maestra(contenido, "<item>(.*?)</item>")
    for item in matches:
        NAME = dataparser.subetiqueta(item, "<name>(.*?)</name>")
        THUMB = dataparser.subetiqueta(item, "<thumb>(.*?)</thumb>")
        SIZE = dataparser.subetiqueta(item, "<size>(.*?)</size>")
        CATZZ = dataparser.subetiqueta(item, "<tags>(.*?)</tags>")
        SECC = dataparser.subetiqueta(item, "<secc>(.*?)</secc>")

        NOM_list.append(NAME)
        SIZE_list.append(SIZE)
        THUMB_list.append(THUMB)
        CATZZ_list.append(CATZZ)
        SECC_list.append(SECC)

    i = 0

    while i < len(NOM_list):

        searchfile = open(file2)

        for line in searchfile:
            if SIZE_list[i] in line:
                rawtitle = re.compile('<title>(.+?)</title>').findall(line)
                cleantitle = str(rawtitle[0]).replace("&#039;", "'")
                title_sc = str(cleantitle).replace("&#39;", "'")
                rawid = re.compile('<id>(.+?)</id>').findall(line)
                id = str(rawid[0])
                match = re.compile('<url>(.+?)</url>').findall(line)
                nommatch = NOM_list[i].split(" ")
                if len(str(nommatch[0])) < 4:
                    matchtitle = str(nommatch[1])
                else:
                    matchtitle = str(nommatch[0])

                if matchtitle.lower() in title_sc.lower():
                    print matchtitle.lower()
                    print title_sc.lower()

                    f.write(id + "\n")
                    f.write("<item>\n")
                    f.write("<name>" + NOM_list[i] + "</name>\n")
                    f.write("<title>" + title_sc + "</title>\n")
                    f.write("<thumb>" + THUMB_list[i] + "</thumb>\n")
                    f.write("<link>" + str(match[0]) + "</link>\n")
                    f.write("<size>" + SIZE_list[i] + "</size>\n")
                    f.write("<tags>" + CATZZ_list[i] + "</tags>\n")
                    f.write("<secc>" + SECC_list[i] + "</secc>\n")
                    f.write("<id>" + id + "</id>\n")
                    f.write("</item>\n")
                    f.write("\n")
                    f.write("\n")

            else:
                pass

        # Close this pass over file2 before starting on the next name.
        searchfile.close()
        i += 1

    f.close()

    content = ''
    for line in db_filenames:
        content = content + open(line).read()
        open(output, 'wb').write(content)
    move(output, database)
    open(timestamp, 'w+').write(st)
    dialog.ok('ZZ', 'The [B]DATABASE[/B] is updated!')
    sys.exit()
Example #4
def xml_parser(masterurl):
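    # Check every <link> in the master XML: request the hoster page, look for
    # its "file removed" marker, and report broken/untestable links both in
    # the listing and in a plain-text report file.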
    
    try:
    
        f = open(file, "w+")

        # Report header ("LISTA DE ENLACES CAIDOS" = list of broken links).
        f.write("- LISTA DE ENLACES CAIDOS:\n")
        f.write("\n")
           
        req = urllib2.Request(masterurl)
        req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
        response = urllib2.urlopen(req)
        link = response.read()
        response.close()
        matches = dataparser.etiqueta_maestra(link, "<item>(.*?)</item>")
        for item in matches:
            name = dataparser.subetiqueta(item, "<title>(.*?)</title>")
            url = dataparser.subetiqueta(item, "<link>(.*?)</link>")
            thumb = dataparser.subetiqueta(item, "<thumbnail>(.*?)</thumbnail>")
            if url == "":
                pass
            else:
                if re.match(".*allmyvideos.*", url.lower()):
                    # print url
                    req = urllib2.Request(url)
                    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
                    response = urllib2.urlopen(req)
                    link = response.read()
                    response.close()            
                    match = re.compile('The file you were looking for could not be found, (.+?).').findall(link)
                    # print "THIS" + str(match)
                    for yes in match:
                        if yes != "":
                            addLink(name + "[COLOR red] [broken] [/COLOR][COLOR ff6495ed]allmyvideos[/COLOR]", url, thumb)
                            f.write("* " + name + " [broken]\n")
                            f.write("url: " + url + "\n")
                            f.write("\n")
                        else: pass
                elif re.match(".*streamcloud.*", url.lower()):
                    # print url
                    req = urllib2.Request(url)
                    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
                    response = urllib2.urlopen(req)
                    link = response.read()
                    response.close()            
                    match = re.compile('<h1>(.+?) Not Found</h1>').findall(link)
                    # print "THIS" + str(match)
                    for yes in match:
                        if yes != "":
                            addLink(name + "[COLOR red] [broken] [/COLOR][COLOR cadetblue]streamcloud[/COLOR]", url, thumb)
                            f.write("* " + name + " [broken]\n")
                            f.write("url: " + url + "\n")
                            f.write("\n")
                        else: pass
                elif re.match(".*played.to.*", url.lower()):
                    # print url
                    req = urllib2.Request(url)
                    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
                    response = urllib2.urlopen(req)
                    link = response.read()
                    response.close()            
                    match = re.compile('<b class="err" style=".+?">Removed (.+?).</b>').findall(link)
                    # print "THIS" + str(match)
                    for yes in match:
                        if yes != "":
                            addLink(name + "[COLOR red] [broken] [/COLOR][COLOR ff9acd32]played.to[/COLOR]", url, thumb)
                            f.write("* " + name + " [broken]\n")
                            f.write("url: " + url + "\n")
                            f.write("\n")
                        else: pass
                elif re.match(".*nowvideo.*", url.lower()):
                    # print url
                    req = urllib2.Request(url)
                    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
                    response = urllib2.urlopen(req)
                    link = response.read()
                    response.close()            
                    match = re.compile('<h3>(.+?) no longer exists on our servers.</h3>').findall(link)
                    # print "THIS" + str(match)
                    for yes in match:
                        if yes != "":
                            addLink(name + "[COLOR red] [broken] [/COLOR][COLOR gold]nowvideo[/COLOR]", url, thumb)
                            f.write("* " + name + " [broken]\n")
                            f.write("url: " + url + "\n")
                            f.write("\n")
                        else: pass
                elif re.match(".*vidspot.*", url.lower()):
                    # print url
                    req = urllib2.Request(url)
                    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
                    response = urllib2.urlopen(req)
                    link = response.read()
                    response.close()            
                    match = re.compile('<b>(.+?) Not Found</b>').findall(link)
                    # print "THIS" + str(match)
                    for yes in match:
                        if yes != "":
                            addLink(name + "[COLOR red] [broken] [/COLOR][COLOR ff0000cd]vidspot[/COLOR]", url, thumb)
                            f.write("* " + name + " [broken]\n")
                            f.write("url: " + url + "\n")
                            f.write("\n")
                        else: pass
                elif re.match(".*vk.com.*", url.lower()):
                    try:
                        # print url
                        if re.match(".*m.vk.com/video[0-9].*", url.lower()):
                            addLink(name + "[COLOR darkorange] [untestable] [/COLOR][COLOR skyblue]VK MOVIL[/COLOR]", url, thumb)
                            f.write("* " + name + " [untestable]\n")
                            f.write("url: " + url + "\n")
                            f.write("\n")
                        else:
                            req = urllib2.Request(url)
                            req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
                            response = urllib2.urlopen(req)
                            link = response.read()
                            response.close()            
                            match = re.compile('(.+?) has been removed').findall(link)
                            # print "THIS" + str(match)
                            for yes in match:
                                if yes != "":
                                    addLink(name + "[COLOR red] [broken] [/COLOR][COLOR lightsteelblue]VK[/COLOR]", url, thumb)
                                    f.write("* " + name + " [broken]\n")
                                    f.write("url: " + url + "\n")
                                    f.write("\n")
                                else: pass
                    except:
                        # print url
                        # print "[ERROR]"
                        addLink(name + "[COLOR yellow] [BLOCKED] [/COLOR][COLOR skyblue]VK MOVIL[/COLOR]", url, thumb)
                        f.write("* " + name + " [BLOCKED]\n")
                        f.write("url: " + url + "\n")
                        f.write("\n")
                elif re.match(".*youtube.*", url.lower()):
                    pass
                else:
                    print "OUTSIDE " + url
                    server = url.split("/")
                    if Config.getSetting("notsupp") == 'true':
                        addLink(name + "[COLOR hotpink] [notSUPP] [/COLOR]" + "[COLOR antiquewhite]" + server[2] + "[/COLOR]", url, thumb)
                        f.write("* " + name + " [notSUPP]\n")
                        f.write("url: " + url + "\n")
                        f.write("\n")
                    else:
                        pass
        f.close()

    except:
        # "The URL is not valid or the page is down."
        error = dialog.ok('ERROR', 'La URL no es válida o la página está caída.')
        if error == True:
            sett = Config.openSettings()
            sys.exit()            
        else:
            sys.exit()