def update(metadata, siteID, movieGenres, movieActors):
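    # Scrapes the scene details page for this network and fills the Plex
    # metadata object: studio, collection/tagline, summary, actors, genres,
    # artwork, release date and title.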
    temp = str(metadata.id).split("|")[0]

    url = PAsearchSites.getSearchSearchURL(siteID) + temp
    Log('scene url: ' + url)
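    # Fetch the details page with the framework helper; if that fails, fall
    # back to a raw urllib fetch parsed with lxml's fromstring (both assumed
    # to be imported at module level).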
    try:
        detailsPageElements = HTML.ElementFromURL(url)
    except:
        response = urllib.urlopen(url)
        htmlstring = response.read()
        detailsPageElements = fromstring(htmlstring)

    metadata.studio = "P**n Pros"

    # Collections / Tagline
    siteName = PAsearchSites.getSearchSiteName(siteID)
    metadata.collections.clear()
    metadata.tagline = siteName
    metadata.collections.add(siteName)

    # Summary
    try:
        metadata.summary = detailsPageElements.xpath(
            '//meta[@name="description"]')[0].get('content').strip()
    except:
        pass

    try:
        if siteName.lower() == "Cum4K".lower():

            summaryurl = "https://cum4k.tube/" + temp
            Log(summaryurl)
            summaryPageElements = HTML.ElementFromURL(summaryurl)
            metadata.summary = summaryPageElements.xpath(
                '//p[@class="more"]/text()')[0].strip()
    except:
        Log("did not pull tube summary")
        pass

    # Actors
    movieActors.clearActors()
    titleActors = ""
    actors = detailsPageElements.xpath(
        '//div[@class="details col-sm-6 col-md-3 order-md-2 mb-2"]//div[@class="row"]//div[@class="col-6 col-md-12"]//a'
    )
    if len(actors) > 0:
        for actorLink in actors:
            actorName = actorLink.text_content()
            actorPhotoURL = PAactors.actorDBfinder(actorName)
            titleActors = titleActors + actorName + " & "
            Log("actorPhoto: " + actorPhotoURL)
            movieActors.addActor(actorName, actorPhotoURL)
        titleActors = titleActors[:-3]

    # Manually Add Actors
    # Add Actor Based on Title
    if "Poke Her In The Front" == metadata.title:
        actorName = "Sara Luv"
        actorPhotoURL = ''
        movieActors.addActor(actorName, actorPhotoURL)
        actorName = "Dillion Harper"
        actorPhotoURL = ''
        movieActors.addActor(actorName, actorPhotoURL)

    # Genres
    movieGenres.clearGenres()
    # Based on site
    if siteName.lower() == "Lubed".lower():
        for genreName in ['Lube', 'Raw', 'Wet']:
            movieGenres.addGenre(genreName)
    elif siteName.lower() == "Holed".lower():
        for genreName in ['Anal', 'Ass']:
            movieGenres.addGenre(genreName)
    elif siteName.lower() == "POVD".lower():
        for genreName in ['Gonzo', 'POV']:
            movieGenres.addGenre(genreName)
    elif siteName.lower() == "MassageCreep".lower():
        for genreName in ['Massage', 'Oil']:
            movieGenres.addGenre(genreName)
    elif siteName.lower() == "DeepThroatLove".lower():
        for genreName in ['B*****b', 'Deep Throat']:
            movieGenres.addGenre(genreName)
    elif siteName.lower() == "PureMature".lower():
        for genreName in ['MILF', 'Mature']:
            movieGenres.addGenre(genreName)
    elif siteName.lower() == "Cum4K".lower():
        for genreName in ['Creampie']:
            movieGenres.addGenre(genreName)
    # Based on number of actors
    if len(actors) == 3:
        movieGenres.addGenre('Threesome')
    if len(actors) == 4:
        movieGenres.addGenre('Foursome')
    if len(actors) > 4:
        movieGenres.addGenre('Orgy')

    # Posters
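    # The trailer poster is used as both background art and poster; the Google
    # Referer header is presumably there to get past hotlink checks.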
    try:
        background = "http:" + detailsPageElements.xpath(
            '//video[@id="player"]')[0].get('poster')
        Log("BG DL: " + background)
        metadata.art[background] = Proxy.Preview(HTTP.Request(
            background, headers={
                'Referer': 'http://www.google.com'
            }).content,
                                                 sort_order=1)
        metadata.posters[background] = Proxy.Preview(HTTP.Request(
            background, headers={
                'Referer': 'http://www.google.com'
            }).content,
                                                     sort_order=1)
    except:
        pass

    # Date
    date = detailsPageElements.xpath(
        '//div[contains(@class,"details")]//p')[0].text_content().strip()
    Log('Date: ' + date)
    date_object = datetime.strptime(date, '%B %d, %Y')
    metadata.originally_available_at = date_object
    metadata.year = metadata.originally_available_at.year

    # Title
    metadata.title = detailsPageElements.xpath(
        '//div[contains(@class,"details")]//h1')[0].text_content().strip()

    #Extra Posters
    import random
    art = []
    match = 0

    if siteName.lower() == "Holed".lower():
        fanSite = PAextras.getFanArt("AnalPornFan.com", art, actors, actorName,
                                     metadata.title, match)
    elif siteName.lower() == "SpyFam".lower():
        fanSite = PAextras.getFanArt("SpyFams.com", art, actors, actorName,
                                     metadata.title, match)
    elif siteName.lower() == "Lubed".lower():
        fanSite = PAextras.getFanArt("LubedFan.com", art, actors, actorName,
                                     metadata.title, match)
    elif siteName.lower() == "PassionHD".lower():
        for site in ["PassionHDFan.com", "HQSluts.com"]:
            fanSite = PAextras.getFanArt(site, art, actors, actorName,
                                         metadata.title, match)
            match = fanSite[2]
    else:
        fanSite = PAextras.getFanArt("HQSluts.com", art, actors, actorName,
                                     metadata.title, match)

    summary = fanSite[1]
    match = fanSite[2]

    try:
        if len(summary) > 0:
            metadata.summary = summary
    except:
        metadata.summary = summary

    if match == 1:
        # Return, first, last and random selection of images
        # If you want more or less posters edit the value in random.sample below or refresh metadata to get a different sample.
        sample = [art[0], art[1], art[2], art[3], art[-1]] + random.sample(
            art, 4)
        art = sample
        Log("Selecting first 5, last and random 4 images from set")

        j = 1

        for posterUrl in art:
            Log("Trying next Image")
            if not PAsearchSites.posterAlreadyExists(posterUrl, metadata):
                #Download image file for analysis
                try:
                    img_file = urllib.urlopen(posterUrl)
                    im = StringIO(img_file.read())
                    resized_image = Image.open(im)
                    width, height = resized_image.size
                    #Add the image proxy items to the collection
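                    # Aspect-ratio heuristic: nearly every image qualifies as
                    # a poster (width > 1), while clearly landscape images are
                    # also stored as background art.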
                    if width > 1 or height > width:
                        # Item is a poster
                        metadata.posters[posterUrl] = Proxy.Preview(
                            HTTP.Request(posterUrl,
                                         headers={
                                             'Referer': 'http://www.google.com'
                                         }).content,
                            sort_order=j)
                    if width > 100 and width > height:
                        # Item is an art item
                        metadata.art[posterUrl] = Proxy.Preview(HTTP.Request(
                            posterUrl,
                            headers={
                                'Referer': 'http://www.google.com'
                            }).content,
                                                                sort_order=j)
                    j = j + 1
                except:
                    Log("there was an issue")

    return metadata
Example #2
def update(metadata, siteID, movieGenres, movieActors):
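    # Older TeamSkeet layout: the scene URL is stored before the "|" in
    # metadata.id, with "+" standing in for "/".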
    Log('******UPDATE CALLED*******')
    url = str(metadata.id).split("|")[0].replace('+', '/')
    detailsPageElements = HTML.ElementFromURL(url)
    metadata.collections.clear()
    movieGenres.clearGenres()
    movieActors.clearActors()

    # Studio
    metadata.studio = "TeamSkeet"

    # Title
    metadata.title = detailsPageElements.xpath(
        '//title')[0].text_content().split(" | ")[1]

    # Summary
    metadata.summary = detailsPageElements.xpath(
        '//div[@class="gray"]')[1].text_content().replace('�', '')

    # Release Date
    releaseDate = detailsPageElements.xpath(
        '//div[@style="width:430px;text-align:left;margin:8px;border-right:3px dotted #bbbbbb;position:relative;"]//div[@class="gray"]'
    )[0].text_content()[12:].replace("th,", ",").replace("st,", ",").replace(
        "nd,", ",").replace("rd,", ",")
    date_object = datetime.strptime(releaseDate, '%B %d, %Y')
    metadata.originally_available_at = date_object
    metadata.year = metadata.originally_available_at.year

    #Tagline and Collection(s)
    tagline = detailsPageElements.xpath(
        '//div[@style="white-space:nowrap;"]')[0].text_content()[6:].strip()
    endofsubsite = tagline.find('.com')
    tagline = tagline[:endofsubsite].strip()
    metadata.tagline = tagline
    metadata.collections.add(metadata.tagline)

    # Genres
    genres = detailsPageElements.xpath('//a[contains(@href,"?tags=")]')
    if len(genres) > 0:
        for genreLink in genres:
            genreName = genreLink.text_content().strip('\n').lower()
            movieGenres.addGenre(genreName)

    # Actors
    try:
        actortext = detailsPageElements.xpath(
            '//title')[0].text_content().split('|')[0].strip()
        actors = actortext.split(' and ')
        if len(actors) > 0:
            for actorLink in actors:
                actorName = actorLink
                actorPhotoURL = ''
                movieActors.addActor(actorName, actorPhotoURL)
    except:
        pass

    ### Posters and artwork ###

    # Video trailer background image
    try:
        twitterBG = detailsPageElements.xpath('//video')[0].get("poster")
        metadata.art[twitterBG] = Proxy.Preview(
            HTTP.Request(twitterBG).content, sort_order=1)
    except:
        pass

    #Extra Posters
    import random
    art = []
    match = 0
    siteName = PAsearchSites.getSearchSiteName(siteID)

    for site in ["SkeetScenes.com", "TeamSkeetFans.com"]:
        try:
            match = fanSite[2]
        except:
            pass
        if match is 1:
            break
        fanSite = PAextras.getFanArt(site, art, actors, actorName,
                                     metadata.title, match, siteName)

    try:
        match = fanSite[2]
    except:
        pass

    if match == 1:
        # Return, first, last and random selection of images
        # If you want more or less posters edit the value in random.sample below or refresh metadata to get a different sample.
        sample = [art[0], art[1], art[2], art[3], art[-1]] + random.sample(
            art, 4)
        art = sample
        Log("Selecting first 5, last and random 4 images from set")

        j = 1

        for posterUrl in art:
            Log("Trying next Image")
            if not PAsearchSites.posterAlreadyExists(posterUrl, metadata):
                #Download image file for analysis
                try:
                    hdr = {
                        'User-agent':
                        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
                    }
                    # urllib2 assumed available here; plain urllib has no Request class
                    req = urllib2.Request(posterUrl, headers=hdr)
                    img_file = urllib2.urlopen(req)
                    im = StringIO(img_file.read())
                    resized_image = Image.open(im)
                    width, height = resized_image.size
                    #Add the image proxy items to the collection
                    if width > 1 or height > width:
                        # Item is a poster
                        metadata.posters[posterUrl] = Proxy.Preview(
                            HTTP.Request(posterUrl, headers=hdr).content,
                            sort_order=j)
                    if width > 100 and width > height:
                        # Item is an art item
                        metadata.art[posterUrl] = Proxy.Preview(HTTP.Request(
                            posterUrl, headers=hdr).content,
                                                                sort_order=j)
                    j = j + 1
                except:
                    Log("there was an issue")
    else:

        posterPageElements = HTML.ElementFromURL(
            PAsearchSites.getSearchSearchURL(siteID) +
            metadata.title.replace(" ", "_"))
        posterLink = posterPageElements.xpath(
            '//img[contains(@src, "shared/scenes/new/")]')[0].get('src').split(
                "0")[0]
        posterNum = 1
        for poster in ["01.jpg", "02.jpg", "03.jpg", "04.jpg", "05.jpg"]:
            posterURL = posterLink + poster
            metadata.posters[posterURL] = Proxy.Preview(
                HTTP.Request(posterURL).content, sort_order=posterNum)
            posterNum += 1

    return metadata
Example #3
def update(metadata,siteID,movieGenres,movieActors):
    url = str(metadata.id).split("|")[0].replace("_","/").replace("!","?")
    detailsPageElements = HTML.ElementFromURL(url)

    # Title
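    # The same id may point at the original TeamSkeet page or a fan/info
    # mirror; which title element is present decides how the rest is parsed.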
    titleElement = detailsPageElements.xpath('//div[contains(@class,"left-info")]//span')
    sourceSite = True
    if len(titleElement) == 0:
        titleElement = detailsPageElements.xpath('.//h2')
        sourceSite = False
    title = titleElement[0].text_content().strip()
    metadata.title = title

    # Studio
    metadata.studio = 'TeamSkeet'

    if sourceSite:
        Log("Original Site")
        date = detailsPageElements.xpath('//div[@class="scene-date"]')[0].text_content().strip()
        date_object = datetime.strptime(date, '%m/%d/%Y')
        summary = detailsPageElements.xpath('.//div[@class="scene-story"]')[0].text_content().strip()
        actors = detailsPageElements.xpath('//div[@class="starring"]//span')[0].text_content().split(" And ")
        if len(actors) > 0:
            for actorObject in actors:
                actorName = actorObject
                Log("Starring: " + actorName)
                actorPhotoURL = ''
                movieActors.addActor(actorName,actorPhotoURL)
        firstActorName = actors[0]
        backgroundURL = detailsPageElements.xpath('//video')[0].get("poster")
    else:
        Log("Fan info Site")
        date = detailsPageElements.xpath('.//div[@id="title-single"]//span[1]')[0].text_content().strip()
        # Try each ordinal day suffix ("st", "nd", "rd", "th") until one parses
        date_object = None
        for suffix in ('st', 'nd', 'rd', 'th'):
            try:
                date_object = datetime.strptime(date, '%B %d' + suffix + ', %Y')
                break
            except ValueError:
                continue
        summary = detailsPageElements.xpath('.//p[@class="more"]')[0].text_content().replace("Story:","").strip()
        actors = detailsPageElements.xpath('//div[@id="title-single"]//a')
        if len(actors) > 0:
            for actorObject in actors:
                actorName = actorObject.text_content()
                Log("Starring: " + actorName)
                actorPhotoURL = ''
                movieActors.addActor(actorName,actorPhotoURL)
        firstActorName = actors[0].text_content()
        backgroundURL = detailsPageElements.xpath('//video')[0].get("poster")

    # Summary
    metadata.summary = summary

    # Tagline and Collection
    siteName = PAsearchSites.getSearchSiteName(siteID)
    metadata.collections.clear()
    metadata.tagline = siteName
    metadata.collections.add(siteName)

    # Date
    Log('Date: ' + date)
    if date_object != None:
        metadata.originally_available_at = date_object
        metadata.year = metadata.originally_available_at.year

    # Posters/Background
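    # Passing an empty list to validate_keys() discards previously stored
    # posters/art before the new images are added.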
    valid_names = list()
    metadata.posters.validate_keys(valid_names)
    metadata.art.validate_keys(valid_names)

    # Background
    metadata.art[backgroundURL] = Proxy.Preview(HTTP.Request(backgroundURL, headers={'Referer': 'http://www.google.com'}).content, sort_order = 1)
    Log("backgroundURL: " + backgroundURL)

    # try to get posters from fan site
    fanSceneInfo = siteName + "-" + firstActorName + "-" + title
    fanUrl = "https://teamskeetfans.com/" + fanSceneInfo.lower().replace(" ","-").replace("'","").replace("?","").replace("!","").replace(",","")
    Log("Trying fanUrl for posters: " + fanSceneInfo)
    try:
        fanPageElements = HTML.ElementFromURL(fanUrl)
        Log("fanUrl found")
        posters = fanPageElements.xpath('//div[contains(@class,"gallery-group")]//a')
        posterNum = 1
        for poster in posters:
            posterURL = poster.get("href")
            metadata.posters[posterURL] = Proxy.Preview(HTTP.Request(posterURL, headers={'Referer': 'http://www.google.com'}).content, sort_order = posterNum)
            posterNum = posterNum + 1
            Log("posterURL: " + posterURL)
    except:
        Log("fanUrl failed")
        metadata.posters[backgroundURL] = Proxy.Preview(HTTP.Request(backgroundURL, headers={'Referer': 'http://www.google.com'}).content, sort_order = 1)
        
        #try for PAextras match
        art=[]
        match = 0
        for site in ["TeamSkeetFans.com", "SkeetScenes.com"]:
            fanSite = PAextras.getFanArt(site, art, actors, actorName, metadata.title, match, siteName)
            match = fanSite[2]
            if match == 1:
                break
        
        if match in (1, 2) and len(art) >= 10:
        # Return, first, last and random selection of 4 more images
        # If you want more or less posters edit the value in random.sample below or refresh metadata to get a different sample.
            sample = [art[0], art[-1]] + random.sample(art, 4)
            art = sample
            Log("Selecting first, last and random 4 images from set")
        
        j = 1
                                          
        for posterUrl in art:
            Log("Trying next Image")
            
            if not PAsearchSites.posterAlreadyExists(posterUrl,metadata):            
            #Download image file for analysis
                try:
                    hdr = {'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'}
                    # urllib2 assumed available here; plain urllib has no Request class
                    req = urllib2.Request(posterUrl, headers=hdr)
                    img_file = urllib2.urlopen(req)
                    im = StringIO(img_file.read())
                    resized_image = Image.open(im)
                    width, height = resized_image.size
                    #Add the image proxy items to the collection
                    if width > 1 or height > width:
                        # Item is a poster
                        metadata.posters[posterUrl] = Proxy.Preview(HTTP.Request(posterUrl, headers={'Referer': 'http://www.google.com'}).content, sort_order = j)
                    if width > 100 and width > height:
                        # Item is an art item
                        metadata.art[posterUrl] = Proxy.Preview(HTTP.Request(posterUrl, headers={'Referer': 'http://www.google.com'}).content, sort_order = j)
                    j = j + 1
                except:
                    Log("there was an issue")
                    pass

    return metadata
def update(metadata, siteID, movieGenres, movieActors):
    art = []
    Log('******UPDATE CALLED*******')
    detailsPageElements = HTML.ElementFromURL(
        str(metadata.id).split("|")[0].replace('_', '/').replace('!', '?'))
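    # The scene slug (urlName) is taken from the trailer poster URL and reused
    # below to build CDN artwork and actor-photo URLs.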
    try:
        urlName = detailsPageElements.xpath(
            '//video[@id="main-movie-player"]')[0].get("poster").split('/')[5]
    except:
        urlName = detailsPageElements.xpath('//video[@id="preview"]')[0].get(
            "poster").split('/')[5]
    Log("urlName: " + urlName)

    # Studio
    metadata.studio = "TeamSkeet"

    # Title
    metadata.title = detailsPageElements.xpath(
        '//div[@class="red_big"]/text()')[0].strip()

    # Summary
    metadata.summary = detailsPageElements.xpath(
        '(//div[@class="vid-desc-mobile"]/span)[not(position()=1)][not(position()=last())]'
    )[0].text_content()

    # Collections / Tagline
    siteName = PAsearchSites.getSearchSiteName(siteID)
    metadata.collections.clear()
    metadata.tagline = siteName
    metadata.collections.add(siteName)

    # Date
    date = str(metadata.id).split("|")[2]
    if len(date) > 0:
        date_object = parse(date)
        metadata.originally_available_at = date_object
        metadata.year = metadata.originally_available_at.year
        Log("Date from file")

    # Genres
    movieGenres.addGenre("Step Sister")

    # Actors
    movieActors.clearActors()
    actors = detailsPageElements.xpath(
        '//div[@class="red_big"]/span/text()')[0].split(" and ")
    if len(actors) > 0:
        for actor in actors:
            actorName = actor
            actorPhotoURL = "http://cdn.teamskeetimages.com/design/tour/slm/tour/pics/" + urlName + "/" + urlName + ".jpg"
            Log("actorPhoto: " + actorPhotoURL)
            movieActors.addActor(actorName, actorPhotoURL)

    # Posters/Background
    try:
        art.append("http:" + detailsPageElements.xpath(
            '//video[@id="main-movie-player"]')[0].get("poster"))
    except:
        pass

    try:
        art.append("http:" + detailsPageElements.xpath(
            '//video[@id="preview"]')[0].get("poster"))
    except:
        pass

    try:
        art.append(
            "http://cdn1.teamskeetimages.com/design/tour/slm/tour/pics/" +
            urlName + "/v2.jpg")
    except:
        pass

    try:
        art.append(
            "https://cdn.teamskeetimages.com/design/tour/slm/tour/pics/" +
            urlName + "/bio_small.jpg")
    except:
        pass

    try:
        art.append(
            "https://cdn.teamskeetimages.com/design/tour/slm/tour/pics/" +
            urlName + "/bio_small2.jpg")
    except:
        pass

    try:
        art.append(
            "https://cdn.teamskeetimages.com/design/tour/slm/tour/pics/" +
            urlName + "/bio_big.jpg")
    except:
        pass

    try:
        art.append("http://cdn.teamskeetimages.com/teamskeet/slm/" + urlName +
                   "/shared/low.jpg")
    except:
        pass

    try:
        art.append("http://cdn.teamskeetimages.com/teamskeet/slm/" + urlName +
                   "/shared/med.jpg")
    except:
        pass

    try:
        art.append("http://cdn.teamskeetimages.com/teamskeet/slm/" + urlName +
                   "/shared/hi.jpg")
    except:
        pass

    #Extra Posters
    import random

    fanSite = PAextras.getFanArt("TeamSkeetFans.com", art, actors, actorName,
                                 metadata.title, 0)
    summary = fanSite[1]
    match = fanSite[2]

    if len(metadata.summary) < len(summary):
        metadata.summary = summary.strip()

    if match in (1, 2) and len(art) >= 10:
        # Return, first, last and random selection of 4 more images
        # If you want more or less posters edit the value in random.sample below or refresh metadata to get a different sample.
        sample = [art[0], art[-1]] + random.sample(art, 4)
        art = sample
        Log("Selecting first, last and random 4 images from set")

    j = 1

    for posterUrl in art:
        Log("Trying next Image")
        if not PAsearchSites.posterAlreadyExists(posterUrl, metadata):
            #Download image file for analysis
            try:
                img_file = urllib.urlopen(posterUrl)
                im = StringIO(img_file.read())
                resized_image = Image.open(im)
                width, height = resized_image.size
                #Add the image proxy items to the collection
                if width > 1 or height > width:
                    # Item is a poster
                    metadata.posters[posterUrl] = Proxy.Preview(HTTP.Request(
                        posterUrl,
                        headers={
                            'Referer': 'http://www.google.com'
                        }).content,
                                                                sort_order=j)
                if width > 100 and width > height:
                    # Item is an art item
                    metadata.art[posterUrl] = Proxy.Preview(HTTP.Request(
                        posterUrl,
                        headers={
                            'Referer': 'http://www.google.com'
                        }).content,
                                                            sort_order=j)
                j = j + 1
            except:
                Log("there was an issue")
                pass

    return metadata
Example #5
def update(metadata,siteID,movieGenres,movieActors):
    temp = str(metadata.id).split("|")[0]

    url = PAsearchSites.getSearchSearchURL(siteID) + temp
    Log('scene url: ' + url)
    detailsPageElements = HTML.ElementFromURL(url)


    metadata.studio = "P**n Pros"

    # Collections / Tagline
    siteName = PAsearchSites.getSearchSiteName(siteID)
    metadata.collections.clear()
    metadata.tagline = siteName
    metadata.collections.add(siteName)

    # Summary
    try:
        metadata.summary = detailsPageElements.xpath('//div[contains(@id, "description")]')[0].text_content().strip()
    except:
        pass

    try:
        if siteName.lower() == "Cum4K".lower():

            summaryurl = "https://cum4k.tube/" + temp
            Log(summaryurl)
            summaryPageElements = HTML.ElementFromURL(summaryurl)
            metadata.summary = summaryPageElements.xpath('//p[@class="more"]/text()')[0].strip()
    except:
        Log("did not pull tube summary")
        pass

    # Actors
    movieActors.clearActors()
    titleActors = ""
    actors = detailsPageElements.xpath('//div[contains(@class, "pt-md")]//a[contains(@href, "/girls/")]')
    if len(actors) > 0:
        for actorLink in actors:
            actorName = actorLink.text_content()
            actorPhotoURL = PAactors.actorDBfinder(actorName)
            titleActors = titleActors + actorName + " & "
            Log("actorPhoto: " + actorPhotoURL)
            movieActors.addActor(actorName,actorPhotoURL)

    # Manually Add Actors
    # Add Actor Based on Title
    if "Poke Her In The Front" == metadata.title:
        actorName = "Sara Luv"
        actorPhotoURL = ''
        movieActors.addActor(actorName, actorPhotoURL)
        actorName = "Dillion Harper"
        actorPhotoURL = ''
        movieActors.addActor(actorName, actorPhotoURL)

    # Genres
    movieGenres.clearGenres()
    # Based on site
    if siteName.lower() == "Lubed".lower():
        for genreName in ['Lube', 'Raw', 'Wet']:
            movieGenres.addGenre(genreName)
    elif siteName.lower() == "Holed".lower():
        for genreName in ['Anal', 'Ass']:
            movieGenres.addGenre(genreName)
    elif siteName.lower() == "POVD".lower():
        for genreName in ['Gonzo', 'POV']:
            movieGenres.addGenre(genreName)
    elif siteName.lower() == "MassageCreep".lower():
        for genreName in ['Massage', 'Oil']:
            movieGenres.addGenre(genreName)
    elif siteName.lower() == "DeepThroatLove".lower():
        for genreName in ['B*****b', 'Deep Throat']:
            movieGenres.addGenre(genreName)
    elif siteName.lower() == "PureMature".lower():
        for genreName in ['MILF', 'Mature']:
            movieGenres.addGenre(genreName)
    elif siteName.lower() == "Cum4K".lower():
        for genreName in ['Creampie']:
            movieGenres.addGenre(genreName)
    elif siteName.lower() == "GirlCum".lower():
        for genreName in ['O*****s', 'Girl O****m', 'Multiple O*****s']:
            movieGenres.addGenre(genreName)
    elif siteName.lower() == "PassionHD".lower():
        for genreName in ['Hardcore']:
            movieGenres.addGenre(genreName)
    # Based on number of actors
    if len(actors) == 3:
        movieGenres.addGenre('Threesome')
    if len(actors) == 4:
        movieGenres.addGenre('Foursome')
    if len(actors) > 4:
        movieGenres.addGenre('Orgy')

    # Posters
    try:
        background = "http:" + detailsPageElements.xpath('//video[@id="player"]')[0].get('poster')
        Log("BG DL: " + background)
        metadata.art[background] = Proxy.Preview(HTTP.Request(background, headers={'Referer': 'http://www.google.com'}).content, sort_order = 1)
        metadata.posters[background] = Proxy.Preview(HTTP.Request(background, headers={'Referer': 'http://www.google.com'}).content, sort_order = 1)
    except:
        pass

    # Date
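    # Prefer the release date shown on the page; if that xpath fails, fall
    # back to the date token stored in metadata.id during the search step.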
    try:
        date = detailsPageElements.xpath('//div[@class="d-inline d-lg-block mb-1"]/span')[0].text_content().strip()
        if len(date) > 0:
            date_object = datetime.strptime(date, '%B %d, %Y')
            metadata.originally_available_at = date_object
            metadata.year = metadata.originally_available_at.year
    except:
        date = str(metadata.id).split("|")[2]
        if len(date) > 0:
            date_object = parse(date)
            metadata.originally_available_at = date_object
            metadata.year = metadata.originally_available_at.year
            Log("Date from file")

    # Title
    metadata.title = detailsPageElements.xpath('//h1')[0].text_content().strip()

    #Extra Posters
    import random
    art = []
    match = 0

    if siteName.lower() == "Holed".lower():
        fanSite = PAextras.getFanArt("AnalPornFan.com", art, actors, actorName, metadata.title, 0, siteName)
    elif siteName.lower() == "SpyFam".lower():
        fanSite = PAextras.getFanArt("SpyFams.com", art, actors, actorName, metadata.title, 0, siteName)
    elif siteName.lower() == "Lubed".lower():
        fanSite = PAextras.getFanArt("LubedFan.com", art, actors, actorName, metadata.title, 0, siteName)
    elif siteName.lower() == "PassionHD".lower():
        fanSite = PAextras.getFanArt("PassionHDFan.com", art, actors, actorName, metadata.title, 0, siteName)
    elif siteName.lower() == "Tiny4K".lower():
        fanSite = PAextras.getFanArt("Tiny4KFan.com", art, actors, actorName, metadata.title, 0, siteName)

    for site in ["HQSluts.com", "ImagePost.com", "PornGirlsErotica.com", "PinkWorld.com", "CoedCherry.com/pics"]:
        try:
            match = fanSite[2]
        except:
            pass
        if match == 1:
            break
        fanSite = PAextras.getFanArt(site, art, actors, actorName, metadata.title, match, siteName)

    try:
        match = fanSite[2]
    except:
        pass
    summary = fanSite[1]

    try:
        if len(summary) > 0:
            metadata.summary = summary
    except:
        metadata.summary = summary

    if match == 1:
        # Return, first, last and random selection of images
        # If you want more or less posters edit the value in random.sample below or refresh metadata to get a different sample.
        sample = [art[0], art[1], art[2], art[3], art[-1]] + random.sample(art, 4)
        art = sample
        Log("Selecting first 5, last and random 4 images from set")

        j = 1

        for posterUrl in art:
            Log("Trying next Image")
            if not PAsearchSites.posterAlreadyExists(posterUrl,metadata):
            #Download image file for analysis
                try:
                    hdr = {
                            'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
                    }
                    # urllib2 assumed available here; plain urllib has no Request class
                    req = urllib2.Request(posterUrl, headers=hdr)
                    img_file = urllib2.urlopen(req)
                    im = StringIO(img_file.read())
                    resized_image = Image.open(im)
                    width, height = resized_image.size
                    #Add the image proxy items to the collection
                    if width > 1 or height > width:
                        # Item is a poster
                        metadata.posters[posterUrl] = Proxy.Preview(HTTP.Request(posterUrl, headers=hdr).content, sort_order = j)
                    if width > 100 and width > height:
                        # Item is an art item
                        metadata.art[posterUrl] = Proxy.Preview(HTTP.Request(posterUrl, headers=hdr).content, sort_order = j)
                    j = j + 1
                except:
                    Log("there was an issue")
    else:
        #Smaller background images
        Log("Match is not 1")
        try:
            for image in detailsPageElements.xpath('(//img[contains(@src, "handtouched")])[position() <5]'):
                background = "http:" + image.get('src')
                Log("BG DL: " + background)
                metadata.art[background] = Proxy.Preview(HTTP.Request(background, headers={'Referer': 'http://www.google.com'}).content, sort_order = 2)
                metadata.posters[background] = Proxy.Preview(HTTP.Request(background, headers={'Referer': 'http://www.google.com'}).content, sort_order = 2)
        except:
            pass

    return metadata
Example #6
def update(metadata, siteID, movieGenres, movieActors):
    art = []
    Log('******UPDATE CALLED*******')
    detailsPageElements = HTML.ElementFromURL(
        str(metadata.id).split("|")[0].replace('_', '/'))

    # Studio / Summary / Title
    metadata.studio = "TeamSkeet"
    metadata.summary = detailsPageElements.xpath(
        '//div[@class="trailer-content story"]')[0].text_content().strip()
    metadata.title = detailsPageElements.xpath(
        '//span[@class="p-small red"]')[0].text_content().strip()

    # Collections / Tagline
    siteName = PAsearchSites.getSearchSiteName(siteID)
    metadata.collections.clear()
    metadata.tagline = siteName
    metadata.collections.add(siteName)

    # Release Date
    date = detailsPageElements.xpath('//span[@class="date"]')
    if len(date) > 0:
        date = date[0].text_content().strip()
        date_object = parse(date)
        metadata.originally_available_at = date_object
        metadata.year = metadata.originally_available_at.year

    # Actors
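    # The actor photo URL is guessed by swapping the "trailer_tour" segment of
    # the preview poster URL for another segment of the same path.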
    movieActors.clearActors()
    actors = detailsPageElements.xpath(
        '//span[@class="p-small"]')[0].text_content().replace(
            'Starring:', '').strip().split(" and ")
    if len(actors) > 0:
        for actor in actors:
            actorName = actor
            posterUrl = detailsPageElements.xpath(
                '//video[@id="preview"]')[0].get("poster")
            actorPhotoURL = posterUrl.replace('trailer_tour',
                                              posterUrl.split('/')[8])
            Log("actorPhoto: " + actorPhotoURL)
            movieActors.addActor(actorName, actorPhotoURL)

    # Posters/Background
    try:
        art.append(
            detailsPageElements.xpath('//video[@id="preview"]')[0].get(
                "poster"))
    except:
        pass

    #Extra Posters
    import random

    fanSite = PAextras.getFanArt("TeamSkeetFans.com", art, actors, actorName,
                                 metadata.title, 0)
    summary = fanSite[1]
    match = fanSite[2]

    if len(metadata.summary) < len(summary):
        metadata.summary = summary.strip()

    if match in (1, 2) and len(art) >= 10:
        # Return, first, last and random selection of 4 more images
        # If you want more or less posters edit the value in random.sample below or refresh metadata to get a different sample.
        sample = [art[0], art[-1]] + random.sample(art, 4)
        art = sample
        Log("Selecting first, last and random 4 images from set")

    j = 1

    for posterUrl in art:
        Log("Trying next Image")
        if not PAsearchSites.posterAlreadyExists(posterUrl, metadata):
            #Download image file for analysis
            try:
                img_file = urllib.urlopen(posterUrl)
                im = StringIO(img_file.read())
                resized_image = Image.open(im)
                width, height = resized_image.size
                #Add the image proxy items to the collection
                if width > 1 or height > width:
                    # Item is a poster
                    metadata.posters[posterUrl] = Proxy.Preview(HTTP.Request(
                        posterUrl,
                        headers={
                            'Referer': 'http://www.google.com'
                        }).content,
                                                                sort_order=j)
                if width > 100 and width > height:
                    # Item is an art item
                    metadata.art[posterUrl] = Proxy.Preview(HTTP.Request(
                        posterUrl,
                        headers={
                            'Referer': 'http://www.google.com'
                        }).content,
                                                            sort_order=j)
                j = j + 1
            except:
                Log("there was an issue")
                pass

    return metadata
Example #7
def update(metadata, siteID, movieGenres, movieActors):
    art = []
    Log('******UPDATE CALLED*******')
    detailsPageElements = HTML.ElementFromURL(
        str(metadata.id).split("|")[0].replace('_', '/'))

    # Studio / Summary / Title
    metadata.studio = "TeamSkeet"
    metadata.summary = detailsPageElements.xpath(
        '//div[@class="trailer-content story"]')[0].text_content().strip()
    metadata.title = detailsPageElements.xpath(
        '//span[@class="p-small red"]')[0].text_content().strip()

    # Collections / Tagline
    siteName = PAsearchSites.getSearchSiteName(siteID)
    metadata.collections.clear()
    metadata.tagline = siteName
    metadata.collections.add(siteName)

    # Genres
    movieGenres.addGenre("Step Mom")

    # Release Date
    date = detailsPageElements.xpath('//span[@class="date"]')
    if len(date) > 0:
        date = date[0].text_content().strip()
        date_object = parse(date)
        metadata.originally_available_at = date_object
        metadata.year = metadata.originally_available_at.year

    # Actors
    movieActors.clearActors()
    actors = detailsPageElements.xpath(
        '//span[@class="p-small"]')[0].text_content().replace(
            'Starring:', '').replace('2', '').strip().split(" And ")
    if len(actors) > 0:
        for actor in actors:
            actorName = actor
            try:
                posterUrl = detailsPageElements.xpath(
                    '//video[@id="preview"]')[0].get("poster")
                actorPhotoURL = posterUrl.replace('trailer_tour',
                                                  posterUrl.split('/')[8])
            except:
                actorPhotoURL = ''
            movieActors.addActor(actorName, actorPhotoURL)

    # Posters/Background
    try:
        art.append(
            detailsPageElements.xpath('//video[@id="main-movie-player"]')
            [0].get("poster"))
    except:
        art.append(
            detailsPageElements.xpath(
                '//img[@class="img-fluid scene-trailer"]')[0].get("src"))

    # Extra Posters
    import random

    fanSite = PAextras.getFanArt("TeamSkeetFans.com", art, actors, actorName,
                                 metadata.title, 0, siteName)
    summary = fanSite[1]
    match = fanSite[2]

    if len(metadata.summary) < len(summary):
        metadata.summary = summary.strip()

    if match in (1, 2) and len(art) >= 10:
        # Return, first, last and random selection of 4 more images
        # If you want more or less posters edit the value in random.sample below or refresh metadata to get a different sample.
        sample = [art[0], art[-1]] + random.sample(art, 4)
        art = sample
        Log("Selecting first, last and random 4 images from set")

    j = 1
    Log("Artwork found: " + str(len(art)))
    for posterUrl in art:
        metadata.posters[posterUrl] = Proxy.Preview(HTTP.Request(
            posterUrl, headers={
                'Referer': 'http://www.google.com'
            }).content,
                                                    sort_order=j)
        metadata.art[posterUrl] = Proxy.Preview(HTTP.Request(
            posterUrl, headers={
                'Referer': 'http://www.google.com'
            }).content,
                                                sort_order=j)
        j = j + 1

    return metadata
Example #8
def update(metadata, siteNum, movieGenres, movieActors):
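    # Newer scraper pattern: the scene URL is stored encoded in metadata.id,
    # fetched via PAutils.HTTPRequest and parsed with HTML.ElementFromString.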
    metadata_id = str(metadata.id).split('|')
    sceneURL = PAutils.Decode(metadata_id[0])
    if not sceneURL.startswith('http'):
        sceneURL = PAsearchSites.getSearchBaseURL(siteNum) + sceneURL
    req = PAutils.HTTPRequest(sceneURL)
    detailsPageElements = HTML.ElementFromString(req.text)

    # Title
    metadata.title = detailsPageElements.xpath(
        '//div[@class="row info"]//div[@class="small-12 medium-12 large-12 columns"]'
    )[0].text_content().strip()

    # Summary
    paragraphs = detailsPageElements.xpath(
        '//div[@class="small-12 medium-12 large-12 columns info"]//p')
    summary = ''
    for paragraph in paragraphs:
        summary += '\n\n' + paragraph.text_content()
    metadata.summary = summary.strip()

    # Studio
    metadata.studio = 'X-Art'

    # Tagline and Collection(s)
    metadata.collections.clear()
    tagline = PAsearchSites.getSearchSiteName(siteNum).strip()
    metadata.tagline = tagline
    metadata.collections.add(tagline)

    # Release Date
    date = detailsPageElements.xpath('//h2')[2].text_content()[:-1]
    if date:
        date_object = datetime.strptime(date, '%b %d, %Y')
        metadata.originally_available_at = date_object
        metadata.year = metadata.originally_available_at.year

    # Genres
    movieGenres.clearGenres()
    movieGenres.addGenre('Artistic')
    movieGenres.addGenre('Glamorous')

    # Actors
    movieActors.clearActors()
    actors = detailsPageElements.xpath('//h2//a')
    if actors:
        if len(actors) == 3:
            movieGenres.addGenre('Threesome')
        if len(actors) == 4:
            movieGenres.addGenre('Foursome')
        if len(actors) > 4:
            movieGenres.addGenre('Orgy')
        for actorLink in actors:
            actorName = actorLink.text_content()

            actorPageURL = actorLink.get('href')
            req = PAutils.HTTPRequest(actorPageURL)
            actorPage = HTML.ElementFromString(req.text)
            actorPhotoURL = actorPage.xpath('//img[@class="info-img"]/@src')[0]

            movieActors.addActor(actorName, actorPhotoURL)

    # Posters
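    # Collect candidate images from several gallery layouts, then extend the
    # list with fan-site artwork below.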
    art = []
    xpaths = [
        '//div[@class="gallery-item"]//img/@src',
        '//img[contains(@src, "/videos")]/@src',
        '//section[@id="product-gallery"]//img/@data-src'
    ]
    for xpath in xpaths:
        for poster in detailsPageElements.xpath(xpath):
            # str.replace returns a new string, so the result must be kept
            art.append(poster.replace(' ', '_'))

    # Extra Posters
    art_ext = []
    match = 0

    for site in [
            'XartFan.com', 'HQSluts.com', 'ImagePost.com',
            'CoedCherry.com/pics', 'Nude-Gals.com'
    ]:
        fanSite = PAextras.getFanArt(site, art_ext, actors, actorName,
                                     metadata.title.strip(), match,
                                     PAsearchSites.getSearchSiteName(siteNum))
        match = fanSite[2]
        if match == 1:
            break

    if match in (1, 2):
        art.extend(art_ext)

    Log('Artwork found: %d' % len(art))
    for idx, posterUrl in enumerate(art, 1):
        if not PAsearchSites.posterAlreadyExists(posterUrl, metadata):
            # Download image file for analysis
            try:
                image = PAutils.HTTPRequest(posterUrl)
                im = StringIO(image.content)
                resized_image = Image.open(im)
                width, height = resized_image.size
                # Add the image proxy items to the collection
                if width > 1 or height > width:
                    # Item is a poster
                    metadata.posters[posterUrl] = Proxy.Media(image.content,
                                                              sort_order=idx)
                if width > 100 and width > height:
                    # Item is an art item
                    metadata.art[posterUrl] = Proxy.Media(image.content,
                                                          sort_order=idx)
            except:
                pass

    return metadata
Example #9
def update(metadata, siteID, movieGenres, movieActors):
    Log('******UPDATE CALLED*******')
    temp = str(metadata.id).split("|")[0].replace('+', '/')

    url = PAsearchSites.getSearchBaseURL(siteID) + temp
    detailsPageElements = HTML.ElementFromURL(url)

    # Summary
    metadata.studio = "X-Art"
    paragraphs = detailsPageElements.xpath(
        '//div[@class="small-12 medium-12 large-12 columns info"]//p')
    summary = ""
    for paragraph in paragraphs:
        summary = summary + '\n\n' + paragraph.text_content()
    metadata.summary = summary.strip()
    metadata.title = detailsPageElements.xpath('//title')[0].text_content()[8:]
    date = detailsPageElements.xpath('//h2')[2].text_content()[:-1]
    date_object = datetime.strptime(date, '%b %d, %Y')
    metadata.originally_available_at = date_object
    metadata.year = metadata.originally_available_at.year

    # Genres
    movieGenres.clearGenres()
    # No Source for Genres, add manual
    movieGenres.addGenre("Artistic")
    movieGenres.addGenre("Glamcore")

    # Actors
    movieActors.clearActors()
    actors = detailsPageElements.xpath('//h2//a')
    if len(actors) > 0:
        if len(actors) == 3:
            movieGenres.addGenre("Threesome")
        if len(actors) == 4:
            movieGenres.addGenre("Foursome")
        if len(actors) > 4:
            movieGenres.addGenre("Orgy")
        for actorLink in actors:
            actorName = actorLink.text_content()
            actorPageURL = actorLink.get("href")
            actorPage = HTML.ElementFromURL(actorPageURL)
            actorPhotoURL = actorPage.xpath('//img[@class="info-img"]')[0].get(
                "src")
            movieActors.addActor(actorName, actorPhotoURL)

    # Posters/Background
    valid_names = list()
    metadata.posters.validate_keys(valid_names)
    metadata.art.validate_keys(valid_names)
    try:
        posters = detailsPageElements.xpath('//div[@class="gallery-item"]')[0]
        poster = posters.xpath('.//img')[0].get('src')
    except:
        pass
    background = detailsPageElements.xpath(
        '//img[contains(@src,"/videos")]')[0].get("src")
    metadata.art[background] = Proxy.Preview(HTTP.Request(background).content,
                                             sort_order=1)
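    # Guess a poster variant by trimming the last 21 characters of the image
    # filename and appending "2.jpg" (appears to target a larger rendition).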
    try:
        posterURL = poster[:-21] + "2.jpg"
    except:
        posterURL = background[:-21] + "2.jpg"
    metadata.posters[posterURL] = Proxy.Preview(
        HTTP.Request(posterURL).content, sort_order=1)

    #Extra Posters
    import random
    art = []
    match = 0

    for site in ["XartFan.com", "HQSluts.com", "XartBeauties.com/galleries"]:
        fanSite = PAextras.getFanArt(site, art, actors, actorName,
                                     metadata.title, match)
        match = fanSite[2]
        if match == 1:
            break

    if match in (1, 2):
        # Return, first few, last one and random selection of images
        # If you want more or less posters edit the value in random.sample below or refresh metadata to get a different sample.
        try:
            sample = [art[0], art[1], art[2], art[3], art[-1]] + random.sample(
                art, 4)
            art = sample
            Log("Selecting subset of " + str(len(art)) +
                " images from the set.")
        except:
            pass

        j = 1

        for posterUrl in art:
            Log("Trying next Image")
            Log(posterUrl)
            if not PAsearchSites.posterAlreadyExists(posterUrl, metadata):
                #Download image file for analysis
                try:
                    img_file = urllib.urlopen(posterUrl)
                    im = StringIO(img_file.read())
                    resized_image = Image.open(im)
                    width, height = resized_image.size
                    #Add the image proxy items to the collection
                    if width > 1 or height > width:
                        # Item is a poster
                        metadata.posters[posterUrl] = Proxy.Preview(
                            HTTP.Request(posterUrl,
                                         headers={
                                             'Referer': 'http://www.google.com'
                                         }).content,
                            sort_order=j)
                    if width > 100 and width > height:
                        # Item is an art item
                        metadata.art[posterUrl] = Proxy.Preview(HTTP.Request(
                            posterUrl,
                            headers={
                                'Referer': 'http://www.google.com'
                            }).content,
                                                                sort_order=j)
                    j = j + 1
                except:
                    Log("there was an issue")
    return metadata
Example #10
def update(metadata, siteID, movieGenres, movieActors):
    Log('******UPDATE CALLED*******')
    temp = str(metadata.id).split("|")[0].replace('+', '/')

    url = PAsearchSites.getSearchBaseURL(siteID) + temp
    detailsPageElements = HTML.ElementFromURL(url)

    # Summary
    metadata.studio = "X-Art"
    paragraphs = detailsPageElements.xpath(
        '//div[@class="small-12 medium-12 large-12 columns info"]//p')
    summary = ""
    for paragraph in paragraphs:
        summary = summary + '\n\n' + paragraph.text_content()
    metadata.summary = summary.strip()
    metadata.title = detailsPageElements.xpath('//title')[0].text_content()[8:]
    date = detailsPageElements.xpath('//h2')[2].text_content()[:-1]
    date_object = datetime.strptime(date, '%b %d, %Y')
    metadata.originally_available_at = date_object
    metadata.year = metadata.originally_available_at.year

    # Genres
    movieGenres.clearGenres()
    # No Source for Genres, add manual
    movieGenres.addGenre("Artistic")
    movieGenres.addGenre("Glamcore")

    # Actors
    movieActors.clearActors()
    actors = detailsPageElements.xpath('//h2//a')
    if len(actors) > 0:
        if len(actors) == 3:
            movieGenres.addGenre("Threesome")
        if len(actors) == 4:
            movieGenres.addGenre("Foursome")
        if len(actors) > 4:
            movieGenres.addGenre("Orgy")
        for actorLink in actors:
            actorName = actorLink.text_content()
            actorPageURL = actorLink.get("href")
            actorPage = HTML.ElementFromURL(actorPageURL)
            actorPhotoURL = actorPage.xpath('//img[@class="info-img"]')[0].get(
                "src")
            movieActors.addActor(actorName, actorPhotoURL)

    # Posters/Background
    valid_names = list()
    metadata.posters.validate_keys(valid_names)
    metadata.art.validate_keys(valid_names)
    try:
        posters = detailsPageElements.xpath('//div[@class="gallery-item"]')[0]
        poster = posters.xpath('.//img')[0].get('src')
    except:
        pass
    background = detailsPageElements.xpath(
        '//img[contains(@src,"/videos")]')[0].get("src")
    metadata.art[background] = Proxy.Preview(HTTP.Request(background).content,
                                             sort_order=1)
    try:
        posterURL = poster[:-21] + "2.jpg"
    except:
        posterURL = background[:-21] + "2.jpg"
    metadata.posters[posterURL] = Proxy.Preview(
        HTTP.Request(posterURL).content, sort_order=1)

    # Extra Posters
    art = []
    match = 0
    from googlesearch import search
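    # Fallback artwork search: Google "site:" queries locate a matching fan
    # gallery, the performer name in the gallery header is verified, and the
    # gallery images are scraped. getBadMatchID/getNoMatchID appear to be
    # per-title override lookups defined elsewhere in the module.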

    overrideSettings = getBadMatchID(metadata.title)
    if overrideSettings != 9999:
        overrideURL = overrideSettings[1]
        overrideSite = overrideSettings[0]

    if getNoMatchID(metadata.title) == 9999:
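        # NOTE: range(1, 4) only yields i = 1..3, so the i == 4 (EroticBeauties)
        # and i == 5 (Nude-Gals) branches below never run as written.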
        for i in range(1, 4):

            if match in (0, 2):
                if i == 1:
                    Log("Trying XartFan")
                    urls = search('site:xartfan.com ' + actorName + ' ' +
                                  metadata.title,
                                  stop=2)
                # Test PAextras match
                elif i == 2:
                    fanSite = PAextras.getFanArt("hqsluts.com", art, actors,
                                                 actorName, metadata.title)
                    try:
                        if len(art) > 1:
                            match = 1
                    except:
                        pass
                elif i == 3:
                    Log("Trying XartBeauties")
                    urls = search('site:xartbeauties.com/galleries ' +
                                  actorName + ' ' + metadata.title,
                                  stop=2)
                elif i == 4:
                    Log("Trying EroticBeauties")
                    urls = search('site:eroticbeauties.net/pics ' + actorName +
                                  ' ' + metadata.title,
                                  stop=2)
                elif i == 5:
                    Log("Trying Nude-Gals")
                    urls = search('site:nude-gals.com ' + actorName + ' ' +
                                  metadata.title,
                                  stop=2)

            for url in urls:
                if match in (0, 2):
                    if overrideSettings != 9999:
                        url = overrideURL
                        i = overrideSite
                        Log("Title known for bad fan match. URL set manually.")

                    googleSearchURL = url
                    fanPageElements = HTML.ElementFromURL(googleSearchURL)

                    try:
                        # See if the actress name matches
                        if i == 1:
                            # Xartfan
                            nameinheader = fanPageElements.xpath(
                                '//header[@class="entry-header"]/p//a'
                            )[0].text_content()
                            Log("Actress name in header: " + nameinheader)
                        if i == 3:
                            # Xart Beauties
                            nameinheader = fanPageElements.xpath(
                                '(//div[@id="header-text"]//p//a)[not(position()=last())]'
                            )[0].text_content()
                            Log("Actress name in header: " + nameinheader)
                        if i == 4:
                            # Erotic Beauties
                            nameinheader = fanPageElements.xpath(
                                '//div[@class="clearfix"]//a[contains(@href, "model")]'
                            )[0].text_content()
                            Log("Actress name in header: " + nameinheader)
                        if i == 5:
                            # Nude-Gals
                            nameinheader = fanPageElements.xpath(
                                '//div[@class="row photoshoot-title row_margintop"]//a[contains(@href, "model")]'
                            )[0].text_content()
                            Log("Actress name in header: " + nameinheader)
                        try:
                            for actorLink in actors:
                                if match in (0, 2):
                                    actorName = actorLink.text_content()
                                    Log("Comparing with " + actorName)
                                    if actorName in nameinheader or nameinheader in actorName:
                                        Log("Fansite Match Found")
                                        match = 1
                                    else:
                                        try:
                                            # When there are multiple actors listed we need to check all of them.
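                                            # NOTE: nameinheader is a plain string here, so this
                                            # loops over single characters rather than names.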
                                            for name in nameinheader:
                                                if match in (0, 2):
                                                    Log(name + " vs " +
                                                        actorName)
                                                    if actorName.lower(
                                                    ) in name.lower():
                                                        Log(siteName +
                                                            " Fansite Match Found"
                                                            )
                                                        match = 1

                                        except:
                                            Log("No Actress Match")
                        except:
                            Log("No Actress Match")
                    except:
                        Log("No Actress found in the site header")

                    # Found one case where a bad-match override did not work because the
                    # actress match failed; this forces the match to proceed.
                    if overrideSettings != 9999:
                        match = 1

                    # Posters
                    if match == 1:
                        try:
                            Log("Searching for images")
                            if i == 1:
                                # Xart Fan
                                for posterURL in fanPageElements.xpath(
                                        '//div[contains(@class, "tiled-gallery")]//a//img'
                                ):
                                    art.append(
                                        posterURL.get(
                                            'data-orig-file').replace(
                                                'images.', ''))
                                Log("Images found on Xart Fan.")

                            if i == 3:
                                # Xart Beauties
                                for posterURL in fanPageElements.xpath(
                                        '//div[@id="gallery-thumbs"]//img'):
                                    art.append(
                                        posterURL.get('src').replace(
                                            'images.',
                                            'www.').replace('/tn', ''))
                                Log("Images found on Xart Beauties.")
                            if i == 4:
                                # Erotic Beauties
                                for posterURL in fanPageElements.xpath(
                                        '//div[contains(@class, "my-gallery")]//a'
                                ):
                                    art.append(posterURL.get('href'))
                                Log("Images found on Erotic Beauties.")
                            if i == 5:
                                # Nude-Gals
                                for posterURL in fanPageElements.xpath(
                                        '(//div[@class="row row_margintop"]//a)[not(contains(@title, "#"))]'
                                ):
                                    art.append("https://nude-gals.com/" +
                                               posterURL.get('href'))
                                Log("Images found on Nude-Gals.")
                        except:
                            Log("No Images Found")
                            pass

                        Log("Artwork found: " + str(len(art)))
                        if len(art) < 9 and match == 1:
                            Log("Fewer than 9 images found. Searching for more")
                            match = 2

        if match == 1 or match == 2:
            # Return the first few images, the last one, and a random selection.
            # To get more or fewer posters, edit the value in random.sample below
            # or refresh the metadata to get a different sample.
            try:
                sample = [art[0], art[1], art[2], art[3], art[-1]
                          ] + random.sample(art, 4)
                art = sample
                Log("Selecting subset of " + str(len(art)) +
                    " images from the set.")
            except:
                pass

        j = 1

        for posterUrl in art:
            Log("Trying next Image")
            Log(posterUrl)
            if not PAsearchSites.posterAlreadyExists(posterUrl, metadata):
                #Download image file for analysis
                try:
                    img_file = urllib.urlopen(posterUrl)
                    im = StringIO(img_file.read())
                    resized_image = Image.open(im)
                    width, height = resized_image.size
                    #Add the image proxy items to the collection
                    if width > 1 or height > width:
                        # Item is a poster. The width > 1 test means nearly every image
                        # qualifies; wide images are also added as art below (a standalone
                        # sketch of this decision follows this function).
                        metadata.posters[posterUrl] = Proxy.Preview(
                            HTTP.Request(posterUrl,
                                         headers={
                                             'Referer': 'http://www.google.com'
                                         }).content,
                            sort_order=j)
                    if width > 100 and width > height:
                        # Item is an art item
                        metadata.art[posterUrl] = Proxy.Preview(HTTP.Request(
                            posterUrl,
                            headers={
                                'Referer': 'http://www.google.com'
                            }).content,
                                                                sort_order=j)
                    j = j + 1
                except:
                    Log("there was an issue")

    return metadata
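
# The loop above sorts each downloaded image into the poster or art collection
# based on its pixel dimensions. Below is a minimal, self-contained sketch of
# that decision, assuming Pillow is available; classify_image is illustrative
# and not part of the original agent, and it uses a simple portrait/landscape
# rule rather than the exact width/height checks above.
def classify_image(url):
    """Return 'poster' for portrait images, 'art' for wide landscape ones."""
    import urllib
    from StringIO import StringIO
    from PIL import Image

    data = urllib.urlopen(url).read()
    width, height = Image.open(StringIO(data)).size
    if height > width:
        return 'poster'  # taller than wide: fits the poster slot
    if width > 100 and width > height:
        return 'art'  # wide enough landscape: background art
    return None  # tiny or square images are skipped
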
Example #11
def update(metadata, siteID, movieGenres, movieActors):
    Log('******UPDATE CALLED*******')
    temp = str(metadata.id).split("|")[0].replace('+', '/')

    url = PAsearchSites.getSearchBaseURL(siteID) + temp
    detailsPageElements = HTML.ElementFromURL(url)

    # Summary
    metadata.studio = "X-Art"
    paragraphs = detailsPageElements.xpath(
        '//div[@class="small-12 medium-12 large-12 columns info"]//p')
    summary = ""
    for paragraph in paragraphs:
        summary = summary + '\n\n' + paragraph.text_content()
    metadata.summary = summary.strip()
    metadata.title = detailsPageElements.xpath(
        '//div[@class="row info"]//div[@class="small-12 medium-12 large-12 columns"]'
    )[0].text_content().strip()
    date = detailsPageElements.xpath('//h2')[2].text_content()[:-1]
    date_object = datetime.strptime(date, '%b %d, %Y')
    metadata.originally_available_at = date_object
    metadata.year = metadata.originally_available_at.year

    #Tagline and Collection(s)
    tagline = PAsearchSites.getSearchSiteName(siteID).strip()
    metadata.tagline = tagline
    metadata.collections.add(tagline)

    # Genres
    movieGenres.clearGenres()
    # No Source for Genres, add manual
    movieGenres.addGenre("Artistic")
    movieGenres.addGenre("Glamorous")

    # Actors
    movieActors.clearActors()
    actors = detailsPageElements.xpath('//h2//a')
    if len(actors) > 0:
        if len(actors) == 3:
            movieGenres.addGenre("Threesome")
        if len(actors) == 4:
            movieGenres.addGenre("Foursome")
        if len(actors) > 4:
            movieGenres.addGenre("Orgy")
        for actorLink in actors:
            actorName = actorLink.text_content()
            actorPageURL = actorLink.get("href")
            actorPage = HTML.ElementFromURL(actorPageURL)
            actorPhotoURL = actorPage.xpath('//img[@class="info-img"]')[0].get(
                "src")
            movieActors.addActor(actorName, actorPhotoURL)

    # Posters/Background
    valid_names = list()
    metadata.posters.validate_keys(valid_names)
    metadata.art.validate_keys(valid_names)
    thumbs = []
    try:
        for posterURL in detailsPageElements.xpath(
                '//div[@class="gallery-item"]//img'):
            thumbs.append((posterURL.get('src')).replace(" ", "_"))
    except:
        Log("No Thumbnails found")
    background = detailsPageElements.xpath(
        '//img[contains(@src,"/videos")]')[0].get("src")
    metadata.art[background] = Proxy.Preview(HTTP.Request(background).content,
                                             sort_order=1)
    try:
        posterURL = str((thumbs[0]))[:-5] + "2.jpg"
    except:
        posterURL = background.replace("1.jpg", "2.jpg").replace(
            "1-lrg.jpg", "2-lrg.jpg")
    metadata.posters[posterURL] = Proxy.Preview(
        HTTP.Request(posterURL).content, sort_order=1)

    #Extra Posters
    import random
    art = []
    match = 0
    siteName = PAsearchSites.getSearchSiteName(siteID)

    for site in [
            "XartFan.com", "HQSluts.com", "ImagePost.com",
            "CoedCherry.com/pics", "Nude-Gals.com"
    ]:
        # Note: actorName still holds the last performer set by the actor loop above.
        fanSite = PAextras.getFanArt(site, art, actors, actorName,
                                     metadata.title.strip(), match, siteName)
        match = fanSite[2]
        if match == 1:
            break
    #try:
    #art = thumbs
    #except:
    #pass

    if match == 1 or match == 2:
        # Return the first few images, the last one, and a random selection
        # (a compact sketch of this selection follows this function).
        # To get more or fewer posters, edit the value in random.sample below
        # or refresh the metadata to get a different sample.
        try:
            sample = [art[0], art[1], art[2], art[3], art[-1]] + random.sample(
                art, 4)
            art = sample
            Log("Selecting subset of " + str(len(art)) +
                " images from the set.")
        except:
            pass

        try:
            j = 1

            for posterUrl in art:
                Log("Trying next Image")
                Log(posterUrl)
                if not PAsearchSites.posterAlreadyExists(posterUrl, metadata):
                    #Download image file for analysis
                    try:
                        # A browser User-Agent header is needed to fetch images from
                        # some fansites; no adverse effects seen so far.
                        hdr = {
                            'User-agent':
                            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
                        }
                        # urllib has no Request class in Python 2; use urllib2 instead.
                        import urllib2
                        req = urllib2.Request(posterUrl, headers=hdr)
                        img_file = urllib2.urlopen(req)
                        im = StringIO(img_file.read())
                        resized_image = Image.open(im)
                        width, height = resized_image.size
                        #Add the image proxy items to the collection
                        if width > 1 or height > width:
                            # Item is a poster. As above, the width > 1 test means nearly
                            # every image qualifies; wide images are also added as art below.
                            metadata.posters[posterUrl] = Proxy.Preview(
                                HTTP.Request(posterUrl,
                                             headers={
                                                 'Referer':
                                                 'http://www.google.com'
                                             }).content,
                                sort_order=j)
                        if width > 100 and width > height:
                            # Item is an art item
                            metadata.art[posterUrl] = Proxy.Preview(
                                HTTP.Request(posterUrl,
                                             headers={
                                                 'Referer':
                                                 'http://www.google.com'
                                             }).content,
                                sort_order=j)
                        j = j + 1
                    except:
                        Log("there was an issue")
                        #metadata.art[posterUrl] = Proxy.Preview(HTTP.Request(posterUrl, headers={'Referer': 'http://www.google.com'}).content, sort_order = j)
        except:
            pass

    return metadata
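
# Both update() variants trim a large fan-art list down to a handful of images:
# the first four, the last one, plus a random sample of four. Below is a compact
# sketch of that selection under the same intent; select_art_subset is
# illustrative and not part of the original agents, and it also drops duplicate
# picks, which the inline version does not.
def select_art_subset(art, sample_size=4):
    """Keep the first four images, the last one, and a random sample."""
    import random

    if len(art) <= 5 + sample_size:
        return list(art)
    keep = art[:4] + [art[-1]] + random.sample(art, sample_size)
    seen = set()
    # Preserve order while dropping any image that was selected more than once.
    return [u for u in keep if not (u in seen or seen.add(u))]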