def get_next_image(self, last_image_counter=0):
        """Retrieves the URL of the latest image of Deutsche Bahn Gruppenpool,
        downloads the image, stores it in a temporary folder and returns the path
        to it
        """

        logging.debug('get_next_image({})'.format(last_image_counter))

        # Generate empty image
        next_image = wariety_wallpaper.WarietyWallpaper()
        page = 0

        if len(self.state) == 0:
            self.state = self.retrieve_images_data()
            self.state['page'] = 1
            page = 1
        else:
            page = self.state['page']

        # get image url
        if self.state['stat'] == 'ok':
            if last_image_counter >= len(self.state['photos']['photo']):
                page = int(page) + 1
                self.state = self.retrieve_images_data(page)
            image_id = self.state['photos']['photo'][last_image_counter]['id']
            image_secret = self.state['photos']['photo'][last_image_counter][
                'secret']
            image_title = self.state['photos']['photo'][last_image_counter][
                'title']
            image_author = self.state['photos']['photo'][last_image_counter][
                'ownername']
            image_info_data = self.retrieve_image_info_data(
                image_id, image_secret)
            if image_info_data['stat'] == 'ok':
                image_sizes = self.retrieve_image_sizes_data(image_id)
                if image_sizes['stat'] == 'ok':
                    image_url = ''
                    for size in image_sizes['sizes']['size']:
                        if int(size['width']) > 1900:  # TODO get current system's screen sizes
                            image_url = size['source']
                            break

                    # Fill image data
                    next_image.source_url = urllib.parse.unquote(BASE_URL)
                    next_image.source_type = DOWNLOADER_TYPE
                    next_image.image_author = image_author
                    next_image.source_name = DOWNLOADER_DESCRIPTION
                    next_image.image_url = urllib.parse.unquote(
                        urllib.parse.urljoin(BASE_URL, image_url))
                    next_image.location = ''
                    next_image.keywords = image_title
                    next_image.source_location = ''
                    next_image.found_at_counter = last_image_counter

                    # Store state
                    self.state[
                        'last_image_counter'] = next_image.found_at_counter
                    startdate = datetime.datetime.now().strftime('%Y%m%d')
                    self.state['startdate'] = startdate
                    self.state['page'] = page

        return next_image
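As an aside, the size-selection loop above takes the first entry of the Flickr getSizes response that is wider than 1900 px. Below is a minimal, self-contained sketch of just that step, assuming a response dict shaped like the one indexed above ('stat', then 'sizes' -> 'size' with 'width'/'source' keys); the sample values are invented:

def pick_source_wider_than(sizes_data, min_width=1900):
    """Return the first 'source' URL wider than min_width, or '' if none."""
    if sizes_data.get('stat') != 'ok':
        return ''
    for size in sizes_data['sizes']['size']:
        # coerce before comparing, in case width values arrive as strings
        if int(size['width']) > min_width:
            return size['source']
    return ''

sample = {
    'stat': 'ok',
    'sizes': {'size': [
        {'width': 1024, 'source': 'https://example.com/small.jpg'},
        {'width': 2048, 'source': 'https://example.com/large.jpg'},
    ]},
}
assert pick_source_wider_than(sample) == 'https://example.com/large.jpg'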
Example no. 2
    def get_next_image(self, last_image_counter=0):
        """
        Retrieves a BING picture of the day and returns it
        as an instance of wariety wallpaper. Depending on the counter
        given by 'last_image_counter' returns the next picture.
        :param last_image_counter:
        :return next_image:
        """

        logging.debug('get_next_image({})'.format(last_image_counter))

        # Generate empty image
        next_image = wariety_wallpaper.WarietyWallpaper()

        # Receive image data
        try:
            response = requests.get(START_URL, proxies=self.proxies)
            image_data = json.loads(response.text)

            # Collect image data
            image_url = image_data["images"][0]["url"]
            image_url = image_url.split("&")[0]
            image_title = image_data["images"][0]["title"]
            image_copyright = image_data["images"][0]["copyright"]
            if image_title:
                image_title = image_title + '.'
            if image_copyright:
                image_copyright = image_copyright + '.'
            _next_image_url = urllib.parse.urljoin(BASE_URL, image_url)
            _next_image_url = _next_image_url.split('/')[-1].split('=')[-1]
            try:
                next_image.image_name = urllib.parse.unquote(_next_image_url)
            except Exception:
                logging.debug('get_next_image() - invalid URL {}'.format(
                    _next_image_url))
                next_image.image_name = ''

            # Fill image data
            next_image.source_url = urllib.parse.unquote(BASE_URL)
            next_image.source_type = DOWNLOADER_TYPE
            next_image.image_author = ''
            next_image.source_name = DOWNLOADER_DESCRIPTION
            next_image.image_url = urllib.parse.unquote(
                urllib.parse.urljoin(BASE_URL, image_url))
            next_image.location = ''
            next_image.keywords = '{0} {1}'.format(image_title,
                                                   image_copyright).strip()
            next_image.source_location = ''
            next_image.found_at_counter = last_image_counter

            # Store state
            self.state['last_image_counter'] = next_image.found_at_counter
            startdate = datetime.datetime.now().strftime('%Y%m%d')
            self.state['startdate'] = startdate
            self.state['idx'] = 0

        except requests.ConnectionError:
            logging.debug('get_next_image() - ConnectionError')

        return next_image
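For reference, here is an offline sketch of the field extraction above, using an invented payload that only mirrors the keys the code reads from the Bing image-of-the-day JSON ('images[0].url', 'title', 'copyright'); BASE and the sample values are placeholders, not taken from a real response:

import urllib.parse

BASE = 'https://www.bing.com'  # placeholder standing in for BASE_URL above

payload = {
    'images': [{
        'url': '/th?id=OHR.SamplePicture_1920x1080.jpg&rf=Sample.jpg',
        'title': 'Sample title',
        'copyright': 'Sample photographer',
    }]
}

image_url = payload['images'][0]['url'].split('&')[0]   # drop extra query parts
full_url = urllib.parse.urljoin(BASE, image_url)         # make the URL absolute
image_name = full_url.split('/')[-1].split('=')[-1]      # name after the last '='
keywords = '{0}. {1}.'.format(payload['images'][0]['title'],
                              payload['images'][0]['copyright'])
assert image_name == 'OHR.SamplePicture_1920x1080.jpg'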
Example no. 3
    def get_next_image(self, last_image_counter=0):
        """
        Loops through all locally stored Windows Spotlight assets, sorted
        newest first, and returns the asset at the position given by
        'last_image_counter' as an instance of wariety wallpaper.
        """

        logging.debug('get_next_image({})'.format(last_image_counter))

        next_image = wariety_wallpaper.WarietyWallpaper()
        counter = -1
        localappdata = os.environ['LOCALAPPDATA']
        localstate_assets = r'\Packages\Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy\LocalState\Assets'
        list_of_files = sorted(glob.glob(localappdata + localstate_assets + r'\*'),
                               key=os.path.getmtime,
                               reverse=True)
        for asset in list_of_files:
            counter = counter + 1
            if counter == last_image_counter:
                extension = imghdr.what(asset)
                if extension in ['jpeg', 'jpg', 'png']:
                    # Generate pseudo url
                    try:
                        next_image.image_name = os.path.split(asset)[1].split(
                            '\\')[-1]
                    except Exception:
                        next_image.image_name = ''
                    next_image.source_url = 'https://docs.microsoft.com/en-us/windows/configuration/windows-spotlight'
                    next_image.source_type = DOWNLOADER_TYPE
                    next_image.source_name = 'Microsoft Spotlight'
                    next_image.image_url = asset
                    next_image.found_at_counter = last_image_counter
        return next_image
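A minimal standalone sketch of the selection logic above: list the files in the Spotlight assets folder newest first and return the one at the requested counter. The path is the same one the code above builds, assembled here with os.path.join instead of string concatenation; it only exists on Windows, so elsewhere the call at the end simply prints None:

import glob
import os

def nth_newest_file(folder, n=0):
    """Return the n-th newest file in 'folder' (0 = most recent), or None."""
    files = sorted(glob.glob(os.path.join(folder, '*')),
                   key=os.path.getmtime, reverse=True)
    return files[n] if n < len(files) else None

assets_folder = os.path.join(
    os.environ.get('LOCALAPPDATA', ''), 'Packages',
    'Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy',
    'LocalState', 'Assets')
print(nth_newest_file(assets_folder, 0))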
Example no. 4
 def __init__(self, config=None):
     """
     Create a downloader for an image source
     :param config: optional, see get_config()
     """
     super().__init__()
     logging.debug('__init__({})'.format(config))
     self.config = config
     self.target_folder = os.path.join(os.environ['LOCALAPPDATA'],
                                       wariety.APP_NAME)
     self.capability = CAPABILITIES['single']
     self.start_url = START_URL
     self.base_url = BASE_URL
     self.downloader_type = DOWNLOADER_TYPE
     self.downloader_desc = DOWNLOADER_DESCRIPTION
     self.next_image = wariety_wallpaper.WarietyWallpaper()
     self.proxies = {}
     if self.config and self.config.proxy_enable:
         self.proxies = self.get_proxy()
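The constructor above expects a config object with a 'proxy_enable' flag and a get_proxy() helper, neither of which is shown in this excerpt. As a sketch of the assumed shape: the proxies attribute is the standard mapping that requests accepts, which is how the Bing example above passes it to requests.get(); the host below is a made-up placeholder:

import requests

def build_proxies(proxy_enable, proxy_url='http://proxy.example.com:8080'):
    """Return a requests-style proxies mapping, or {} for a direct connection."""
    if not proxy_enable:
        return {}
    return {'http': proxy_url, 'https': proxy_url}

# e.g. requests.get(START_URL, proxies=build_proxies(True))   # proxied
#      requests.get(START_URL, proxies=build_proxies(False))  # direct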
Example no. 5
    def get_next_image(self, last_image_counter=0):
        """Retrieves a picture of the day of Wikimedia and returns it
        as an instance of wariety wallpaper. Depending on the counter
        given by 'last_image_counter' returns the next picture.
        """

        logging.debug('get_next_image({})'.format(last_image_counter))

        # Generate empty image
        next_image = wariety_wallpaper.WarietyWallpaper()

        # Set proxy if required
        req = urllib.request.Request(START_URL)
        if self.config.proxy_enable:
            proxy_host = self.proxies['http']
            req.set_proxy(proxy_host, 'http')

        # Receive image data
        page = urllib.request.urlopen(req).read()
        soup = bs4.BeautifulSoup(page, 'html.parser')
        plainlist = soup.find('div', {'class': 'plainlist'})
        hlist = plainlist.findAll('li', {'class': 'hlist'})

        months_list = []

        # Get all month pages
        for hlist_item in hlist:
            hlist_li_href = hlist_item.findAll('a', href=True)
            for hlist_li_href_item in hlist_li_href:
                month_page_url = urllib.parse.urljoin(
                    BASE_URL, hlist_li_href_item['href'])
                months_list.append(month_page_url)

        # Grab URL to images
        counter = -1
        days_counter = 0

        # for month_page in months_list:
        while months_list:
            month_page = months_list.pop(0)

            # Speed up things by counting days per month
            days_counter = days_counter + self.get_no_of_days_of_month_by_url(
                month_page)
            if days_counter < last_image_counter:
                continue
            else:
                page = urllib.request.urlopen(month_page).read()
                soup = bs4.BeautifulSoup(page, 'html.parser')
                potd_tables = soup.findAll('table')
                for potd_table in potd_tables:
                    hyperlinks = potd_table.findAll('a', {'class': 'image'})
                    for hyperlink in hyperlinks:
                        counter = counter + 1
                        if counter == last_image_counter:

                            # Return one image after the last transferred
                            next_image.image_url = urllib.parse.urljoin(
                                BASE_URL, hyperlink['href'])
                            if (next_image.image_url.endswith('jpg')
                                    or next_image.image_url.endswith('jpeg')
                                    or next_image.image_url.endswith('png')):

                                # We just want png or jpeg
                                # Collect image data
                                image_page = urllib.request.urlopen(
                                    next_image.image_url).read()
                                image_soup = bs4.BeautifulSoup(
                                    image_page, 'html.parser')
                                image_file = image_soup.find(id='file')
                                target_url = image_file.a
                                file_info_section = image_soup.find(
                                    'table',
                                    {'class': 'fileinfotpl-type-information'})

                                # Fill image data
                                try:
                                    file_location_section = file_info_section.find(
                                        'td', text='Location')
                                    next_image.location = self.get_text_of_valid_sibling(
                                        file_location_section).strip().split(
                                            '\n')[0]
                                except Exception:
                                    next_image.location = ''
                                try:
                                    file_author_section = file_info_section.find(
                                        'td', text='Author')
                                    next_image.image_author = self.get_text_of_valid_sibling(
                                        file_author_section).strip().split(
                                            '\n')[0]
                                except Exception:
                                    next_image.image_author = ''
                                try:
                                    file_keywords_section = file_info_section.find(
                                        'td', text='Keywords')
                                    next_image.keywords = self.get_text_of_valid_sibling(
                                        file_keywords_section).strip().split(
                                            '\n')[0]
                                except Exception:
                                    next_image.keywords = ''
                                unquoted_target_url = urllib.parse.unquote(
                                    target_url['href'])
                                try:
                                    next_image.image_name = unquoted_target_url.split(
                                        '/')[-1]
                                except Exception:
                                    next_image.image_name = ''
                                next_image.source_url = urllib.parse.unquote(
                                    next_image.image_url)
                                next_image.source_type = DOWNLOADER_TYPE
                                next_image.source_name = DOWNLOADER_DESCRIPTION
                                next_image.image_url = unquoted_target_url
                                next_image.found_at_counter = counter

                                # Store state
                                self.state[
                                    'last_image_counter'] = next_image.found_at_counter
                                startdate = datetime.datetime.now().strftime(
                                    '%Y%m%d')
                                self.state['startdate'] = startdate
                                self.state['idx'] = 0

                                return next_image
                            else:
                                counter = counter - 1
                                continue
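The month-skipping shortcut above calls a get_no_of_days_of_month_by_url() helper that is not shown in this excerpt. Below is a hypothetical sketch of such a helper, assuming the month-page URL ends in a 'YYYY-MM' segment; both the URL format and the example link are assumptions for illustration only:

import calendar

def get_no_of_days_of_month_by_url(month_page_url):
    """Hypothetical helper: derive the day count from a '.../YYYY-MM' page URL."""
    last_segment = month_page_url.rstrip('/').split('/')[-1]
    year, month = last_segment.split('-')[:2]
    return calendar.monthrange(int(year), int(month))[1]

assert get_no_of_days_of_month_by_url('https://example.org/potd/2021-02') == 28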
Example no. 6
 def get_next_image(self, last_image_counter=0):
     """Stub implementation: prints the target folder and returns no image."""
     next_image = wariety_wallpaper.WarietyWallpaper()
     print(self.target_folder)
     return None