Example #1
    def _reprocess_issue_without_cvid(self, issue_id):
        '''Reprocess an existing issue that has no ComicVine ID.'''

        # Make sure the issue exists
        issue = Issue.objects.filter(id=issue_id).first()

        if issue:
            # 1. Attempt to extract series name, issue number, year and cover.
            extracted = fnameparser.extract(issue.file)
            series_name = extracted[0]
            issue_number = extracted[1]
            issue_year = extracted[2]

            cfh = ComicFileHandler()
            issue_cover = cfh.extract_cover(issue.file)
            issue.page_count = cfh.get_page_count(issue.file)

            # 2. Update Issue information:
            Issue.objects.filter(id=issue_id).update(
                number=issue_number if issue_number else 1,
                date=((issue_year + '-01-01')
                      if issue_year else datetime.date.today()),
                cover=issue_cover,
            )

            # 3. Update Series information:
            if Series.objects.filter(id=issue.series.id).exists():
                Series.objects.filter(id=issue.series.id).update(
                    name=series_name)
            else:
                series = Series()
                series.name = series_name
                series.save()
                issue.series = series
                issue.save()
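All four examples rely on fnameparser.extract() returning a three-element list of strings, [series_name, issue_number, issue_year], with empty strings for anything it could not parse. The real parser is not shown in this section; the sketch below is only a hypothetical stand-in that satisfies that contract (the regexes, the extension handling, and the example filename are assumptions):

import os
import re

def extract(filepath):
    """Hypothetical stand-in for fnameparser.extract().

    Returns [series_name, issue_number, issue_year] as strings, using ''
    for any piece that cannot be recovered from the filename -- the same
    truthiness contract the examples in this section rely on.
    """
    name = os.path.splitext(os.path.basename(filepath))[0]

    # Year: first parenthesised four-digit year, e.g. "(2012)".
    year_match = re.search(r'\((19|20)\d{2}\)', name)
    issue_year = year_match.group(0).strip('()') if year_match else ''

    # Issue number: first standalone number, e.g. "001" or "#14".
    number_match = re.search(r'(?:#|\b)(\d+(?:\.\d+)?)\b', name)
    issue_number = number_match.group(1) if number_match else ''

    # Series name: everything before the issue number.
    series_name = re.split(r'\s+#?\d', name, maxsplit=1)[0].strip(' -_')

    return [series_name, issue_number, issue_year]

# extract('Saga 001 (2012).cbz') -> ['Saga', '001', '2012']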
Example #2
    def _process_issue_without_cvid(self, filepath):
        '''Create an issue without a ComicVine ID.'''

        # Make sure the issue hasn't already been added
        matching_issue = Issue.objects.filter(file=filepath)

        filename = os.path.basename(filepath)

        if not matching_issue:
            # 1. Attempt to extract series name, issue number, and year
            extracted = fnameparser.extract(filepath)
            series_name = extracted[0]
            issue_number = extracted[1]
            issue_year = extracted[2]

            # 2. Set Issue Information:
            issue = Issue()
            issue.file = filepath
            issue.number = issue_number if issue_number else 1
            issue.date = ((issue_year + '-01-01') if issue_year
                          else datetime.date.today())

            cfh = ComicFileHandler()
            issue.cover = cfh.extract_cover(filepath)
            issue.page_count = cfh.get_page_count(filepath)

            # 3. Set Series Information:
            matching_series = Series.objects.filter(name=series_name)

            if not matching_series:
                series = Series()
                series.name = series_name
                series.save()
                issue.series = series
            else:
                issue.series = matching_series[0]

            # 4. Save Issue.
            issue.save()
        else:
            self._reprocess_issue_without_cvid(matching_issue[0].id)
            # Re-fetch the reprocessed issue so the log below reports its values.
            issue = Issue.objects.get(id=matching_issue[0].id)

        self.logger.info(
            '\"%(filename)s\" was processed successfully as \"%(series)s - #%(number)s\"'
            % {
                'filename': filename,
                'series': issue.series.name,
                'number': issue.number
            })
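The example above handles a single file path; a caller still has to find those paths. The sketch below shows one way such a helper could be driven from a directory scan. The function name, the accepted extensions, and the idea of passing the importer object in are all assumptions for illustration, not part of the examples:

import os

def process_directory_without_cvid(importer, directory_path):
    """Hypothetical driver: feed every comic archive found in directory_path
    to importer._process_issue_without_cvid(). The extension list is an
    assumption."""
    for entry in sorted(os.listdir(directory_path)):
        filepath = os.path.join(directory_path, entry)
        if os.path.isfile(filepath) and entry.lower().endswith(('.cbz', '.cbr')):
            importer._process_issue_without_cvid(filepath)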
Example #3
    def _create_issue_without_cvid(self, filename):
        '''Create an issue from a file in the library directory, without a ComicVine ID.'''

        # Make sure the issue hasn't already been added
        matching_issue = Issue.objects.filter(file=self.directory_path +
                                              filename)

        if not matching_issue:
            # Attempt to extract series name, issue number, and year
            extracted = fnameparser.extract(filename)
            series_name = extracted[0]
            issue_number = extracted[1]
            issue_year = extracted[2]

            # 1. Set basic issue information:
            issue = Issue()
            issue.file = self.directory_path + filename
            issue.cvid = ''
            issue.cvurl = ''
            issue.name = ''
            issue.desc = ''

            if issue_number:
                issue.number = issue_number
            else:
                issue.number = 1

            if issue_year:
                issue.date = issue_year + '-01-01'
            else:
                issue.date = datetime.date.today()

            cfh = ComicFileHandler()
            issue.cover = cfh.extract_cover(self.directory_path + filename)

            # 2. Set Series info:
            matching_series = Series.objects.filter(name=series_name)

            if not matching_series:
                series = Series()

                series.cvid = ''
                series.cvurl = ''
                series.name = series_name
                series.desc = ''

                # 3. Save Series.
                series.save()
                issue.series = series

            else:
                issue.series = matching_series[0]

            # 4. Save issue.
            issue.save()
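Examples #1 through #3 all fall back from issue_year + '-01-01' (a string) to datetime.date.today() (a date object). Django's DateField accepts either on save, but a small helper could keep the type consistent; the sketch below is one possible cleanup, assuming issue_year is either a four-digit year string or empty:

import datetime

def cover_date(issue_year):
    """Return a datetime.date for the issue, defaulting to today when the
    filename carried no year. Assumes issue_year is '' or a 'YYYY' string."""
    if issue_year:
        return datetime.date(int(issue_year), 1, 1)
    return datetime.date.today()

# e.g. issue.date = cover_date(issue_year)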
Example #4
    def _scrape_issue(self, filename, cvid):
        '''Scrape ComicVine for the given issue and populate the related models.'''
        # Make API call and store issue response
        request_issue = Request(self.baseurl + 'issue/4000-' + str(cvid) +
                                '/?format=json&api_key=' + self._api_key +
                                self.issue_fields)
        response_issue = json.loads(
            urlopen(request_issue).read().decode('utf-8'))
        time.sleep(1)

        # 1. Set basic issue information:
        issue = Issue()
        issue.file = self.directory_path + filename
        issue.cvid = response_issue['results']['id']
        issue.cvurl = response_issue['results']['site_detail_url']
        issue.name = response_issue['results']['name'] or ''
        issue.number = response_issue['results']['issue_number']
        issue.date = response_issue['results']['cover_date']

        if response_issue['results']['deck']:
            issue.desc = response_issue['results']['deck']
        elif response_issue['results']['description']:
            issue.desc = response_issue['results']['description']
        else:
            issue.desc = ''

        issue_cover_url = self.imageurl + response_issue['results']['image'][
            'super_url'].rsplit('/', 1)[-1]
        issue_cover_filename = unquote_plus(issue_cover_url.split('/')[-1])
        issue.cover = urlretrieve(
            issue_cover_url, 'media/images/covers/' + issue_cover_filename)[0]

        # 2. Set Series info:
        matching_series = Series.objects.filter(
            cvid=response_issue['results']['volume']['id'])

        if not matching_series:
            series = Series()

            request_series = Request(
                response_issue['results']['volume']['api_detail_url'] +
                '?format=json&api_key=' + self._api_key + self.series_fields)
            response_series = json.loads(
                urlopen(request_series).read().decode('utf-8'))
            time.sleep(1)

            series.cvid = response_series['results']['id']
            series.cvurl = response_series['results']['site_detail_url']
            series.name = response_series['results']['name']
            series.year = response_series['results']['start_year']

            if response_series['results']['deck']:
                series.desc = response_series['results']['deck']
            elif response_series['results']['description']:
                series.desc = response_series['results']['description']
            else:
                series.desc = ''

            # 3. Set Publisher info:
            matching_publisher = Publisher.objects.filter(
                cvid=response_series['results']['publisher']['id'])

            if not matching_publisher:
                publisher = Publisher()

                # Store publisher response
                request_publisher = Request(
                    response_series['results']['publisher']['api_detail_url'] +
                    '?format=json&api_key=' + self._api_key +
                    self.publisher_fields)
                response_publisher = json.loads(
                    urlopen(request_publisher).read().decode('utf-8'))
                time.sleep(1)

                if response_publisher['results']['image']:
                    publisher_logo_url = self.imageurl + response_publisher[
                        'results']['image']['super_url'].rsplit('/', 1)[-1]
                    publisher_logo_filename = unquote_plus(
                        publisher_logo_url.split('/')[-1])
                    publisher_logo_filepath = urlretrieve(
                        publisher_logo_url, 'media/images/publishers/' +
                        publisher_logo_filename)[0]
                else:
                    publisher_logo_filepath = ''

                publisher.cvid = response_publisher['results']['id']
                publisher.cvurl = response_publisher['results'][
                    'site_detail_url']
                publisher.name = response_publisher['results']['name']

                if response_publisher['results']['deck']:
                    publisher.desc = response_publisher['results']['deck']
                elif response_publisher['results']['description']:
                    publisher.desc = response_publisher['results'][
                        'description']
                else:
                    publisher.desc = ''

                publisher.logo = publisher_logo_filepath

                publisher.save()
                series.publisher = publisher

            else:
                series.publisher = matching_publisher[0]

            series.save()
            issue.series = series

        else:
            issue.series = matching_series[0]

        # 4. Save issue.
        issue.save()

        # 5. Set Arcs info
        for story_arc in response_issue['results']['story_arc_credits']:
            time.sleep(1)

            # Check to make sure the arc doesn't already exist.
            matching_arc = Arc.objects.filter(cvid=story_arc['id'])

            if not matching_arc:
                # Store Arc response
                request_arc = Request(story_arc['api_detail_url'] +
                                      '?format=json&api_key=' + self._api_key +
                                      self.arc_fields)
                response_arc = json.loads(
                    urlopen(request_arc).read().decode('utf-8'))

                # Get Arc image
                if response_arc['results']['image']:
                    arc_image_url = self.imageurl + response_arc['results'][
                        'image']['super_url'].rsplit('/', 1)[-1]
                    arc_image_filename = unquote_plus(
                        arc_image_url.split('/')[-1])
                    arc_image_filepath = urlretrieve(
                        arc_image_url,
                        'media/images/arcs/' + arc_image_filename)[0]
                else:
                    arc_image_filepath = ''

                if response_arc['results']['deck']:
                    arc_desc = response_arc['results']['deck']
                elif response_arc['results']['description']:
                    arc_desc = response_arc['results']['description']
                else:
                    arc_desc = ''

                # Create Arc
                issue.arcs.create(
                    cvid=response_arc['results']['id'],
                    cvurl=response_arc['results']['site_detail_url'],
                    name=response_arc['results']['name'],
                    desc=arc_desc,
                    image=arc_image_filepath)

            else:
                # Add found Arc to Issue
                issue.arcs.add(matching_arc[0])

        # 6. Set Characters info
        for character in response_issue['results']['character_credits']:
            time.sleep(1)

            # Check to make sure the character doesn't already exist.
            matching_character = Character.objects.filter(cvid=character['id'])

            if not matching_character:
                # Store Character response
                request_character = Request(character['api_detail_url'] +
                                            '?format=json&api_key=' +
                                            self._api_key +
                                            self.character_fields)
                response_character = json.loads(
                    urlopen(request_character).read().decode('utf-8'))

                # Get character image
                if response_character['results']['image']:
                    character_image_url = self.imageurl + response_character[
                        'results']['image']['super_url'].rsplit('/', 1)[-1]
                    character_image_filename = unquote_plus(
                        character_image_url.split('/')[-1])
                    character_image_filepath = urlretrieve(
                        character_image_url, 'media/images/characters/' +
                        character_image_filename)[0]
                else:
                    character_image_filepath = ''

                if response_character['results']['deck']:
                    character_desc = response_character['results']['deck']
                elif response_character['results']['description']:
                    character_desc = response_character['results'][
                        'description']
                else:
                    character_desc = ''

                # Create Character
                issue.characters.create(
                    cvid=response_character['results']['id'],
                    cvurl=response_character['results']['site_detail_url'],
                    name=response_character['results']['name'],
                    desc=character_desc,
                    image=character_image_filepath)

            else:
                # Add found Character to Issue
                issue.characters.add(matching_character[0])

        # 7. Set Creators info
        for person in response_issue['results']['person_credits']:
            time.sleep(1)

            # Check to make sure the creator doesn't already exist.
            matching_creator = Creator.objects.filter(cvid=person['id'])

            if not matching_creator:
                # Store Creator response
                request_creator = Request(person['api_detail_url'] +
                                          '?format=json&api_key=' +
                                          self._api_key + self.creator_fields)
                response_creator = json.loads(
                    urlopen(request_creator).read().decode('utf-8'))

                # Get creator image
                if response_creator['results']['image']:
                    creator_image_url = self.imageurl + response_creator[
                        'results']['image']['super_url'].rsplit('/', 1)[-1]
                    creator_image_filename = unquote_plus(
                        creator_image_url.split('/')[-1])
                    creator_image_filepath = urlretrieve(
                        creator_image_url,
                        'media/images/creators/' + creator_image_filename)[0]
                else:
                    creator_image_filepath = ''

                if response_creator['results']['deck']:
                    creator_desc = response_creator['results']['deck']
                elif response_creator['results']['description']:
                    creator_desc = response_creator['results']['description']
                else:
                    creator_desc = ''

                # Create Creator
                issue.creators.create(
                    cvid=response_creator['results']['id'],
                    cvurl=response_creator['results']['site_detail_url'],
                    name=response_creator['results']['name'],
                    desc=creator_desc,
                    image=creator_image_filepath)

            else:
                # Add found Creator to Issue
                issue.creators.add(matching_creator[0])

        # 8. Set Teams info
        for team in response_issue['results']['team_credits']:
            time.sleep(1)

            # Check to make sure the team doesn't already exist.
            matching_team = Team.objects.filter(cvid=team['id'])

            if not matching_team:
                # Store Team response
                request_team = Request(team['api_detail_url'] +
                                       '?format=json&api_key=' +
                                       self._api_key + self.team_fields)
                response_team = json.loads(
                    urlopen(request_team).read().decode('utf-8'))

                # Get team image
                if response_team['results']['image']:
                    team_image_url = self.imageurl + response_team['results'][
                        'image']['super_url'].rsplit('/', 1)[-1]
                    team_image_filename = unquote_plus(
                        team_image_url.split('/')[-1])
                    team_image_filepath = urlretrieve(
                        team_image_url,
                        'media/images/teams/' + team_image_filename)[0]
                else:
                    team_image_filepath = ''

                if response_team['results']['deck']:
                    team_desc = response_team['results']['deck']
                elif response_team['results']['description']:
                    team_desc = response_team['results']['description']
                else:
                    team_desc = ''

                # Create Team
                issue.teams.create(
                    cvid=response_team['results']['id'],
                    cvurl=response_team['results']['site_detail_url'],
                    name=response_team['results']['name'],
                    desc=team_desc,
                    image=team_image_filepath)

                # Link the new team to any characters already in the database
                for character in response_team['results']['characters']:
                    matching_character = Character.objects.filter(
                        cvid=character['id'])
                    if matching_character:
                        team_item = Team.objects.filter(cvid=team['id'])
                        matching_character[0].teams.add(team_item[0])

            else:
                # Add found Team to Issue
                issue.teams.add(matching_team[0])
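The scrape routine above repeats the same download-and-store pattern for covers, publisher logos, arcs, characters, creators, and teams. The sketch below factors that pattern into one helper using the same urlretrieve and unquote_plus calls and the same media/images/<subdir>/ layout seen in the example; the helper itself and its name are assumptions, not code from the project:

from urllib.parse import unquote_plus
from urllib.request import urlretrieve

def fetch_cv_image(image_url, subdir):
    """Download a ComicVine image into media/images/<subdir>/ and return the
    local path, or '' when no image URL is available -- mirroring the
    fallback used throughout _scrape_issue."""
    if not image_url:
        return ''
    filename = unquote_plus(image_url.split('/')[-1])
    return urlretrieve(image_url, 'media/images/' + subdir + '/' + filename)[0]

# e.g. publisher.logo = fetch_cv_image(publisher_logo_url, 'publishers')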