Example #1
0
class Crawler:
    """Fetch a character's page, parse it and persist the result as JSON."""

    def __init__(self, url, character, downloader):
        """Store collaborators and immediately run the lookup for *character*."""
        self.url = url
        self.downloader = downloader
        self.parser = Parser()
        self.get_url_information(character)

    def get_url_information(self, character):
        """Build the lookup URL, POST the form and persist the parsed data.

        Returns the parsed character object, or None when the page reports
        that the character does not exist.
        """
        target = urljoin(self.url, 'community/?subtopic=characters')
        payload = self.config_params(character)
        response = self.downloader.post(target, data=payload)

        # NOTE: despite its name, character_not_found is truthy when the
        # character page WAS found (see the not-found test elsewhere).
        if not self.parser.character_not_found(response.text):
            return None

        parsed = self.parser.parse(response.text)
        self.save_data(parsed.__dict__)
        return parsed

    @staticmethod
    def config_params(character):
        """Return the form payload used to submit the character search."""
        return dict(name=character, x=0, y=0)

    @staticmethod
    def save_data(data):
        """Write the collected data to ./proj/database/<name>.json."""
        name = data.get('name').lower().replace(' ', '_')
        target_path = f'./proj/database/{name}.json'
        with open(target_path, 'w', encoding='utf-8') as handle:
            json.dump(data, handle)
            print(f'Personagem {name} foi salvo com sucesso!')
Example #2
0
 def __init__(self, url, character, downloader):
     """Store collaborators and immediately trigger the character lookup.

     Args:
         url: Base URL the crawler queries.
         character: Character name to look up on construction.
         downloader: HTTP client used to perform the requests.
     """
     self.downloader = downloader
     self.url = url
     # Fresh parser instance per crawler.
     self.parser = Parser()
     # Side effect on construction: performs the lookup right away.
     self.get_url_information(character)
Example #3
0
def test_not_found(snapshot, not_found_html):
    """The not-found page must yield a falsy result from character_not_found."""
    result = Parser().character_not_found(not_found_html)
    assert not result
Example #4
0
def test_extract_account_status(snapshot, resume_html):
    """Snapshot-check the extracted 'Account Status:' field."""
    soup = BeautifulSoup(resume_html, 'html.parser')
    value = Parser().extract_value(soup, 'Account Status:')
    snapshot.assert_match(value)
Example #5
0
def test_extract_deaths(snapshot, resume_html):
    """Snapshot-check every death entry extracted from the resume page."""
    soup = BeautifulSoup(resume_html, 'html.parser')
    for entry in Parser().extract_deaths(soup):
        snapshot.assert_match(entry)
Example #6
0
def test_extract_last_login(snapshot, resume_html):
    """Snapshot-check the 'Last Login:' field via its dedicated extractor."""
    soup = BeautifulSoup(resume_html, 'html.parser')
    # Uses the field-specific method rather than the generic extract_value.
    value = Parser().extract_last_login(soup, 'Last Login:')
    snapshot.assert_match(value)
Example #7
0
def test_extract_residence(snapshot, resume_html):
    """Snapshot-check the extracted 'Residence:' field."""
    soup = BeautifulSoup(resume_html, 'html.parser')
    value = Parser().extract_value(soup, 'Residence:')
    snapshot.assert_match(value)
Example #8
0
def test_extract_achievement(snapshot, resume_html):
    """Snapshot-check the extracted 'Achievement Points:' field."""
    soup = BeautifulSoup(resume_html, 'html.parser')
    value = Parser().extract_value(soup, 'Achievement Points:')
    snapshot.assert_match(value)
Example #9
0
def test_extract_vocation(snapshot, resume_html):
    """Snapshot-check the extracted 'Vocation:' field."""
    soup = BeautifulSoup(resume_html, 'html.parser')
    value = Parser().extract_value(soup, 'Vocation:')
    snapshot.assert_match(value)
Example #10
0
def test_extract_former_name(snapshot, resume_html):
    """Snapshot-check the extracted 'Former Names:' field."""
    soup = BeautifulSoup(resume_html, 'html.parser')
    value = Parser().extract_value(soup, 'Former Names:')
    snapshot.assert_match(value)