Example #1
def multiprocess_pages(base_URL, job_title, job_location, page_number):
    """Grab the URLS and other relevant info. from job postings on the page. 

    The Simply Hired URL used for job searching takes another parameter, `pn`, that
    allows you to start the job search at jobs 11-20, 21-30, etc. Use this to grab
    job results from multiple pages at once, and then feed the jobs from each page
    to threads for further parsing. 

    Args: 
    ----
        base_URL: str 
        job_title: str 
        job_location: str 
        page_number: int 
    """

    url = base_URL + '&pn=' + str(page_number)
    html = get_html(url)
    # Each row corresponds to a job.
    jobs = html.select('.js-job')
    threads = []
    mongo_update_lst = []
    for job in jobs:
        thread = RequestInfoThread(job, job_title, job_location)
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
        mongo_update_lst.append(thread.json_dct)

    store_in_mongo(mongo_update_lst, 'job_postings', 'simplyhired')
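A minimal driver sketch for fanning this function out over many result pages, assuming `multiprocess_pages` and its helpers live in an importable module and that each SimplyHired page holds 10 jobs (the function name `scrape_all_pages` and the pool size are assumptions, not part of the original):

from functools import partial
from multiprocessing import Pool

def scrape_all_pages(base_URL, job_title, job_location, num_jobs):
    # One `pn` value per page of 10 results; `pn` is assumed to be 1-indexed.
    page_numbers = range(1, num_jobs // 10 + 2)
    scrape_page = partial(multiprocess_pages, base_URL, job_title, job_location)
    with Pool(processes=4) as pool:
        pool.map(scrape_page, page_numbers)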
Example #2
def get_critic_lst_content(critics_hrefs_values, critic_lst_idx):
    """Grab the CSS element that holds all relevant info. for a critic list. 

    For the critic href at the given index in `critics_hrefs_values`, grab
    all of the items matching the '.listLargeTitle' selector. The caller can
    then cycle through each of these items and pull information from them.

    Args: 
    ----
        critics_hrefs_values: list of strings 
        critic_lst_idx: int

    Return: 
    ------
        critic_lst_content_vals: list 
        soup: bs4.BeautifulSoup object
    """

    base_individual_list_url = 'http://www.albumoftheyear.org'
    css_selectors = ['.listLargeTitle']

    critic_url = base_individual_list_url + critics_hrefs_values[critic_lst_idx]
    soup = get_html(critic_url)

    critic_content_lst = list(select_soup(soup, css_selectors).values())
    critic_lst_content_vals = critic_content_lst[0]
    # We reverse them because they are posted from the highest rank number
    # (the worst album) down to rank 1 (the highest-ranked album on the
    # critic list).
    critic_lst_content_vals.reverse()

    return critic_lst_content_vals, soup
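A hypothetical usage sketch: once the critic hrefs have been scraped from the lists page (as in the `__main__` block of a later example), each list's titles can be walked in rank order.

# `critics_hrefs_values` is assumed to be a list of href strings, e.g.
# '/list/516-some-critic-top-50-albums/' (a made-up path).
for idx in range(len(critics_hrefs_values)):
    content_vals, soup = get_critic_lst_content(critics_hrefs_values, idx)
    for element in content_vals:
        # Each element is one '.listLargeTitle' entry, e.g. "Artist - Album".
        print(element.text.strip())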
Example #3
def multiprocess_pages(base_URL, job_title, job_location, page_start): 
    """Grab the URLS and other relevant info. from job postings on the page. 

    The Indeed URL used for job searching takes another parameter, `start`, that 
    allows you to start the job search at jobs 11-20, 21-30, etc. Use this to grab
    job results from multiple pages at once, passing the result from a page on to
    a thread to grab the details from each job posting. 
    
    Args: 
    ----
        base_URL: str 
        job_title: str 
        job_location: str 
        page_start: int 
    """

    url = base_URL + '&start=' + str(page_start)
    html = get_html(url)
    # Each row corresponds to a job. 
    rows = html.select('.row')
    threads = []
    mongo_update_lst = []
    for row in rows: 
        thread = RequestInfoThread(row, job_title, job_location)
        thread.start()
        threads.append(thread)
    for thread in threads: 
        thread.join()
        mongo_update_lst.append(thread.json_dct)

    store_in_mongo(mongo_update_lst, 'job_postings', 'indeed')
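All three job-board examples lean on a `RequestInfoThread` class that isn't shown. A minimal sketch of the API they assume: a `threading.Thread` subclass that parses its row in `run()` and leaves the result on `self.json_dct` for the caller to collect after `join()`. The parsed fields below are placeholders, not the real class's output.

import threading

class RequestInfoThread(threading.Thread):
    def __init__(self, row, job_title, job_location):
        super().__init__()
        self.row = row
        self.job_title = job_title
        self.job_location = job_location
        self.json_dct = {}

    def run(self):
        # Placeholder parsing; the real class presumably pulls structured
        # fields (title, company, posting text) out of the row's markup.
        self.json_dct = {
            'search_title': self.job_title,
            'search_location': self.job_location,
            'posting_txt': self.row.text,
        }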
Example #4
def multiprocess_pages(base_URL, job_title, job_location, page_num):
    """Grab the URLs and other relevant info. from job postings on the page. 

    The ZipRecruiter URL used for job searching takes an additional parameter,
    `page`, that allows you to request a given page of results (pages run from
    0 to 20, and 20 is the max). Use this to grab job results from multiple
    pages at once, and then pass jobs on to threads to grab relevant info.

    Args: 
    ----
        base_URL: str 
        job_title: str 
        job_location: str 
        page_num: int
    """

    url = base_URL + '&page=' + str(page_num)
    html = get_html(url)
    rows = html.select('.job_result')
    threads = []
    mongo_update_lst = []
    for row in rows:
        thread = RequestInfoThread(row, job_title, job_location)
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
        mongo_update_lst.append(thread.json_dct)

    store_in_mongo(mongo_update_lst, 'job_postings', 'ziprecruiter')
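Every example calls a `get_html` helper that is also not shown. A minimal sketch, assuming it simply issues a GET request and parses the response with BeautifulSoup:

import requests
from bs4 import BeautifulSoup

def get_html(url):
    # Fetch the page and hand back a parsed soup object.
    response = requests.get(url)
    return BeautifulSoup(response.text, 'html.parser')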
Example #5
    def scrape_pages(self):
        """Scrape all pages stored in `self.web_urls`."""

        for article in self.articles_to_scrape:
            url = article['web_url']

            if url.startswith('/'):
                url = 'http://www.nytimes.com' + url
            # Throttle to roughly 20 requests per second.
            sleep(1 / 20)
            soup = get_html(url)
            article_txt = self._parse_soup(soup)

            if article_txt:
                article['text'] = article_txt
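`_parse_soup` isn't shown either; a plausible sketch of such a method, with the caveat that '.story-content' below is a placeholder selector, not necessarily what the nytimes.com markup actually uses:

def _parse_soup(self, soup):
    # Join the text of the article body paragraphs into one string.
    paragraphs = soup.select('.story-content')
    return ' '.join(paragraph.text for paragraph in paragraphs).strip()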
Example #6
    def _query_href(self):
        """Grab the text from the href. 

        Returns: str of visible text from the href. 
        """
        try:
            soup = get_html(self.href)

            texts = soup.findAll(text=True)
            visible_texts = filter(find_visible_texts, texts)
        except Exception as e:
            print(e)
            # Fall back to placeholder text so the join below still returns.
            visible_texts = ['SSLError', 'happened']

        return ' '.join(visible_texts)
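`find_visible_texts` is the filter predicate used above. A common implementation, which this one is assumed to resemble, drops text nodes that live inside non-rendering tags or are HTML comments:

from bs4 import Comment

def find_visible_texts(element):
    # Text inside these tags never renders on the page.
    if element.parent.name in ('style', 'script', 'head', 'title', 'meta'):
        return False
    # Neither do HTML comments.
    if isinstance(element, Comment):
        return False
    return True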
Example #7
def process_album_title_hrefs(album_title_hrefs, album_titles):
    """Grab the critic and user scores for each inputted href. 

    Loop over the hrefs in `album_title_hrefs`, issue a get request on the URL 
    associated with that href, and then parse the content to grab the User and 
    Critic scores for that album. Store the User and Critic scores in a dictionary
    along with the Album title. Output it all in a list, with one entry per href. 

    Args: 
    ----
        album_title_hrefs: dict whose values are lists of href strings
        album_titles: list of strings

    Return: 
    ------
        final_json_lst: list
    """

    base_url = 'http://www.albumoftheyear.org'
    final_json_lst = []
    album_title_hrefs_lst = list(album_title_hrefs.values())
    for idx, href in enumerate(album_title_hrefs_lst[0]):
        soup = get_html(base_url + href)

        center_content_lst = list(select_soup(soup, '#centerContent').values())
        center_content = center_content_lst[0][0]
        user_score = int(find_score(center_content, 'USER SCORE'))
        critic_score = int(find_score(center_content, 'CRITIC SCORE'))

        json_dct = {
            'Album Title': album_titles[idx],
            "User Score": user_score,
            "Critic Score": critic_score
        }
        final_json_lst.append(json_dct)

    return final_json_lst
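`find_score` is assumed to pull the number that follows a label like 'USER SCORE' out of the page content; a minimal regex-based sketch:

import re

def find_score(content, label):
    # Grab the first run of digits after `label` in the element's text.
    match = re.search(label + r'\D*(\d+)', content.text)
    return match.group(1) if match else 0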
Example #8
    while attribute.find('Other') == -1:
        values[attribute] = value
        points_misc_idx += 1
        # The value is always the last item present, surrounded by (), and the 
        # 1+ items before that are the attributes to which those points belong. 
        split_text = sum_points_misc_lst[points_misc_idx].split()
        attribute = ' '.join(split_text[:-1])
        value = split_text[-1].replace('(', '').replace(')', '')
    values[attribute] = value
    points_misc_idx += 1

    return values, points_misc_idx 
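    # A worked example of the split logic above, on a hypothetical entry from
    # `sum_points_misc_lst`:
    #
    #     split_text = 'Year End Lists (40)'.split()
    #     attribute = ' '.join(split_text[:-1])                     # 'Year End Lists'
    #     value = split_text[-1].replace('(', '').replace(')', '')  # '40'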

if __name__ == '__main__':
    try: 
        year = sys.argv[1]
    except Exception as e: 
        print(e)
        raise Exception('<Usage> Input a year to grab music data for.')

    URL = 'http://www.albumoftheyear.org/list/summary/' + year + '/'
    soup = get_html(URL) 

    css_selectors = ['.artistTitle', '.albumTitle', '.summaryPoints', 
                     '.summaryPointsMisc']
    desired_contents = select_soup(soup, css_selectors)
    desired_contents_text = grab_contents_key(desired_contents, "text")
    desired_contents_renamed = rename_keys(desired_contents_text)
    final_lst = parse_contents(desired_contents_renamed)
    store_in_mongo(final_lst, 'music', 'music_lists')
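`select_soup` shows up throughout, called with both a single selector string and a list of selectors, and its result is treated as a dict mapping each selector to its matched elements. A sketch consistent with that usage (assumed, not the real source):

def select_soup(soup, css_selectors):
    # Accept a bare selector string or a list of them.
    if isinstance(css_selectors, str):
        css_selectors = [css_selectors]
    return {selector: soup.select(selector) for selector in css_selectors}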
Example #9
if __name__ == '__main__':
    try:
        job_title = sys.argv[1]
        job_location = sys.argv[2]
        radius = sys.argv[3]
    except IndexError:
        raise Exception(
            'Program needs a job title, job location, and radius inputted!')

    base_URL = 'http://www.simplyhired.com/search?'
    query_parameters = [
        'q={}'.format('+'.join(job_title.split())),
        '&l={}'.format('+'.join(job_location.split())),
        '&mi={}'.format(radius), '&fdb=5', '&clst=CTL'
    ]

    query_URL = format_query(base_URL, query_parameters)

    html = get_html(query_URL)
    try:
        num_jobs_txt = str(html.select('.result-headline')[0].text)
        num_jobs = int(parse_num(num_jobs_txt, 2))
    except (IndexError, ValueError):
        print('No jobs for search {} in {}'.format(job_title, job_location))
        sys.exit(0)

    current_date = str(datetime.datetime.now(pytz.timezone('US/Mountain')))
    storage_dct = {
        'job_site': 'simplyhired',
        'num_jobs': num_jobs,
        'date': current_date,
        'title': job_title,
        'location': job_location
    }
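`store_in_mongo` is the other shared helper; a pymongo-based sketch consistent with how it's called here (a list of dicts, a database name, a collection name, and an optional key to upsert on):

from pymongo import MongoClient

def store_in_mongo(lst, db_name, collection_name, key=None):
    client = MongoClient()
    collection = client[db_name][collection_name]
    if key is None:
        collection.insert_many(lst)
    else:
        # Upsert one document per dict, matching on the given key.
        for dct in lst:
            collection.replace_one({key: dct[key]}, dct, upsert=True)
    client.close()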
Example #10
    Args:
    ----
        rating_txt: str
            Text that potentially holds the rating. 
        idx: int
            Holds the rating if the text does not. 

    Return: int
    """

    if len(rating_txt) >= 1:
        rating = int(rating_txt[0].replace('.', ''))
    else:
        rating = idx

    return rating


if __name__ == '__main__':
    lists_url = 'http://www.albumoftheyear.org/lists.php'

    soup = get_html(lists_url)
    critics_content = select_soup(soup, '.criticListBlockTitle')
    critics_names = grab_contents_key(critics_content, "text")
    critics_links = grab_contents_key(critics_content, 'a')
    critics_hrefs = grab_contents_key(critics_links, 'href')

    raw_output = grab_critics_info(critics_names, critics_hrefs)
    formatted_output = [{"Album Title": k, "Critics Scores": v}
                        for k, v in raw_output.items()]
    store_in_mongo(formatted_output, 'music', 'music_lists', key="Album Title")
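`grab_contents_key` appears with 'text', a tag name ('a'), and an attribute ('href') as its key. A sketch inferred from those three call sites, not from the helper's real source:

def grab_contents_key(contents_dct, key):
    output = {}
    for selector, elements in contents_dct.items():
        if key == 'text':
            output[selector] = [element.text for element in elements]
        elif key == 'href':
            output[selector] = [element.get('href') for element in elements]
        else:
            # Treat any other key as a child tag to find, e.g. 'a'.
            output[selector] = [element.find(key) for element in elements]
    return output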