Example no. 1
0
def day_events(date):
    """Yield event dicts scraped from the Willamette Week listing page for *date*.

    Each yielded dict has keys: date, source ("wweek"), venue (a dict with
    name/address/phone), name, and artists. Listings whose event-name text
    is shorter than 2 characters are skipped.
    """
    soup = import_util.url_to_soup(date_to_url(date))
    # Find all anchors with a numeric name attribute, e.g. name="42820".
    # Raw string avoids the invalid "\d" escape (SyntaxWarning in 3.12+).
    anchors = soup('a', {'name': re.compile(r"\d+")})
    for anchor in anchors:
        small = anchor.findNextSibling("small")
        event = dict(date=date, source="wweek")
        # contents[1] looks like "| address, phone " — strip the pipe/space
        # padding, then split the phone number off at the last comma.
        venue_stuff = small.contents[1].strip("| ")
        address, phone = venue_stuff.rsplit(",", 1)
        event['venue'] = dict(name=small.b.string, address=address, phone=phone)
        event_name_span = small.findNextSibling("span", "headout_event")
        event_name = import_util.stringify(event_name_span)
        if len(event_name) < 2:
            # No usable event name — skip this listing entirely.
            continue
        event['name'], event['artists'] = parse_event(event_name)
        yield event
Example no. 2
0
def day_events(date):
    """Yield event dicts scraped from Portland Mercury listing pages for *date*.

    Each yielded dict has keys: source ("mercury"), date, name, venue
    (a dict with only a name), and artists (a list of stripped strings
    parsed from the listing title).
    """
    for url in event_urls_for_date(date):
        soup = import_util.url_to_soup(url)
        event = dict(source="mercury")
        event['date'] = date

        lt = soup.find("h1", "listingTitle")
        # Remove the feature-icon markup so it doesn't pollute the title text.
        crap = lt.find("div", "FeaturesIcons")
        if crap:
            crap.extract()
        event['name'] = import_util.stringify(lt).strip()

        el = soup.find("div", id="EventLocation")
        event['venue'] = dict(name=el.ul.li.h4.find("a", recursive=False).string)

        # Titles of the form "Name: A, B" carry the artist list after the
        # first colon; otherwise the whole title doubles as the artist list.
        artists_str = event['name']
        if ":" in artists_str:
            event['name'], artists_str = artists_str.split(":", 1)
        event['artists'] = [a.strip() for a in artists_str.split(",")]
        yield event