Example #1
File: data.py  Project: nprapps/books17
def get_books_goodreads_ids(input_filename=os.path.join('data', 'books.csv'),
        output_filename=os.path.join('data', 'goodreads_ids.csv')):
    """
    Retrieve Goodreads IDs corresponding to books in the books spreadsheet.

    """
    fieldnames = [
        # Only include enough fields to identify the book
        'title',
        'isbn',
        'goodreads_id'
    ]

    with open(input_filename) as readfile:
        reader = CSVKitDictReader(readfile, encoding='utf-8')
        reader.fieldnames = [name.strip().lower() for name in reader.fieldnames]

        with open(output_filename, 'wb') as fout:
            writer = CSVKitDictWriter(fout, fieldnames=fieldnames)
            writer.writeheader()

            for book in reader:

                output_book = {'title': book['title'], 'isbn': book['isbn'], 'goodreads_id': ''}

                if book['isbn']:
                    output_book['goodreads_id'] = Book.get_goodreads_id(book['isbn'])

                writer.writerow(output_book)

                # According to the Goodreads API documentation (https://www.goodreads.com/api/terms)
                # the rate limit is 1 request per second; sleep 2 seconds to stay safely under it.
                time.sleep(2)
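
Book.get_goodreads_id() is defined elsewhere in the project and is not shown in these snippets. A minimal sketch of an ISBN-to-ID lookup against the (since retired) Goodreads book.isbn_to_id endpoint might look like the following; the endpoint choice, the GOODREADS_API_KEY setting and the error handling are assumptions, not the project's actual code.

import os
import requests

# Assumed configuration; the real project presumably stores its key elsewhere.
GOODREADS_API_KEY = os.environ.get('GOODREADS_API_KEY', '')

def get_goodreads_id(isbn):
    """Return the Goodreads book ID for an ISBN, or '' if no match (sketch)."""
    # book.isbn_to_id returned the numeric ID as a plain-text response body.
    resp = requests.get('https://www.goodreads.com/book/isbn_to_id/%s' % isbn,
                        params={'key': GOODREADS_API_KEY})
    body = resp.text.strip()
    return body if resp.status_code == 200 and body.isdigit() else ''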
Example #2
def parse_books_csv():
    """
    Parses the books CSV to JSON.
    Creates book objects which are cleaned and then serialized to JSON.
    """
    get_tags()

    # Open the CSV.
    with open('data/books.csv', 'r') as readfile:
        reader = CSVKitDictReader(readfile, encoding='utf-8')
        reader.fieldnames = [
            name.strip().lower() for name in reader.fieldnames
        ]
        books = list(reader)

    logger.info("Start parse_books_csv(): %i rows." % len(books))

    book_list = []

    tags = {}

    for book in books:

        # Skip books with no title or ISBN
        if book['title'] == "":
            continue

        if book['isbn'] == "":
            logger.error('no isbn for title: %s' % book['title'])
            continue

        # Init a book class, passing our data as kwargs.
        # The class constructor handles cleaning of the data.
        try:
            b = Book(**book)
        except Exception as e:
            logger.error("Exception while parsing book: %s. Cause: %s" %
                         (book['title'], e))
            continue

        for tag in b.tags:
            if not tags.get(tag):
                tags[tag] = 1
            else:
                tags[tag] += 1

        # Grab the dictionary representation of a book.
        book_list.append(b.__dict__)
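
The Book class is not included in these snippets. A rough sketch of a constructor that takes a CSV row as keyword arguments and cleans it; the column names ('title', 'isbn', 'tags') and the cleaning rules are assumptions.

class Book(object):
    def __init__(self, **kwargs):
        # Strip stray whitespace from every incoming CSV value.
        cleaned = dict((k, (v or '').strip()) for k, v in kwargs.items())
        self.title = cleaned.get('title', '')
        self.isbn = cleaned.get('isbn', '')
        # Split an assumed comma-separated 'tags' column into a list.
        self.tags = [t.strip().lower()
                     for t in cleaned.get('tags', '').split(',')
                     if t.strip()]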
Example #3
def process_armlist():
    # Write output alongside the input; create the output folder if needed
    OUTPUT_PATH = INPUT_PATH
    if not os.path.exists(OUTPUT_PATH):
        os.makedirs(OUTPUT_PATH)

    # Initialize geocoder
    geocoder = Nominatim()

    with open('%s/%s.csv' % (INPUT_PATH, OUTPUT_FILE), 'w') as fout:
        writer = CSVKitDictWriter(fout,
                                  fieldnames=HEADER,
                                  extrasaction='ignore')
        writer.writeheader()
        with open('%s/%s.csv' % (INPUT_PATH, INPUT_FILE), 'r') as f:
            reader = CSVKitDictReader(f)
            count = 0
            for row in reader:
                count += 1
                if count % 1000 == 0:
                    print "processed %s records" % count
                if LIMIT and (count >= LIMIT_SAMPLE):
                    break

                # Clean data
                clean(row)
                # Geocode
                # geocode(row, geocoder)
                geocode_nominatim(row, geocoder)
                # Write to csv file
                writer.writerow(row)
            print('finished processing {}.csv'.format(INPUT_FILE))
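
geocode_nominatim() is defined elsewhere in the project. A sketch of what it might do, assuming geopy's Nominatim geocoder and a hypothetical 'address' column; the real column names may differ.

def geocode_nominatim(row, geocoder):
    """Add latitude/longitude values to a row in place (sketch)."""
    row.setdefault('latitude', '')
    row.setdefault('longitude', '')
    address = row.get('address', '')  # 'address' is an assumed column name
    if address:
        location = geocoder.geocode(address)  # geopy returns None on no match
        if location is not None:
            row['latitude'] = location.latitude
            row['longitude'] = location.longitude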
Example #4
def get_books_itunes_ids(input_filename=os.path.join('data', 'books.csv'),
                         output_filename=os.path.join('data',
                                                      'itunes_ids.csv')):
    """
    Retrieve iTunes IDs corresponding to books in the books spreadsheet.

    """
    fieldnames = [
        # Only include enough fields to identify the book
        'title',
        'isbn',
        'itunes_id',
    ]

    with open(input_filename) as readfile:
        reader = CSVKitDictReader(readfile, encoding='utf-8')
        reader.fieldnames = [
            name.strip().lower() for name in reader.fieldnames
        ]

        with open(output_filename, 'wb') as fout:
            writer = CSVKitDictWriter(fout, fieldnames=fieldnames)
            writer.writeheader()

            for book in reader:
                # Note that we don't create Book objects because the
                # parsing/lookup takes too long and we only need to lookup the
                # iTunes ID.

                output_book = {k: book[k] for k in fieldnames}

                if book['title']:
                    output_book['itunes_id'] = Book.get_itunes_id(
                        book['title'])

                writer.writerow(output_book)

                # Wait to avoid API throttling.  According to the Enterprise
                # Partner Feed documentation, the limit is ~20 calls per
                # minute.  See
                # https://affiliate.itunes.apple.com/resources/documentation/itunes-enterprise-partner-feed/
                # A 5-second sleep previously caused many requests to fail,
                # so wait 10 seconds between calls.
                time.sleep(10)
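
Book.get_itunes_id() is also defined elsewhere. A sketch of a title lookup against the public iTunes Search API; treating the first ebook result as the match is an assumption.

import requests

def get_itunes_id(title):
    """Return the iTunes ID of the first ebook matching a title, or '' (sketch)."""
    resp = requests.get('https://itunes.apple.com/search',
                        params={'term': title, 'media': 'ebook', 'limit': 1})
    results = resp.json().get('results', [])
    return str(results[0]['trackId']) if results else ''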
Example #5
def load_geocoded_cache():
    """ Load persisted geocoded locations"""
    try:
        with open('%s/cached_locations.csv' % CACHE_PATH, 'r') as f:
            reader = CSVKitDictReader(f)
            for row in reader:
                cache[row['address']] = [row['longitude'], row['latitude']]
    except IOError:
        # No cache file found
        pass
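
Example #8 below calls persist_cache(), the write-side counterpart of this loader, which is not shown in these snippets. A sketch assuming the same cached_locations.csv layout (address, longitude, latitude):

def persist_cache():
    """Write the in-memory geocoding cache back to disk (sketch)."""
    with open('%s/cached_locations.csv' % CACHE_PATH, 'w') as f:
        writer = CSVKitDictWriter(
            f, fieldnames=['address', 'longitude', 'latitude'])
        writer.writeheader()
        for address, (longitude, latitude) in cache.items():
            writer.writerow({'address': address,
                             'longitude': longitude,
                             'latitude': latitude})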
Example #6
def load_state_normalized():
    """ State with AP and USPS abbreviations"""
    try:
        with open('%s/%s.csv' % (INPUT_PATH, STATE_FILE), 'r') as f:
            reader = CSVKitDictReader(f)
            for row in reader:
                states[row['name']] = [row['ap'], row['usps']]
    except IOError:
        # No state file found
        pass
Example #7
def get_station_coverage_headlines(
        csv_path=DEFAULT_STATION_COVERAGE_CSV_PATH,
        output_path=DEFAULT_STATION_COVERAGE_HEADLINES_CSV_PATH,
        isbn_key=DEFAULT_ISBN_KEY,
        title_key=DEFAULT_TITLE_KEY,
        url_key=DEFAULT_URL_KEY,
        headline_key=DEFAULT_HEADLINE_KEY):
    """
    Get headlines for station coverage links.

    Args:
        csv_path (str): Path to input CSV file.
        output_path (str): Path to output CSV file.
        isbn_key (str): Column name in the CSV data for the column that
            contains the book's ISBN.
        title_key (str): Column name in the CSV data for the column that
            contains the book's title.
        url_key (str): Column name in the CSV data for the column that
            contains the station coverage URL.
        headline_key (str): Column name in the CSV data for the column that
            contains the station coverage headline.

    """
    with open(csv_path) as f:
        reader = CSVKitDictReader(f)

        with open(output_path, 'wb') as fout:
            fieldnames = [title_key, isbn_key, headline_key]
            writer = CSVKitDictWriter(fout, fieldnames=fieldnames)
            writer.writeheader()

            for row in reader:
                output_row = {}
                output_row[isbn_key] = row[isbn_key]
                output_row[title_key] = row[title_key]
                url = row[url_key]
                if url:
                    output_row[headline_key] = get_link_title(url)
                writer.writerow(output_row)
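
get_link_title() is not shown in these snippets. A minimal sketch that fetches the page and returns its <title> text, using requests and BeautifulSoup; the real implementation may differ.

import requests
from bs4 import BeautifulSoup

def get_link_title(url):
    """Return the text of a page's <title> tag, or '' on failure (sketch)."""
    try:
        resp = requests.get(url, timeout=10)
    except requests.RequestException:
        return ''
    soup = BeautifulSoup(resp.text, 'html.parser')
    if soup.title and soup.title.string:
        return soup.title.string.strip()
    return ''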
Example #8
def run(args):
    try:
        if args.debug:
            logger.setLevel(logging.DEBUG)

        if args.no_cache:
            load_geocoded_cache()

        # Create output
        if not os.path.exists(OUTPUT_PATH):
            os.makedirs(OUTPUT_PATH)

        # Initialize geocoder
        geocoder = Nominatim()

        with open('%s/output.csv' % OUTPUT_PATH, 'w') as fout:
            writer = CSVKitDictWriter(fout, fieldnames=HEADER,
                                      extrasaction='ignore',
                                      quoting=QUOTE_ALL)
            writer.writeheader()
            with open(args.input, 'r') as f:
                reader = CSVKitDictReader(f)
                logger.info('start processing %s' % args.input)
                for ix, row in enumerate(reader):
                    if (ix + 1) % 100 == 0:
                        logger.debug("processed %s records" % (ix + 1))
                    if args.sample and (ix >= args.sample):
                        break
                    # Geocode
                    geocode_nominatim(row, geocoder)
                    # Write to csv file
                    writer.writerow(row)
                logger.info('finished processing %s' % args.input)
    finally:
        if args.no_cache:
            # Persist the cache file to disk even if an exception occurred
            persist_cache()
Example #9
IMPORTANT = []
with open('important.txt', 'r') as readfile:
    IMPORTANT = [docket.strip() for docket in readfile.read().split(',')]

def send_message(message):
    payload = {}
    payload['text'] = message
    payload['icon_url'] = 'https://s3-us-west-2.amazonaws.com/slack-files2/avatars/2014-12-19/3261146648_b3d6178658b2635020f0_48.jpg'
    payload['username'] = "******"

    payload_string = json.dumps(payload)
    r = requests.post(WEBHOOK_URL, data=payload_string)

with open('old.csv', 'r') as readfile:
    old_cases = list(CSVKitDictReader(readfile))

with open('new.csv', 'r') as readfile:
    new_cases = list(CSVKitDictReader(readfile))

new_case_ids = set(x['docket'] for x in new_cases)
old_case_ids = set(x['docket'] for x in old_cases)

if len(new_cases) > len(old_cases):
    cases = list(new_case_ids.difference(old_case_ids))
    for c in cases:
        for co in new_cases:
            if c == co['docket']:
                message = ''
                if co['docket'] in IMPORTANT:
                    message += ":star: "
Example #10
def merge_external_links(
        books_csv_path=DEFAULT_BOOKS_CSV,
        links_json_path=DEFAULT_EXTERNAL_LINKS_JSON_PATH,
        output_csv_path=DEFAULT_EXTERNAL_LINKS_OUTPUT_CSV_PATH):
    """
    Create a CSV file containing external links.

    Create a CSV file containing external links that can be copied into the
    books Google Spreadsheet.

    Args:
        books_csv_path (str): Path to CSV file containing data from the books
            Google Spreadsheet.
        links_json_path (str): Path to JSON file created by
            `parse_external_links_csv()`.
        output_csv_path (str): Path to output CSV file.

    """
    fieldnames = [
        # Only include enough fields to identify the book
        'title',
        'isbn',
        'external_links_html',
    ]
    with open(links_json_path) as jsonf:
        lookup = json.load(jsonf)
        matched = set()

        with open(books_csv_path) as readfile:
            reader = CSVKitDictReader(readfile, encoding='utf-8')
            reader.fieldnames = [
                name.strip().lower() for name in reader.fieldnames
            ]

            with open(output_csv_path, 'wb') as fout:
                writer = CSVKitDictWriter(fout, fieldnames=fieldnames)
                writer.writeheader()

                for book in reader:
                    output_book = {
                        'title': book['title'],
                        'isbn': book['isbn'],
                        'external_links_html': '',
                    }

                    if book['isbn']:
                        try:
                            links, matching_isbn = lookup_links_by_isbn(
                                book['isbn'], lookup)
                            output_book['external_links_html'] = ','.join(
                                links)
                            matched.add(matching_isbn)
                        except KeyError:
                            # No matching member station coverage.  This is OK.
                            pass

                    writer.writerow(output_book)

            # Do an audit to see if there are any ISBNs in the member station
            # responses that didn't match books.
            for isbn in lookup:
                if isbn not in matched:
                    logger.warning("No matching book found for ISBN %s" % isbn)