from collections import OrderedDict

from nose.tools import assert_equals  # assumed source of the nose-style asserts used below
import scraperwiki

import extract  # module under test


def test_it_saves_a_unicode_csv_to_the_database():
    sheets = extract.validate(extract.extract('fixture/mps_unicode.csv'))
    extract.save(sheets)

    data = scraperwiki.sql.select('* from swdata')
    row = data[460]
    assert_equals(row['MP Name'], 'Michelle Gildernew')
    assert_equals(row['Party'], u'Sinn Féin')


def test_it_saves_to_the_database():
    sheets = extract.validate(extract.extract('fixture/simple.xlsx'))
    extract.save(sheets)

    data = scraperwiki.sql.select('* from Sheet1')
    row = data[2]
    assert_equals(row['Year'], 2012)
    assert_equals(row['Awesomeness'], 8)


def test_it_can_extract_an_html_csv():
    sheets = extract.validate(extract.extract('fixture/twitter-archive.csv'))
    assert_equals(len(sheets), 1)

    row = sheets['swdata'][3]
    assert_equals(
        row['source'],
        '<a href="http://www.tweetdeck.com" rel="nofollow">TweetDeck</a>')


def test_it_can_extract_a_unicode_csv():
    sheets = extract.validate(extract.extract('fixture/mps_unicode.csv'))
    assert_equals(len(sheets), 1)

    sheet = sheets['swdata']
    assert_equals(len(sheet), 653)
    row = sheet[460]
    assert_equals(row['MP Name'], 'Michelle Gildernew')
    assert_equals(row['Party'], u'Sinn Féin')


def test_it_can_extract_a_simple_csv_file():
    sheets = extract.validate(extract.extract('fixture/simple.csv'))
    assert_equals(type(sheets), OrderedDict)
    assert_equals(len(sheets), 1)

    sheet = sheets['swdata']
    assert_equals(len(sheet), 4)
    row = sheet[2]
    assert_equals(row['Year'], 2012)
    assert_equals(row['Awesomeness'], 8)


def test_it_can_extract_a_latin1_csv():
    sheets = extract.validate(extract.extract('fixture/ENH-CCG-spend-2013.csv'))
    assert_equals(len(sheets), 1)

    row = sheets['swdata'][1]
    assert_equals(row['Purchase invoice number'], 11223)
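

# --- Illustrative sketch (assumption, not part of the project) ---------------
# The tests above treat extract.validate(extract.extract(path)) as returning an
# OrderedDict that maps sheet names ('swdata' for CSV files, the worksheet name
# such as 'Sheet1' for .xlsx files) to lists of row dicts, and extract.save()
# as writing each sheet to a scraperwiki SQL table of the same name. A minimal
# save() consistent with that behaviour might look like the hypothetical helper
# below; it is a sketch for illustration only, not the module's actual code.
def _save_sheets_sketch(sheets):
    for table_name, rows in sheets.items():
        # scraperwiki.sql.save(unique_keys, data, table_name=...) creates the
        # table if necessary and inserts the rows.
        scraperwiki.sql.save([], rows, table_name=table_name)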