Example #1
import xlrd
from django.http import HttpResponse

from .models import Agency  # assumed import path for the Agency model


def load_agencies(request, indexfile=""):

    # Open the summary Excel workbook
    wbin = xlrd.open_workbook(indexfile)

    # Agencies sheet: one row per agency; row 0 is the header
    sh_source = wbin.sheet_by_name('Agencies')
    for rownum in range(1, sh_source.nrows):
        row = sh_source.row_values(rownum)
        shortname = row[0]
        fullname = row[1]
        parentname = row[2]
        website = row[3]
        scraped = True
        newagency = Agency(shortname=shortname, name=fullname, parentname=parentname,
                           website=website, notes="", scraped=scraped)
        newagency.save()
    return HttpResponse("Agency details loaded from file.")
Example #2
import xlrd
from django.http import HttpResponse

from .models import Agency  # assumed import path for the Agency model


def load_agencies(request, indexfile=""):

    # Open the summary Excel workbook
    wbin = xlrd.open_workbook(indexfile)

    # Agencies sheet: one row per agency; row 0 is the header
    sh_source = wbin.sheet_by_name('Agencies')
    for rownum in range(1, sh_source.nrows):
        row = sh_source.row_values(rownum)
        shortname = row[0]
        fullname = row[1]
        parentname = row[2]
        website = row[3]
        scraped = True
        newagency = Agency(shortname=shortname,
                           name=fullname,
                           parentname=parentname,
                           website=website,
                           notes="",
                           scraped=scraped)
        newagency.save()
    return HttpResponse("Agency details loaded from file.")
Example #3
import xlrd
from datetime import datetime

from django.http import HttpResponse

from .models import Agency, Datasource  # assumed import path for the models


def load_indicator_spreadsheet(request, indexfile=""):

    # Open the summary Excel workbook
    wbin = xlrd.open_workbook(indexfile)

    # Agencies sheet: one row per agency; row 0 is the header
    sh_source = wbin.sheet_by_name('Agencies')
    for rownum in range(1, sh_source.nrows):
        row = sh_source.row_values(rownum)
        shortname = row[0]
        fullname = row[1]
        parentname = row[2]
        website = row[3]
        scraped = True
        newagency = Agency(shortname=shortname,
                           name=fullname,
                           parentname=parentname,
                           website=website,
                           notes="",
                           scraped=scraped)
        newagency.save()

    # Datasources actually used: local file is in column 5, URL in column 7
    sh_source = wbin.sheet_by_name('Datasources Used')
    # FIXIT: should be the times when the scrapers were run, not the date
    # the file was uploaded
    todaysdate = datetime.now()
    for rownum in range(1, sh_source.nrows):
        row = sh_source.row_values(rownum)
        shortname = row[0]
        fullname = row[1]
        sourceagency = row[2]
        sourcefile = row[5]
        sourceurl = row[7]
        newsource = Datasource(shortname=shortname,
                               name=fullname,
                               notes="",
                               url=sourceurl,
                               dateadded=todaysdate,
                               datechecked=todaysdate,
                               scraper=True,
                               localfile=sourcefile)
        newsource.save()

    sh_source = wbin.sheet_by_name('Suggested Sources')
    for rownum in range(1, sh_source.nrows):
        row = sh_source.row_values(rownum)
        shortname = row[0]
        fullname = row[1]
        sourceagency = row[2]
        sourceurl = row[3]
        newsource = Datasource(shortname=shortname,
                               name=fullname,
                               notes="",
                               url=sourceurl,
                               dateadded=todaysdate,
                               datechecked=todaysdate,
                               scraper=False,
                               localfile="")
        newsource.save()

    # Indicators needed: map indicator code -> name
    indicators = {}
    sh_ind = wbin.sheet_by_name('Indicators Needed')
    for rownum in range(1, sh_ind.nrows):
        row = sh_ind.row_values(rownum)
        indcode = row[0]
        indname = row[1]
        indicators[indcode] = indname
##        newindicator = Indicator()
##        newindicator.save()

    # Indicators found in sources: map source code -> {csv heading: indicator code}
    found = {}
    sh_found = wbin.sheet_by_name('Indicators Found')
    for rownum in range(1, sh_found.nrows):
        row = sh_found.row_values(rownum)
        sourcecode = row[0]
        indcode = row[1]
        csvheading = row[4]
        # Only process indicators that we have data for
        if csvheading != "":
            if sourcecode not in found:
                found[sourcecode] = {}
            found[sourcecode][csvheading] = indcode
##        newfound = foundIndicator(indicator=, datasource=,
##                                  csvfile=, csvindname=,
##                                  datasourceindname=,
##                                  datasourcedesc=, notes=)
##        newfound.save()

    return HttpResponse("Indicator details loaded from file.")
Example #4
import xlrd
from datetime import datetime

from django.http import HttpResponse

from .models import Agency, Datasource  # assumed import path for the models


def load_indicator_spreadsheet(request, indexfile=""):

    # Open the summary Excel workbook
    wbin = xlrd.open_workbook(indexfile)

    # Agencies sheet: one row per agency; row 0 is the header
    sh_source = wbin.sheet_by_name('Agencies')
    for rownum in range(1, sh_source.nrows):
        row = sh_source.row_values(rownum)
        shortname = row[0]
        fullname = row[1]
        parentname = row[2]
        website = row[3]
        scraped = True
        newagency = Agency(shortname=shortname, name=fullname, parentname=parentname,
                           website=website, notes="", scraped=scraped)
        newagency.save()
    
    # Datasources actually used: local file is in column 5, URL in column 7
    sh_source = wbin.sheet_by_name('Datasources Used')
    # FIXIT: should be the times when the scrapers were run, not the date
    # the file was uploaded
    todaysdate = datetime.now()
    for rownum in range(1, sh_source.nrows):
        row = sh_source.row_values(rownum)
        shortname = row[0]
        fullname = row[1]
        sourceagency = row[2]
        sourcefile = row[5]
        sourceurl = row[7]
        newsource = Datasource(shortname=shortname, name=fullname, notes="",
                               url=sourceurl,
                               dateadded=todaysdate, datechecked=todaysdate,
                               scraper=True, localfile=sourcefile)
        newsource.save()
        
    sh_source = wbin.sheet_by_name('Suggested Sources')
    for rownum in range(1, sh_source.nrows):
        row = sh_source.row_values(rownum)
        shortname = row[0]
        fullname = row[1]
        sourceagency = row[2]
        sourceurl = row[3]
        newsource = Datasource(shortname=shortname, name=fullname, notes="",
                               url=sourceurl,
                               dateadded=todaysdate, datechecked=todaysdate,
                               scraper=False, localfile="")
        newsource.save()
        
    # Indicators needed: map indicator code -> name
    indicators = {}
    sh_ind = wbin.sheet_by_name('Indicators Needed')
    for rownum in range(1, sh_ind.nrows):
        row = sh_ind.row_values(rownum)
        indcode = row[0]
        indname = row[1]
        indicators[indcode] = indname
##        newindicator = Indicator()
##        newindicator.save()

    # Indicators found in sources: map source code -> {csv heading: indicator code}
    found = {}
    sh_found = wbin.sheet_by_name('Indicators Found')
    for rownum in range(1, sh_found.nrows):
        row = sh_found.row_values(rownum)
        sourcecode = row[0]
        indcode = row[1]
        csvheading = row[4]
        # Only process indicators that we have data for
        if csvheading != "":
            if sourcecode not in found:
                found[sourcecode] = {}
            found[sourcecode][csvheading] = indcode
##        newfound = foundIndicator(indicator=, datasource=,
##                                  csvfile=, csvindname=,
##                                  datasourceindname=,
##                                  datasourcedesc=, notes=)
##        newfound.save()
    
    return HttpResponse("Indicator details loaded from file.")