def handleAtlasFile(self):
    """Parse the atlas file at ``self.curFilePath`` and build its texture and frames.

    Returns:
        (Texture, list[Frame]) on success, or ``(texture_path, None)`` when
        the companion ``.png`` texture file does not exist (legacy contract
        kept for existing callers).
    """
    # Each atlas line is "<frameName> <field> <field> ...", space-separated;
    # the first token keys the remaining fields.
    # Fix: use a context manager so the atlas file is always closed.
    with open(self.curFilePath) as atlasFileData:
        dataLines = atlasFileData.readlines()

    frames = {}
    for dataLine in dataLines:
        frameInfos = str(dataLine).split(" ")
        frames[str(frameInfos[0])] = frameInfos[1:]

    # Split the path into directory and file name; the texture shares the
    # atlas file's base name with a .png extension.
    splitPos = self.curFilePath.rfind("/")
    fileDir = self.curFilePath[0:splitPos + 1]

    fileName = self.curFilePath[splitPos + 1:]
    dotPos = fileName.rfind(".")
    textureFileName = fileName[0:dotPos] + ".png"

    texture = Texture()
    texture.texDir = fileDir
    texture.texName = textureFileName

    texPath = str(texture.texDir + texture.texName)
    if not os.path.exists(texPath):
        # Parenthesized print works under both Python 2 and 3
        # (the original used a Python-2-only print statement).
        print("texture %s not exist!" % texPath)
        return texPath, None
    texFile = Image.open(texPath)
    texSize = texFile.size
    # Bug fix: height previously reused texSize[0] (the width),
    # producing a square size for non-square textures.
    w, h = texSize[0], texSize[1]
    texture.init(fileDir, textureFileName, w, h)

    frameArr = []
    for key in frames:
        frame = Frame()
        frame.name = str(key)
        frame.tex = texture
        self.decodeFrame(frame, frames[key])
        frameArr.append(frame)

    return texture, frameArr
# Example #2
def get_regionalized_dataframe(data, label_field):
    """Collect the rows of *data* whose label matches a country in the
    module-level ``groups``, canonicalizing the label to the country's
    primary name.

    Relies on free variables ``groups`` and ``c`` (a country list)
    defined elsewhere in the module.
    """
    result = Frame()
    # Walk every group, every country in it, and every data row,
    # keeping rows whose label is the country name or one of its aliases.
    for group in groups:
        for country in c.get_groups(group):
            for row in data.rows:
                label_value = row.get_by_column_name(label_field).value.get()
                if label_value == country.name or label_value in country.alias:
                    # Normalize aliases to the canonical country name.
                    row.get_by_column_name(label_field).value.value = country.name
                    result.rows.append(row)
    return result
# Example #3
def extract_countries(g, column_name, df):
    """Return a new Frame with the rows of *df* whose value in
    *column_name* is the name (or an alias) of a country listed in *g*.
    """
    result = Frame()
    country_list = CountryList()
    country_list.load_wb()

    # Accepted labels: each country's canonical name plus all its aliases.
    accepted = []
    for requested_name in g:
        country = country_list.get_country_by_name(requested_name)
        accepted.append(country.name)
        accepted.extend(country.alias)

    for row in df.rows:
        if row.get_by_column_name(column_name).value.value in accepted:
            result.rows.append(row)

    return result
# Example #4
def get(data, groups, country_field):
    """Filter *data* down to the rows whose country field belongs to any
    of the requested country *groups*.
    """
    filtered = Frame()
    country_list = CountryList()
    country_list.load_wb()

    # Deduplicated list of country names covered by the requested groups.
    wanted = []
    for country in country_list.get_groups(groups):
        if country.name not in wanted:
            wanted.append(country.name)

    for row in data.rows:
        if row.get_by_column_name(country_field).value.get() in wanted:
            filtered.rows.append(row)

    return filtered
# Example #5
def json_to_frame(j):
    """Build a Frame by adding each record of the JSON iterable *j* as a row."""
    frame = Frame()
    for record in j:
        frame.add_row(record)
    return frame
# Example #6
import re
import requests
from data.Frame import Frame
from countries.list import CountryList
from sources.sdgs.utils import json_to_frame

# Module-level Frame intended to hold downloaded SDG series data.
# NOTE(review): load_series() below binds a *local* `series`, so this
# global is never actually populated by it — confirm intended behavior.
series = Frame()


def load_series():
    """Download the SDG series list and return it as a Frame.

    NOTE(review): `url` is not defined in this function nor visibly at
    module level — calling this as-is raises NameError. Confirm the
    intended endpoint (cf. get_series, which targets
    https://unstats.un.org/SDGAPI/v1/sdg/Series/Data).
    """

    series = json_to_frame(requests.get(url).json())
    return series


def get_series(series_code,
               area_code,
               time_period,
               dimensions,
               page_size=10000):
    """Fetch SDG series data matching the given query fragments.

    Each argument (except page_size) is expected to be a preformatted
    "key=value" query fragment; they are joined with '&' onto the
    Series/Data endpoint.

    Returns the response JSON converted to a Frame.
    """
    url = 'https://unstats.un.org/SDGAPI/v1/sdg/Series/Data'
    q = "%s?%s&%s&%s&%s&%s" % (url, series_code, area_code, time_period,
                               dimensions, page_size)
    # Bug fix: the composed query string `q` was built but the bare `url`
    # was requested, silently ignoring every parameter.
    series = json_to_frame(requests.get(q).json())
    return series
# Example #7
import re
import requests
from data.Frame import Frame
from geo.list import CountryList
from sources.sdgs.utils import json_to_frame

# Module-level Frame intended to hold the downloaded GeoArea list.
# NOTE(review): load_areas() below binds a *local* `areas`, so this
# global is never populated by it — confirm intended behavior.
areas = Frame()


def load_areas():
    """Download the SDG GeoArea list and return it as a Frame."""
    endpoint = 'https://unstats.un.org/SDGAPI/v1/sdg/GeoArea/List'
    response = requests.get(endpoint)
    return json_to_frame(response.json())
# Example #8
import re
import requests
from data.Frame import Frame
from countries.list import CountryList

# UN-Habitat urbandata endpoint returning indicator filter options as JSON.
__url_datasets = 'http://urbandata.unhabitat.org/wp-admin/admin-ajax.php?action=load_from_oipa&call=indicator-filter-options&format=json'

# Module-level Frames intended to hold downloaded metadata.
# NOTE(review): load_datasets() below only binds a local name, so these
# globals are never populated here.
regions = Frame()
indicators = Frame()
cities = Frame()
countries = Frame()

def load_datasets():
    """Download UN-Habitat dataset filter options and return them as a Frame.

    NOTE(review): `json_to_frame` is not imported in this module's visible
    import block — confirm its source module, otherwise this raises
    NameError at call time. The local `domains` does not update any
    module-level Frame.
    """
    url = __url_datasets

    domains = json_to_frame(requests.get(url).json())
    return domains

# Example #9
''' Download FAO data '''
import re
import requests
from data.Frame import Frame
from sources.fao.utils import json_to_frame
from geo.list import CountryList

__version__ = '0.1.0'

# Bulk-download listing of all FAOSTAT datasets (English).
__url_datasets__ = "http://fenixservices.fao.org/faostat/static/bulkdownloads/datasets_E.json"

# Module-level containers for downloaded FAO metadata.
# NOTE(review): the loader functions below bind locals of the same names,
# so these globals are not populated by them — confirm intended behavior.
datasets = []

domains = Frame()
countries = Frame()


def get_domains():
    """Fetch the FAOSTAT groups-and-domains listing and return it as a Frame."""
    endpoint = 'http://fenixservices.fao.org/faostat/api/v1/en/groupsanddomains'
    response = requests.get(endpoint)
    return json_to_frame(response.json())


def load_countries():
    """Fetch the FAOSTAT country code list (IG group) and return it as a Frame."""
    endpoint = 'http://fenixservices.fao.org/faostat/api/v1/en/codes/countries/IG/?show_lists=true'
    response = requests.get(endpoint)
    return json_to_frame(response.json())


# Get the code of a country
# needed to perform a query
# Example #10
def calculate_weighted_average(df_data, df_weight, label_field):
    """Pair every data column with its weight and weighted value, per row.

    For each row of *df_data*, builds a row holding the label plus, for each
    data column C, three columns: "C_value", "C_weight" and
    "C_weighted_value", where weighted value = (weight / total_weight) * value.
    A final 'Average' row sums every numeric column.

    NOTE(review): relies on a free function `is_number` defined elsewhere in
    the module. df_weight rows are matched to data rows via *label_field*;
    presumably one weight row per label — verify against callers.
    NOTE(review): the "C_value" cell is added before the missing-weight check
    below, so it keeps the raw value even when the weight is absent —
    confirm this asymmetry is intended.
    """

    df_w = Frame()

    # First pass: build the output rows with value/weight cells and a
    # placeholder weighted-value cell.
    # Go through all data rows
    for d in df_data.rows:
        dr = Row()
        # Now search data for weights based in the label field
        # We have to also search in a row for the label_field
        # and just the first result as the list will only contain
        # one value
        dr.add_cell(
            Cell(column=label_field,
                 value=d.get_by_column_name(label_field).value.value))
        # If we are in a column that is not the label column
        # Now we go through all columns
        for c in d.cells:
            if c.column.name != label_field:
                data_value = d.get_by_column_name(c.column.name).value.value

                dr.add_cell(
                    Cell(column="%s_%s" % (c.column.name, 'value'),
                         value=data_value))

                # in the new row we have already created the label field that we
                # can use here
                # We have to check that the data point exists for the value that we are trying to weight
                if (len(
                        df_weight.search(
                            label_field,
                            dr.get_by_column_name(
                                label_field).value.value).rows) > 0):
                    weight_value = df_weight.search(
                        label_field,
                        dr.get_by_column_name(label_field).value.value
                    ).rows[0].get_by_column_name(c.column.name).value.value
                else:
                    weight_value = None

                if data_value is None or weight_value is None:
                    data_value = None
                    weight_value = None

                dr.add_cell(
                    Cell(column="%s_%s" % (c.column.name, 'weight'),
                         value=weight_value))

                # We will store the weighted value here

                dr.add_cell(
                    Cell(column="%s_%s" % (c.column.name, 'weighted_value'),
                         value=None))

        # We add a column where the weighted value will be stored
        df_w.rows.append(dr)

    # Second pass: fill in the weighted values now that the total weight
    # per column can be computed from the completed first pass.
    # Now let's calculated the weighted values
    for d in df_data.rows:
        for c in d.cells:
            if c.column.name != label_field:
                # Initialize variables to avoid carried-over values
                # from previous iteration

                data_value = None
                weight_value = None
                weighted_value = None

                # Check if weight value exists for data point
                if len(
                        df_weight.search(
                            label_field,
                            d.get_by_column_name(
                                label_field).value.value).rows) > 0:

                    if c.value.value is not None and df_weight.search(
                            label_field,
                            d.get_by_column_name(label_field).value.value
                    ).rows[0].get_by_column_name(
                            c.column.name).value.value is not None:

                        data_value = c.value.value

                    weight_value = df_weight.search(
                        label_field,
                        d.get_by_column_name(label_field).value.value
                    ).rows[0].get_by_column_name(c.column.name).value.value

                    total_weight = df_w.get_column(
                        "%s_%s" % (c.column.name, 'weight')).sum()

                    if data_value is not None and weight_value is not None:
                        if is_number(data_value) == True and is_number(
                                weight_value) == True:
                            weighted_value = (
                                float(weight_value) /
                                float(total_weight)) * float(data_value)
                        else:
                            weighted_value = None
                    else:

                        weighted_value = None

                # If weight value does not exists for the data point
                else:
                    weighted_value = None

                # Write the result back into the matching output row.
                df_w.search(
                    label_field,
                    d.get_by_column_name(
                        label_field).value.value).rows[0].get_by_column_name(
                            "%s_%s" %
                            (c.column.name,
                             'weighted_value')).value.value = weighted_value

    # Append a summary 'Average' row: the sum of every numeric column.
    dr = Row()
    dr.add_cell(Cell(column=label_field, value='Average'))

    for c in df_w.get_column_names():
        total_weight = None
        if df_weight.get_column(c).get_type() == 'number':
            total_weight = df_w.get_column(c).sum()
            dr.add_cell(Cell(column=c, value=float(total_weight)))

    df_w.rows.append(dr)

    # Provenance metadata combining both input frames.
    df_w.id = df_data.id + " - " + df_weight.id
    df_w.description = df_data.description + " weighted against " + df_weight.description
    df_w.source = df_data.id + " : " + df_data.source + " and " + df_weight.id + " : " + df_weight.source
    df_w.source_url = df_data.id + " : " + df_data.source_url + " and " + df_weight.id + " : " + df_weight.source_url

    return df_w
# Example #11
''' Download SDGs data '''
import re
import requests
from data.Frame import Frame
from geo.list import CountryList
from sources.sdgs.utils import json_to_frame

__version__ = '0.1.0'
# Base URL of the UN SDG API.
__url__ = "https://unstats.un.org/SDGAPI/v1/"

datasets = []

# Module-level Frames for downloaded SDG metadata.
# NOTE(review): load_indicators() below binds a local `indicators`, so
# these globals are not populated by it — confirm intended behavior.
domains = Frame()
countries = Frame()
indicators = Frame()

def load_indicators():
    """Download the SDG indicator list and return it as a Frame."""
    url = 'https://unstats.un.org/SDGAPI/v1/sdg/indicator/List'
    indicators = json_to_frame(requests.get(url).json())
    # Bug fix: previously returned the unrelated module-level `countries`
    # Frame instead of the indicators just downloaded.
    return indicators