Example #1
def get_wgi_indicator_dict(ind_id, year):
    index_url = '{}{}?format={}&date={}'.format(BASE_API, ind_id, FORMAT, year)
    response = get_response(index_url).json()
    pages = response[0]['pages']
    ind_countries_info = dict()
    for page in range(1, pages + 1):
        response = get_response('{}&page={}'.format(index_url, page)).json()
        ind_infos = response[1]
        for info in ind_infos:
            country_code = info['countryiso3code']
            value = info['value']
            year = info['date']
            if value is not None:
                value = round(value, 2)
            ind_country_info = dict()
            ind_country_info['year'] = year
            ind_country_info['value'] = value
            ind_countries_info[country_code] = ind_country_info
    return ind_countries_info
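BASE_API, FORMAT and get_response are module-level names that are not shown in this snippet. A minimal sketch of plausible definitions, assuming the World Bank v2 indicator API and a thin requests wrapper (the names and values here are illustrative, not necessarily the original project's):

import requests

# Assumed values; the original module defines its own BASE_API and FORMAT.
BASE_API = 'https://api.worldbank.org/v2/country/all/indicator/'
FORMAT = 'json'

def get_response(url):
    # Thin wrapper around requests.get that fails loudly on HTTP errors.
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    return response

# Example call (indicator code shown for illustration):
# wgi_2018 = get_wgi_indicator_dict('CC.EST', 2018)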
Example #2
    def _download_ts(self, index, url):
        ts_response = get_response(url, use_proxy=self.use_proxy, timeout=10)
        if ts_response.status_code != 200:
            raise Exception(f'download {url} failed')
        else:
            digit = len(str(self.total))
            ts_id = f'{index:0{digit}}.ts'
            file = Path.joinpath(PATH_TEMP, ts_id)
            with open(file, 'wb') as ts_file:
                ts_file.write(ts_response.content)
Example #3
def get_hdi_index_id(index_name):
	result=get_response(index_id_url)
	index_id_data=result.json()
	index_id=None
	for index_info in index_id_data:
		if index_info.get('indicator')==index_name:
			index_id=index_info.get('id')
			break
	# TODO: rewrite to return a dictionary: {index_id: index_name}
	# TODO: add raise for status (see the sketch after this function)
	return index_id
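A possible revision that addresses both TODO comments above, assuming index_id_url comes from the surrounding module and that get_response returns a requests.Response; the helper name is hypothetical:

def get_hdi_index_id_dict(index_name):
    result = get_response(index_id_url)
    result.raise_for_status()  # "add raise for status"
    for index_info in result.json():
        if index_info.get('indicator') == index_name:
            # "rewrite to return a dictionary: {index_id: index_name}"
            return {index_info.get('id'): index_name}
    return {}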
Example #4
def get_gpi_index_dictionary(year):

    response = get_response(index_url_for_download)
    if not response:
        return None
    gpi_data = response.content
    data_file = write_data(file_path, gpi_data)

    df = read_pdf(file_path, pages='10,11')
    frame_part1 = df[['COUNTRY', 'SCORE']]
    frame_part2 = df[['COUNTRY SCORE.1', 'Unnamed: 10']]
    frame_part3 = df[['COUNTRY SCORE', 'Unnamed: 6']]

    frame_part2 = frame_part2.rename(columns={
        'COUNTRY SCORE.1': 'COUNTRY',
        'Unnamed: 10': 'SCORE'
    })
    frame_part3 = frame_part3.rename(columns={
        'COUNTRY SCORE': 'COUNTRY',
        'Unnamed: 6': 'SCORE'
    })

    df = pd.DataFrame(np.concatenate(
        [frame_part1.values, frame_part2.values, frame_part3.values]),
                      columns=frame_part1.columns)
    df = df.dropna()
    df = df[df['COUNTRY'] != 'COUNTRY']
    df['SCORE'] = df['SCORE'].astype(float).round(3)
    df = df.set_index('COUNTRY')
    iso_names=['Czechia', 'Bolivia, Plurinational State of',\
    'Kyrgyzstan', "Côte d'Ivoire",'Viet Nam','Gambia',\
    'Macedonia, the former Yugoslav Republic of','Moldova, Republic of','Bosnia and Herzegovina','Palestine, State of',\
    'Venezuela, Bolivarian Republic of',"Korea, Democratic People's Republic of",'Russian Federation',\
    'Congo, Democratic Republic of the', 'Central African Republic','Syrian Arab Republic',\
    'United States of America', 'United Kingdom of Great Britain and Northern Ireland',\
    'Taiwan, Province of China',"Lao People's Democratic Republic",'Tanzania, United Republic of',\
    'Korea, Republic of','Congo','Iran, Islamic Republic of']

    countries_to_rename=['Czech Republic', 'Bolivia', \
    'Kyrgyz Republic', "Cote d' Ivoire", 'Vietnam', 'The Gambia',\
     'North Macedonia', 'Moldova', 'Bosnia & Herzegovina', 'Palestine', \
     'Venezuela', 'North Korea', 'Russia',\
     'Dem. Rep of the Congo', 'Central African Rep', 'Syria',\
     'USA','United Kingdom',\
     'Taiwan', 'Laos', 'Tanzania',\
      'South Korea', 'Rep of the Congo', 'Iran']

    gpi_map = df.T.to_dict('list')
    for country, info in gpi_map.items():
        info.insert(0, year)
    gpi_dictionary = rename_country_to_iso_name(countries_to_rename, iso_names,
                                                gpi_map)
    return gpi_dictionary
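rename_country_to_iso_name is used here and in several of the other examples but is not shown. Presumably it re-keys entries from the spelling used in the source report to the ISO 3166 name, treating the two lists as parallel; a minimal sketch under that assumption:

def rename_country_to_iso_name(countries_to_rename, iso_names, country_map):
    # Re-key entries from the source spelling to the ISO name.
    # Assumes countries_to_rename[i] corresponds to iso_names[i].
    renamed = dict(country_map)
    for source_name, iso_name in zip(countries_to_rename, iso_names):
        if source_name in renamed:
            renamed[iso_name] = renamed.pop(source_name)
    return renamed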
Example #5
    def download(self, workers=8):
        response = get_response(self.m3u8_url, use_proxy=self.use_proxy)
        segments = m3u8.loads(response.text).data['segments']
        self.playlist = [self.base_url + s['uri'] for s in segments]
        self.total = len(self.playlist)
        Path.mkdir(PATH_TEMP, exist_ok=True)
        print(f'Starting download [{self.total}]:')
        self._download_playlist(workers)
        print('Download succeeded.')
        self._merge()
        print('Merge succeeded.')
        shutil.rmtree(PATH_TEMP)
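_download_playlist and _merge are other methods of the same class and are not shown. A rough sketch of how _download_playlist might fan the segments out to worker threads, reusing the _download_ts method from Example #2 (the real implementation may differ):

    def _download_playlist(self, workers):
        # Hypothetical implementation (same class as above); requires
        # "from concurrent.futures import ThreadPoolExecutor" at module level.
        with ThreadPoolExecutor(max_workers=workers) as pool:
            futures = [pool.submit(self._download_ts, index, url)
                       for index, url in enumerate(self.playlist)]
            for future in futures:
                future.result()  # re-raise the first failed download, if any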
Example #6
def get_gpi_index_dictionary(year, pages=default_pages):
	iso_names=['Czechia','Trinidad and Tobago','Macedonia, the former Yugoslav Republic of',\
	'Bosnia and Herzegovina', 'Bolivia, Plurinational State of','Kyrgyzstan',"Côte d'Ivoire",\
	'United Kingdom of Great Britain and Northern Ireland','Viet Nam','Moldova, Republic of', 'Monaco',\
	'Eswatini','Gambia','Palestine, State of','Venezuela, Bolivarian Republic of',\
	"Korea, Democratic People's Republic of",'Russian Federation','Central African Republic',\
	'Congo, Democratic Republic of the','Syrian Arab Republic','Taiwan, Province of China',\
	'United Arab Emirates',"Lao People's Democratic Republic",'Korea, Republic of',\
	'Tanzania, United Republic of','United States of America','Congo',\
	'Iran, Islamic Republic of']

	countries_to_rename=['Czech Republic', 'Trinidad & Tobago', 'Macedonia (FYR)', \
	'Bosnia & Herzegovina', 'Bolivia', 'Kyrgyz Republic', "Cote d' Ivoire",\
	'United Kingdom', 'Vietnam', 'Moldova', 'Swaziland', 'The Gambia', \
	'Palestine', 'Venezuela', 'North Korea', 'Russia', 'Central African Rep', \
	'Dem. Rep Congo', 'Syria', 'Taiwan', 'UAE', 'Laos', 'South Korea',\
	'Tanzania', 'USA', 'Rep of the Congo', 'Iran']

	response=get_response(index_url)
	if not response:
		return
	data=response.content
	data_file=write_data(to_file, data)

	df = read_pdf(data_file, pages=pages)
	columns_to_find=['country', 'score']
	columns_to_save=[]
	ending_to_save=[]
	for to_find in columns_to_find:
		for column in df.columns:
			if to_find in column.lower():
				columns_to_save.append(column)
				lower=column.lower()
				ending_to_save.append(lower[lower.find(to_find)+len(to_find):])
	ending_to_save=set(ending_to_save)

	binding=[('COUNTRY{}'.format(ending), 'SCORE{}'.format(ending)) for ending in ending_to_save]
	#binding=[('country', 'score'), ('country.1', 'score.1'), ('country.2', 'score.2')]
	gpi_dictionary={}
	frames=[]
	for country,value in binding:
		sub_df=df[[country, value]]
		sub_df.rename(index=str, columns={country: "country", value: "score"}, inplace=True)
		frames.append(sub_df)
	df = pd.concat(frames)
	df.dropna(subset=['score'], inplace=True)
	df = df[df.country != 'COUNTRY']
	df=df.set_index('country')
	gpi_map=df.T.to_dict('list')
	for country, info in gpi_map.items():
		info.insert(0, year)
	gpi_dictionary=rename_country_to_iso_name(countries_to_rename, iso_names, gpi_map)	
	return gpi_dictionary
Example #7
def districts_by_state(abbrev):
    """
    Endpoint to get all the districts that belong to the given state.

    abbrev -- the usps abbreviation of the state to get districts from

    Returns a list of the districts that belong to the given state
    """

    district_list = District.query.filter(District.state == abbrev).all()
    if not district_list:
        return error("Item not found for id " + abbrev)
    return jsonify([get_response(district) for district in district_list])
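In this and the following Flask examples, get_response is evidently not an HTTP helper but a serializer that turns a SQLAlchemy row into a JSON-ready dict (later examples also expect related keys such as 'bills' and 'representatives'). A bare-bones sketch of such a helper, assuming the models expose their columns via __table__; the real project's version is clearly richer:

def get_response(model_instance):
    # Build a plain dict from the row's mapped columns.
    return {
        column.name: getattr(model_instance, column.name)
        for column in model_instance.__table__.columns
    }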
Example #8
def handle_state_search(query, result, ranks):
    """
    Handles searching the states

    Finds any state that matches the search query. If finds new state, appends
    any district, representative, and political party that falls under that
    state.

    The index of the item in result should match index of its rank in ranks.

    query   -- the query to search for
    result  -- the list to append results to
    ranks   -- the list to append ranks for results to
    """

    states = State.query.filter(
        or_(State.usps_abbreviation.ilike(query),
            State.name.ilike(query))).all()

    for state in states:
        districts = District.query.filter(
            District.state == state.usps_abbreviation).all()
        if districts:
            for district in districts:
                district_json = get_district_json(district_param=district,
                                                  state_param=state)
                if district_json is not None:
                    if district_json not in result:
                        result.append(district_json)
                        ranks.append(1)

                        rep = Representative.query.filter(
                            district.representative_id ==
                            Representative.bioguide)
                        rep = rep.first()
                        if rep:
                            rep_json = get_response(rep)
                            del rep_json['bills']
                            if rep_json is not None and rep_json not in result:
                                result.append(rep_json)
                                ranks.append(0)

                            party_json = get_party_json(
                                rep_party_id=rep.party_id)
                            if party_json is not None and\
                               party_json not in result:
                                result.append(party_json)
                                ranks.append(0)
                    else:
                        ranks[result.index(district_json)] += 1
Example #9
def districts_by_id(abbrev, district_id):
    """
    Gets the district given its state's usps abbreviation and its district
    number.

    abbrev      -- the usps abbreviation of the district's state
    district_id -- the ID of the district

    Returns the district dictionary matching the usps abbreviation and ID
    """

    data = District.query.filter(District.state == abbrev.upper(),
                                 District.id == district_id).first()
    if not data:
        return error("Item not found for id " + abbrev + " and " + district_id)
    return jsonify(get_response(data))
Example #10
def handle_party_search(query, result, ranks):
    """
    Handles searching the political parties

    Finds any political party that matches the given query. If finds a new
    political party, appends it to the result, appends 1 for its rank, and
    appends all representatives and districts that belong to party. If finds
    a party that is already in result, increments its rank in ranks

    The index of the item in result should match index of its rank in ranks.

    query   -- the query to search for
    result  -- the list to append results to
    ranks   -- the list to append ranks for results to
    """

    parties = PoliticalParty.query.filter(
        or_(PoliticalParty.name.ilike(query), PoliticalParty.path.ilike(query),
            PoliticalParty.chair.ilike(query),
            PoliticalParty.formation_date.ilike(query),
            PoliticalParty.twitter_handle.ilike(query),
            PoliticalParty.youtube.ilike(query))).all()

    for party in parties:
        party_json = get_party_json(party_param=party)
        if party_json is not None:
            if party_json not in result:
                result.append(party_json)
                ranks.append(1)

                for rep in Representative.query \
                        .filter(party.id == Representative.party_id).all():
                    rep_json = get_response(rep)
                    del rep_json['bills']
                    if rep_json is not None and rep_json not in result:
                        result.append(rep_json)
                        ranks.append(0)
                    district_json = get_district_json(
                        rep_bioguide=rep.bioguide)
                    if district_json is not None and \
                       district_json not in result:
                        result.append(district_json)
                        ranks.append(0)
            else:
                ranks[result.index(party_json)] += 1
Example #11
def handle_rep_search(query, result, ranks):
    """
    Handles searching the representatives

    Finds any representative that matches the given query. If finds a new
    representative, appends it to result, appends a 1 for its rank, adds the
    rep's political party and district to the results with a rank of 0 if they
    are not already present. If finds a rep that is already in result,
    increments its rank in ranks.

    The index of the item in result should match the index of its rank in ranks

    query   -- the query to search for
    result  -- the list to append results to
    ranks   -- the list to append ranks for results to
    """

    reps = Representative.query.filter(
        or_(Representative.firstname.ilike(query),
            Representative.lastname.ilike(query),
            Representative.state.ilike(query),
            Representative.district.ilike(query),
            Representative.twitter.ilike(query),
            Representative.youtube.ilike(query))).all()

    for rep in reps:
        item = get_response(rep)
        del item['bills']
        if item is not None:
            if item not in result:
                result.append(item)
                ranks.append(1)

                party_json = get_party_json(rep_party_id=rep.party_id)
                if party_json is not None and party_json not in result:
                    result.append(party_json)
                    ranks.append(0)

                district_json = get_district_json(rep_bioguide=rep.bioguide)
                if district_json is not None and district_json not in result:
                    result.append(district_json)
                    ranks.append(0)
            else:
                ranks[result.index(item)] += 1
Example #12
def handle_district_search(query, result, ranks):
    """
    Handles searching the districts

    Finds any district that matches the given query. If finds a new district,
    appends it to result, appends a 1 for its rank. If the district has a
    representative, appends that representative and the reps political party to
    result if they are not already present. If finds a district that is already
    in result, increments its rank in ranks.

    The index of the item in result should match index of its rank in ranks.

    query   -- the query to search for
    result  -- the list to append results to
    ranks   -- the list to append ranks for results to
    """

    districts = District.query.filter(or_(
        District.alpha_num.ilike(query))).all()

    for district in districts:
        district_json = get_district_json(district_param=district)
        if district_json is not None:
            if district_json not in result:
                result.append(district_json)
                ranks.append(1)

                rep = Representative.query.filter(
                    district_json['representative_id'] ==
                    Representative.bioguide).first()
                if rep:
                    rep_json = get_response(rep)
                    del rep_json['bills']
                    if rep_json is not None and rep_json not in result:
                        result.append(rep_json)
                        ranks.append(0)
                    party_json = get_party_json(rep_party_id=rep.party_id)
                    if party_json is not None and party_json not in result:
                        result.append(party_json)
                        ranks.append(0)
            else:
                ranks[result.index(district_json)] += 1
Example #13
def representatives_by_page(num):
    """
    Gets the representatives given the page number.

    Provides 25 representatives per page, ordering the representatives by their
    bioguide ID

    num -- the page number to get

    Returns a list of representative dictionaries
    """

    num = int(num)
    if num < 0:
        response = jsonify({"Error": "Item Not Found."})
        response.status_code = 404
        return response

    offset = num * 25
    query = Representative.query.order_by(Representative.bioguide)
    query = query.offset(offset).limit(25)
    return jsonify([get_response(rep) for rep in query.all()])
Example #14
    def __init__(self, quest, ans, pre_query=False):
        self._quest = quest
        self._ans = ans
        self._pre_query = pre_query

        # Normalize
        self._ans = [opt.lower() for opt in self._ans]

        self._responses = [None] * QUERY_TYPES

        # create list of all queries
        new_quest = quest.replace("NOT", "")
        self._queries = [new_quest]
        self._queries += [
            ''.join([new_quest, ' %s' % choice]) for choice in self._ans
        ]

        # pre query if needed
        if self._pre_query:
            self._responses = [
                util.get_response(util.google_query(query))
                for query in self._queries
            ]
Example #15
def get_hdi_index_dictionary(year,index_id):
	iso_names=['Bolivia, Plurinational State of','Congo, Democratic Republic of the','Micronesia, Federated States of',
	'United Kingdom of Great Britain and Northern Ireland', 'Hong Kong', 'Iran, Islamic Republic of',
	"Korea, Democratic People's Republic of", 'Moldova, Republic of','Macedonia, the former Yugoslav Republic of','Eswatini',
	'Tanzania, United Republic of','United States of America','Venezuela, Bolivarian Republic of']

	countries_to_rename=['Bolivia (Plurinational State of)','Congo (Democratic Republic of the)','Micronesia (Federated States of)',
 	'United Kingdom', 'Hong Kong, China (SAR)','Iran (Islamic Republic of)','Korea (Republic of)','Moldova (Republic of)',
 	'The former Yugoslav Republic of Macedonia','Eswatini (Kingdom of)','Tanzania (United Republic of)',
 	'United States','Venezuela (Bolivarian Republic of)']

	year=int(year)
	#for the moment we will take only HDI index id
	result=get_response(index_data_url)
	json_data=result.json()
	country_score=dict()
	if not index_id:
		return country_score
	for country_data in json_data:
		if country_data.get('id')==index_id:
			if country_data.get('year')==year:
				country_score[country_data.get('country')]=[year, round(country_data.get('value'),2)]
	country_score = rename_country_to_iso_name(countries_to_rename, iso_names, country_score)
	return country_score
Example #16
def party_filter():
    """
    Endpoint to get political parties based on the filter query given by the
    'filter' request parameter.

    The filter parameter should be a JSON object with the following fields:
    - social    -- the types of social media. Should be YT, T, Y, Neither
    - color     -- the party color
    - date      -- the range of dates for the formation of the party given as
                   two integers (ex: 1776-2018)
    - name      -- the range of party names given as two letters (ex: 'A-L')
    - order_by  -- how to order the results. Should be name_asc, name_desc,
                   chair_name_asc, or chair_name_desc

    Returns a list of the political parties that match the given query
    """

    try:
        filter_query = request.args.get('filter')
        filter_query = str(filter_query)
        filter_query = json.loads(filter_query)
    except Exception:
        return error("Filter Query Invalid")

    social = 'None'
    color = 'None'
    formation_date = 'None'
    name = ['a', 'z']
    order_by = 'None'

    if 'social' in filter_query:
        social = str(filter_query['social'])
    if 'color' in filter_query:
        color = str(filter_query['color'])
    if 'date' in filter_query:
        formation_date = filter_query['date'].split("-")
    if 'name' in filter_query:
        name = str(filter_query['name']).lower().split('-')
    if 'order_by' in filter_query:
        order_by = str(filter_query['order_by'])

    filtered_result = PoliticalParty.query
    if social == 'YT':
        filtered_result = filtered_result.filter(
            PoliticalParty.youtube != '', PoliticalParty.twitter_handle != '')
    elif social == 'T':
        filtered_result = filtered_result.filter(
            PoliticalParty.twitter_handle != '')
    elif social == 'Y':
        filtered_result = filtered_result.filter(PoliticalParty.youtube != '')
    elif social != 'None':
        filtered_result = filtered_result.filter(
            PoliticalParty.youtube == '', PoliticalParty.twitter_handle == '')

    if color != 'None':
        color = color.title()
        filtered_result = filtered_result.join(PartyColor).filter(
            PartyColor.color == color)

    result = None
    if order_by == 'name_asc':
        filtered_result = filtered_result.order_by(PoliticalParty.name.asc())
    elif order_by == 'name_desc':
        filtered_result = filtered_result.order_by(PoliticalParty.name.desc())
    elif order_by == 'chair_name_asc':
        result = [get_response(party) for party in filtered_result.all()]
        result = sorted(result, key=party_chair)
    elif order_by == 'chair_name_desc':
        filtered_result = filtered_result.order_by(PoliticalParty.chair.desc())
    else:
        filtered_result = filtered_result.order_by(PoliticalParty.id)

    # Delete Bills from the result
    if result is None:
        result = [get_response(party) for party in filtered_result.all()]
    for party in result:
        for rep in party['representatives']:
            del rep['bills']

    if formation_date != 'None' and formation_date[0] != 'None':
        # Filter to parties formed between the given years

        date_begin = int(formation_date[0])
        date_end = int(formation_date[1])

        def party_formed_between(party):
            year = int(party['formation_date'].split()[-1])
            return date_begin <= year <= date_end

        result = filter(party_formed_between, result)

    # Filter the names of the parties
    return jsonify(
        list(filter(lambda n: name[0] <= n['name'].lower()[0] <= name[1],
                    result)))
Example #17
def handle_links(links: List[Dict]) -> Union[Dict, None]:
    """Returns a dict with lists of PDF and XML URLs if any fulltext link is available"""
    # TODO make this async
    # Link is a field in the Crossref metadata
    # refactor pseudo code
    # first get the file ending
    # then call a checker
    # then add to list if True
    supported_endings = ["pdf", "xml"]
    found = False
    pdf_urls: List[str] = []
    xml_urls: List[str] = []
    for link in links:
        url = link["URL"]
        ending = url.split(".")[-1]
        print(f"ending:{ending}")
        pdf_duplicate = False
        xml_duplicate = False
        # Check for duplicates
        if len(pdf_urls) > 0 or len(xml_urls) > 0:
            for pdf_url in pdf_urls:
                if pdf_url == url:
                    pdf_duplicate = True
                    continue
            for xml_url in xml_urls:
                if xml_url == url:
                    xml_duplicate = True
                    continue
        if pdf_duplicate or xml_duplicate:
            continue
        if ending == "pdf":
            r = util.get_response(url)
            if r is not False and util.check_if_pdf(r):
                found = True
                pdf_urls.append(url)
            else:
                continue
        if ending == "xml":
            r = util.get_response(url)
            if r is not False and util.check_if_xml(r):
                found = True
                xml_urls.append(url)
            else:
                continue
        if ending not in supported_endings:
            print(f"URL ending not recognized: {link}")
            # Test to see if it serves PDF or XML anyway
            r = util.get_response(url)
            if r is not False:
                if util.check_if_pdf(r):
                    found = True
                    pdf_urls.append(url)
                elif util.check_if_xml(r):
                    found = True
                    xml_urls.append(url)
            else:
                continue
    if found is False:
        print("No fulltext links found")
        return None
    else:
        return dict(pdf_urls=pdf_urls, xml_urls=xml_urls)
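util.check_if_pdf and util.check_if_xml are not shown. One plausible way to implement such checks, assuming the response object is a requests.Response, is to inspect the Content-Type header and, for PDF, the file's magic bytes:

def check_if_pdf(response):
    # A PDF either announces itself in the Content-Type header or starts with %PDF-.
    content_type = response.headers.get("Content-Type", "").lower()
    return "pdf" in content_type or response.content[:5] == b"%PDF-"

def check_if_xml(response):
    content_type = response.headers.get("Content-Type", "").lower()
    return "xml" in content_type or response.content.lstrip()[:5] == b"<?xml"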
Example #18
def get_fsi_index_dictionary(year):

    iso_names = [
        'Bolivia, Plurinational State of',
        'Cabo Verde',
        'Congo, Democratic Republic of the',
        'Congo',
        'Côte d\'Ivoire',
        #'Czechia',
        'Guinea-Bissau',
        'Iran, Islamic Republic of',
        'Israel',
        'Kyrgyzstan',
        "Lao People's Democratic Republic",
        'Macedonia, the former Yugoslav Republic of',
        'Micronesia, Federated States of',
        'Moldova, Republic of',
        "Korea, Democratic People's Republic of",
        'Russian Federation',
        'Slovakia',
        'Korea, Republic of',
        #'Eswatini',
        'Syrian Arab Republic',
        'Tanzania, United Republic of',
        'United Kingdom of Great Britain and Northern Ireland',
        'United States of America',
        'Venezuela, Bolivarian Republic of',
        'Viet Nam'
    ]
    # TODO: if year is earlier than 2019, also add 'Czechia' to iso_names and
    # 'Czech Republic' to countries_to_rename ('Czechia' <- 'Czech Republic'),
    # and likewise 'Eswatini' <- 'Swaziland' (see the sketch after this function).
    countries_to_rename = [
        'Bolivia',
        'Cape Verde',
        'Congo Democratic Republic',
        'Congo Republic',
        "Cote d'Ivoire",
        #'Czech Republic'
        'Guinea Bissau',
        'Iran',
        'Israel and West Bank',
        'Kyrgyz Republic',
        'Laos',
        'Macedonia',
        'Micronesia',
        'Moldova',
        'North Korea',
        'Russia',
        'Slovak Republic',
        'South Korea',
        #'Swaziland',
        'Syria',
        'Tanzania',
        'United Kingdom',
        'United States',
        'Venezuela',
        'Vietnam'
    ]

    response = get_response(last_available_index_url.format(year, year),
                            ignore_404=True)
    if response.status_code == 404:
        response = get_response(previous_index_url.format(year, year))
    if not response:
        return None
    fsi_data = response.content
    data_file = write_data(to_file.format(year), fsi_data)

    df = pd.read_excel(data_file)
    fsi_dictionary = dict()
    for index, row in df.iterrows():
        fsi_dictionary[row.Country] = [row.Year.year, round(row.Total, 2)]

    fsi_dictionary = rename_country_to_iso_name(countries_to_rename, iso_names,
                                                fsi_dictionary)
    return fsi_dictionary
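Following the TODO comments inside the function, the commented-out entries could be restored conditionally before the download, for example with a fragment like the one below placed right after the two lists are defined (the 2019 cut-off is taken from the comment, not verified against the data):

    if int(year) < 2019:
        # Older FSI spreadsheets still use the pre-2019 country names.
        iso_names = iso_names + ['Czechia', 'Eswatini']
        countries_to_rename = countries_to_rename + ['Czech Republic', 'Swaziland']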
Example #19
def districts_filter():
    """
    Endpoint to get the districts based on the filter query given by the
    'filter' request parameter.

    The filter parameter should be a JSON object with the following fields:
    - state         -- the usps abbreviation of the district's state
    - population    -- the range of populations given as two integers
                       (ex: 500000-1000000)
    - median_age    -- the range of median ages given as two floats
                       (ex: 46.6-50)
    - order_by      -- how the results should be ordered. Should be state_asc,
                       state_desc, population_asc, or population_desc

    Returns a list of the districts that match the given query
    """

    try:
        filter_query = request.args.get('filter')
        filter_query = str(filter_query)
        filter_query = json.loads(filter_query)
    except Exception:
        return error("Filter Query Invalid")

    order_by = 'state_asc'

    filtered_result = District.query
    if 'state' in filter_query and filter_query['state'] != 'None':
        filtered_result = filtered_result.filter(
            District.state == str(filter_query['state']))

    if 'population' in filter_query and filter_query['population'] != 'None':
        population = str(filter_query['population']).split('-')
        filtered_result = filtered_result.filter(
            int(population[0]) <= District.population)
        filtered_result = filtered_result.filter(
            District.population < int(population[1]))

    if 'median_age' in filter_query and filter_query['median_age'] != 'None':
        median_age = str(filter_query['median_age']).split('-')
        filtered_result = filtered_result.filter(
            float(median_age[0]) <= District.median_age)
        filtered_result = filtered_result.filter(
            District.median_age < float(median_age[1]))

    if 'order_by' in filter_query:
        order_by = str(filter_query['order_by'])

    result = None
    if order_by == 'state_desc':
        result = [get_response(rep) for rep in filtered_result.all()]
        result = sorted(result, key=district_id)
        result = sorted(result, key=lambda district: district['state'])
        result = list(reversed(result))
    elif order_by == 'population_desc':
        filtered_result = filtered_result.order_by(District.population.desc())
    elif order_by == 'population_asc':
        filtered_result = filtered_result.order_by(District.population.asc())
    else:
        result = [get_response(rep) for rep in filtered_result.all()]
        result = sorted(result, key=district_id)
        result = sorted(result, key=lambda district: district['state'])

    if result is not None:
        return jsonify(result)
    return jsonify([get_response(rep) for rep in filtered_result.all()])
Example #20
    parser.add_argument(
        '--input',
        help=
        'input phrases as a list, e.g. [question1, question2, question3]')

    args = parser.parse_args()

    x = ast.literal_eval(args.input)  # parsed, but the hard-coded sample questions below are used instead

    words = create_sequence_from_sentence([
        'what is better amazon or itunes for showing',
        'what is better mouse or rat', 'what is easier to make bread o pizza'
    ])
    model = TaggerFactory.load(PATH_TO_PRETRAINED + MODEL_NAME)
    tags = model.predict_tags_from_words(words)

    objects_list = []
    for elem in list(zip(words, tags)):
        objects = get_objects(elem[0], elem[1])
        assert len(objects) >= 2, "We have %d objects to compare" % (
            len(objects))
        objects_list.append((objects[0], objects[1]))

    for obj0, obj1 in objects_list:
        response = get_response(obj0, obj1, False)
        response_json = response.json()
        Merlin = diviner()
        Merlin.create_from_json(response_json)
        Merlin.generate_advice()

    print('\nThe end.')
Example #21
def query_ombd(title):
    api_key = "47fec2f"  # 1000 calls per day
    url = "http://www.omdbapi.com/?t=%s&apikey=%s" % (title, api_key)
    # print_response(url)
    parse_omdb_data(get_response(url).json())
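Titles that contain spaces or punctuation are safer to percent-encode before being interpolated into the URL. A small variant of the call above using urllib.parse.quote_plus (the function name is illustrative; parse_omdb_data and get_response come from the surrounding module):

from urllib.parse import quote_plus

def query_omdb_encoded(title, api_key="47fec2f"):
    url = "http://www.omdbapi.com/?t=%s&apikey=%s" % (quote_plus(title), api_key)
    parse_omdb_data(get_response(url).json())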
Example #22
def representatives_filter():
    """
    Gets representatives based on the filter query given by the 'filter'
    request parameter.

    The filter parameter should be JSON formatted with the following fields:
    - "state"       -- the usps abbreviation of a state
    - "last_name"   -- the range of last names given as two letters (ex: 'A-L')
    - "party_id"    -- the ID of a political party
    - "votes_pct"   -- the range of vote percentages given as two floats
                       (ex: 60-69.99)
    - "order_by"    -- how to order the results. Should be last_asc, last_desc,
                       votes_pct_asc, or votes_pct_desc

    Returns list of representative dictionaries that match the given filter or
    an error if the query is invalid
    """

    try:
        filter_query = request.args.get('filter')
        filter_query = str(filter_query)
        filter_query = json.loads(filter_query)
    except Exception:
        return error("Filter Query Invalid")

    state = 'None'
    last_name = 'None'
    party_id = 'None'
    votes_pct = 'None'
    order_by = 'None'
    if 'state' in filter_query:
        state = str(filter_query['state'])

    if 'party_id' in filter_query:
        party_id = filter_query['party_id']

    if 'last_name' in filter_query:
        last_name = str(filter_query['last_name']).lower().split('-')

    if 'votes_pct' in filter_query:
        votes_pct = str(filter_query['votes_pct']).split('-')

    if 'order_by' in filter_query:
        order_by = str(filter_query['order_by'])

    filtered_result = Representative.query
    if state != 'None':
        filtered_result = filtered_result.filter(Representative.state == state)

    if party_id != 'None':
        filtered_result = filtered_result.filter(
            Representative.party_id == int(party_id))

    if votes_pct != 'None' and votes_pct[0] != 'None':
        filtered_result = filtered_result.filter(
            Representative.votes_with_party_pct >= float(votes_pct[0]),
            Representative.votes_with_party_pct < float(votes_pct[1]))

    if order_by != 'None':
        if (order_by == 'last_asc'):
            filtered_result = filtered_result.order_by(
                Representative.lastname.asc())
        elif (order_by == 'last_desc'):
            filtered_result = filtered_result.order_by(
                Representative.lastname.desc())
        elif (order_by == 'votes_pct_asc'):
            filtered_result = filtered_result.order_by(
                Representative.votes_with_party_pct.asc())
        else:
            filtered_result = filtered_result.order_by(
                Representative.votes_with_party_pct.desc())
    else:
        filtered_result = filtered_result.order_by(
            Representative.lastname.asc())

    filtered_result = filtered_result.all()
    filtered_dict_list = [get_response(rep) for rep in filtered_result]

    if last_name != 'None':
        return jsonify(
            list(filter(
                lambda s: last_name[0] <= s['lastname'][0].lower() <= last_name[1],
                filtered_dict_list)))
    else:
        return jsonify(filtered_dict_list)
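For reference, a client-side sketch of how the 'filter' parameter consumed above could be built and sent; the host and endpoint path are placeholders, not taken from the project:

import json
import requests

params = {'filter': json.dumps({
    'state': 'TX',
    'votes_pct': '60-69.99',
    'order_by': 'last_asc',
})}
# response = requests.get('http://localhost:5000/representatives/filter', params=params)
# representatives = response.json()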
Example #23
    def get_query(self, query_index):
        if self._responses[query_index] is not None:
            return self._responses[query_index]
        self._responses[query_index] = util.get_response(
            util.google_query(self._queries[query_index]))
        return self._responses[query_index]