def get_geo_location(ip):
    req = urllib2.Request("http://freegeoip.net/json/" + ip)
    opener = urllib2.build_opener()
    try:
        f = opener.open(req)
    except Exception:
        return None
    json_resp = json.loads(f.read())
    city = json_resp["city"]  # city = "Viana"
    state = json_resp["region_name"]  # state = "Espírito Santo" / "Maranhão"

    # first try to find the exact city within the state
    # (state ids are 3 chars long, municipality ids are 9)
    bra_state = Bra.query.filter_by(name_pt=state).filter(
        func.char_length(Bra.id) == 3).first()
    bra_cities = Bra.query.filter_by(name_pt=city).filter(
        func.char_length(Bra.id) == 9)
    if bra_state:
        if bra_cities.count() == 1:
            return bra_cities.first()
        elif bra_cities.count() > 1:
            return bra_cities.filter(Bra.id.like(bra_state.id + '%')).first()
        return None
    return None
def hierarchy(self):
    prods = []
    _2dig = self.attr_cls.query.get(self.attr.id[:2])
    prods.append(_2dig)
    '''if this is a 2 digit product show only its children; on the other
    hand, if it's a 4 or 6 digit product show the single 4 digit prod and
    all 6 digit children with itself included'''
    if self.attr == _2dig:
        children = self.attr_cls.query \
            .filter(self.attr_cls.id.startswith(_2dig.id)) \
            .filter(func.char_length(self.attr_cls.id) == 6) \
            .order_by("id") \
            .all()
        prods = prods + list(children)
    else:
        _4dig = self.attr_cls.query.get(self.attr.id[:6])
        prods.append(_4dig)
        children = self.attr_cls.query \
            .filter(self.attr_cls.id.startswith(_4dig.id)) \
            .filter(func.char_length(self.attr_cls.id) == 8) \
            .order_by("id") \
            .all()
        prods = prods + list(children)
    return prods
def get_translations(descend=False):
    # Order translations by the length of the translated text,
    # longest first when descend=True.
    length = func.char_length(Translation.translated_text)
    order_by_query = desc(length) if descend else length
    return [
        x.json()
        for x in db.session.query(Translation).order_by(order_by_query).all()
    ]
def length_json():
    session = db.Session()
    results = defaultdict(lambda: 0)
    results.update(
        session.query(
            func.char_length(db.History.cmd).label("length"),
            func.count("length")).group_by("length").all())
    if not results:
        return jsonify({})
    flat_values = []
    for length, number in results.items():
        flat_values.extend([length] * number)
    data = {
        "average": float("%.2f" % statistics.mean(flat_values)),
        "median": statistics.median(flat_values),
        "series": [results[length]
                   for length in range(1, max(results.keys()) + 1)],
    }
    return jsonify(data)
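# Aside: a minimal sketch (not part of the original source) of what the
# flattening step above does -- it expands histogram buckets of
# {command_length: count} back into raw observations so that
# statistics.mean/median can be applied directly.
import statistics

results = {3: 2, 5: 1}  # e.g. two commands of length 3, one of length 5
flat_values = [length for length, number in results.items()
               for _ in range(number)]
assert flat_values == [3, 3, 5]
assert statistics.median(flat_values) == 3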
def dispatch_request(self):
    form = NewExportFileForm(request.form)
    form.template.query = self.query(ExportTemplate)
    form.session.query = self.query(Session)
    form.filters.platform_id.query = self.query(Platform)
    form.filters.platform_group_id.query = self.query(PlatformGroup)
    # Offer only two-letter country codes from TVOD/SVOD platforms that are
    # not excluded from exports.
    form.filters.platform_country.choices = [
        (v, v)
        for (v, ) in self.query(Platform.country)
        .order_by(Platform.country)
        .filter(func.char_length(Platform.country) == 2)
        .filter(
            or_(
                Platform.type == PlatformType.TVOD,
                Platform.type == PlatformType.SVOD,
            ))
        .filter(Platform.ignore_in_exports.is_(False))
        .distinct()
    ]

    if request.method == "POST" and form.validate():
        file = ExportFile(
            path=form.path.data,
            session=form.session.data,
            template=form.template.data,
            filters=form.filters.render(),
        )
        file.schedule(celery=self.celery)
        self.session.add(file)
        self.session.commit()
        return redirect(url_for(".show_export_file", id=file.id))

    ctx = {}
    ctx["form"] = form
    return render_template("exports/files/new.html", **ctx)
def prev(self):
    c = self.__class__
    return self.query.filter(c.id < self.id) \
        .filter(~c.id.in_(excluded_countries)) \
        .filter(c.id_3char != None) \
        .filter(func.char_length(c.id) == len(self.id)) \
        .order_by(c.id.desc()).first()
def prev(self):
    c = self.__class__
    return (
        self.query.filter(c.id < self.id)
        .filter(func.char_length(c.id) == len(self.id))
        .order_by(c.id.desc())
        .first()
    )
def test_constructor(self):
    try:
        func.current_timestamp('somearg')
        assert False
    except TypeError:
        assert True

    try:
        func.char_length('a', 'b')
        assert False
    except TypeError:
        assert True

    try:
        func.char_length()
        assert False
    except TypeError:
        assert True
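# Aside: a minimal standalone sketch (assumes SQLAlchemy is installed) of why
# the test above expects TypeError -- char_length is registered as a
# GenericFunction with a fixed one-argument signature, unlike arbitrary
# func.<name> calls, and it compiles to the SQL char_length() function.
from sqlalchemy import column, func

expr = func.char_length(column("name"))
print(expr)  # renders as: char_length(name)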
def dump_project(self, project, session):
    print('Looking in project %s' % (project.name))
    query = session.query(Result) \
        .filter_by(project=project) \
        .order_by(func.char_length(Result.shortcode), Result.shortcode)

    if self.after:
        query = query.filter(Result.datetime > self.after)

    count = query.count()
    if count == 0:
        return

    self.projects_count += 1

    assert project.url_template.endswith('{shortcode}'), \
        'Writer only supports URL with prefix'
    # XXX: Use regex \{shortcode\}$ instead?
    site = project.url_template.replace('{shortcode}', '')

    self.fp = None
    self.writer = None
    last_filename = ''
    i = 0

    for item in query:
        self.items_count += 1
        i += 1
        if i % 1000 == 0:
            print('%d/%d' % (i, count))

        # we can do this as the query is sorted, so that items that would
        # end up together would be returned together
        filename = self.get_filename(project, item)

        if filename != last_filename:
            self.close_fp()
            assert not os.path.isfile(filename), \
                'Target file %s already exists' % (filename)
            self.fp = self.get_fp(filename)
            self.writer = self.format(self.fp)
            self.writer.write_header(site)
            last_filename = filename

        self.writer.write_shortcode(item.shortcode, item.url, item.encoding)

        if not self.last_date or item.datetime > self.last_date:
            self.last_date = item.datetime

        if self.settings['delete']:
            session.delete(item)

    self.close_fp()
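# Aside: the ORDER BY above (char_length first, then the value) yields
# shortlex ordering -- shorter shortcodes come first and ties are broken
# alphabetically -- which is what lets the writer group items per file in a
# single pass. A pure-Python illustration of the same ordering:
codes = ["b", "aa", "a", "ab"]
assert sorted(codes, key=lambda s: (len(s), s)) == ["a", "b", "aa", "ab"]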
def cities_by_pop(value):
    Ybs = attrs.Ybs
    filters = [
        Ybs.stat_id == 'pop',
        Ybs.stat_val >= value,
        Ybs.year == __latest_year__['stats'],
        func.char_length(Ybs.bra_id) == 9
    ]
    res = Ybs.query.filter(*filters).with_entities(Ybs.bra_id).all()
    if res:
        return [row[0] for row in res]
    return res
def compute_stats(metric, shows, limit=None, offset=None, sort="desc",
                  depth=None, filters=[]):
    cache_key = CAROUSEL_NS + "".join(
        ([metric] + shows) + ([str(limit), str(offset), sort, str(depth)]))
    prev = cached_query(cache_key)
    if prev:
        return pickle.loads(prev)

    kwargs = {metric: "dummy"}
    kwargs[shows[0]] = 'show'
    for show in shows[1:]:
        kwargs[show] = "dummy"
    table = table_helper.select_best_table(kwargs, allowed_when_not,
                                           possible_tables)

    if not table:
        raise Exception("No Valid Table Available!")

    # NOTE: the incoming filters argument is discarded and rebuilt here
    filters = []
    show_columns = [getattr(table, show) for show in shows]
    metric_col = getattr(table, metric)

    i = 0
    for show_column in show_columns:
        show = shows[i]
        if table in no_length_column:
            depth_val = depth or max_depth[show]
            filters.append(func.char_length(show_column) == depth_val)
        elif show in max_depth:
            depth_val = depth or max_depth[show]
            filters.append(getattr(table, show + table_helper.LEN) == depth_val)
        i += 1

    if table in filters_map:
        filters += filters_map[table]

    growth_regex = re.match('(num_emp)_growth(_5)?', metric)
    VAL_THRESOLD = 10000
    if growth_regex:
        orig_col_name = growth_regex.group(1)
        orig_col = getattr(table, orig_col_name)
        filters.append(orig_col >= VAL_THRESOLD)
    elif metric == "wage_avg" and len(shows) == 1 and shows[0] == "bra_id":
        # when looking at wage_avg for cities, only look at places
        # with >= 50k people
        cities = cities_by_pop(50000)
        filters.append(table.bra_id.in_(cities))

    columns = show_columns + [metric_col]
    results = query_helper.query_table(table, columns, filters, order=metric,
                                       limit=limit, sort=sort, offset=offset)
    cached_query(cache_key, pickle.dumps(results))
    return results
def __init__(self, store, file_id):
    ContentAddressLookup.__init__(self, store, file_id)
    blob_db = self.blob_db()
    if blob_db.exists():
        q = select([
            func.char_length(blob.c.content).label('size'),
            blob.c.created_dt
        ]).where(blob.c.file_id == self.file_id)
        result = blob_db.execute(q).fetchall()
        if len(result) == 1:
            self.size = result[0].size
            self.created_dt = result[0].created_dt
        elif len(result) != 0:
            raise AssertionError('PK?: %r' % self.file_id)
def look_in_db(search_term):
    base_q = name_tbl.query.filter_by(lang=lang)
    if len_greater_than:
        base_q = base_q.filter(
            func.char_length(name_tbl.id) > len_greater_than)
    exact_match = base_q.filter_by(name=search_term).first()
    if exact_match:
        return [exact_match]
    starts_with_match = base_q.filter(
        name_tbl.name.startswith(search_term)).all()
    if len(starts_with_match):
        return starts_with_match
    if attr_tbl_backref == "sitc" or attr_tbl_backref == "hs":
        return base_q.filter(
            name_tbl.name.like("%" + search_term + "%")).all()
    else:
        return []
def update_startup_info(session):
    """ Insert tracking information for each startup for today into database """
    # NOTE: `CBCompany.twitter is not None` would be evaluated by Python, not
    # SQL; use isnot(None) so the NULL check happens in the query.
    startups = session.query(CBCompany, ALCompany).filter(
        CBCompany.name == ALCompany.name).filter(
            CBCompany.twitter.isnot(None)).filter(
                func.char_length(CBCompany.twitter) > 0).all()

    for startup in startups:
        al_id = startup.ALCompany.angellist_id
        print al_id
        count = session.query(StartupInfo).filter(
            StartupInfo.al_id == al_id).filter(
                StartupInfo.info_date == date.today()).count()
        if count > 0:
            continue
        else:
            record = StartupInfo()
            al_url = "https://api.angel.co/1/startups/%d?access_token=%s" % \
                (al_id, config.ANGELLIST_TOKEN)
            resp = urllib2.urlopen(al_url)
            profile = json.loads(resp.read())
            record.info_date = date.today()
            record.al_id = al_id
            if ("follower_count" not in profile) or ('quality' not in profile):
                # fall back to yesterday's numbers when the API omits them
                prec = session.query(StartupInfo).filter(
                    StartupInfo.al_id == al_id).filter(
                        StartupInfo.info_date ==
                        (date.today() - timedelta(1))).first()
                record.al_follower = prec.al_follower
                record.al_quality = prec.al_quality
            else:
                record.al_follower = profile['follower_count']
                record.al_quality = profile['quality']
            twitter_profile = socialmedia.twitter_user_show(
                startup.CBCompany.twitter)
            record.twitter_follower = twitter_profile['followers_count']
            record.bitly_click = socialmedia.bitly_click_count(
                startup.ALCompany.bitly_hash)
            session.add(record)
            session.commit()
def searchFood(searchTerm, brandTerm, Food, FoodKey):
    searchTermList = searchTerm.split()
    keywords = [each.lower() for each in searchTermList]
    a = FoodKey.query.filter(FoodKey.word.in_(keywords)).subquery()
    q = Food.query.filter(Food.id == a.c.keyid).group_by(Food.id).having(
        func.count(distinct(a.c.word)) == len(keywords))
    brandTermList = brandTerm.split()
    orTerm = "|"
    brandTerm = orTerm.join(brandTermList)
    # str.rstrip returns a new string, so the result must be reassigned
    brandTerm = brandTerm.rstrip("|")
    if brandTerm != "":
        q = q.filter("Food.source @@ to_tsquery(:searchTerm)").params(
            searchTerm=brandTerm)
    if len(keywords) == 1:
        q = q.order_by(asc(func.char_length(Food.tag))).limit(40)
    foodIDs = [each.id for each in q]
    return foodIDs
def parse_bras(bra_str):
    if ".show." in bra_str:
        # the '.show.' indicates that we are looking for a specific nesting
        bra_id, nesting = bra_str.split(".show.")
        # filter table by requested nesting level
        bras = Bra.query \
            .filter(Bra.id.startswith(bra_id)) \
            .filter(func.char_length(Bra.id) == nesting).all()
        bras = [b.serialize() for b in bras]
    elif "." in bra_str:
        # the '.' indicates we are looking for bras within a given distance
        bra_id, distance = bra_str.split(".")
        bras = exist_or_404(Bra, bra_id)
        neighbors = bras.get_neighbors(distance)
        bras = [g.bra.serialize() for g in neighbors]
    else:
        # we allow the user to specify bras separated by '+'
        bras = bra_str.split("+")
        # Make sure the bra_id requested actually exists in the DB
        bras = [exist_or_404(Bra, bra_id).serialize() for bra_id in bras]
    return bras
class CordexDataset(Base):
    __tablename__ = 'cordex_dataset'

    dataset_id = Column(UUID, primary_key=True)
    model_id = Column('model_id', Text)
    time_frequency = Column('frequency', Text)
    #institute = Column('institute_id', Text)
    institute = Column('institute', Text)
    #domain = Column('cordex_domain', Text)
    domain = Column('domain', Text)
    experiment = Column('experiment', Text)
    #rcm_version = Column('rcm_version_id', Text)
    rcm_version = Column('rcm_version', Text)
    #driving_model = Column('driving_model_id', Text)
    driving_model = Column('driving_model', Text)
    #experiment = Column('driving_experiment_name', Text)
    driving_experiment = Column('driving_experiment', Text)
    #ensemble = Column('driving_model_ensemble_member', Text)
    ensemble = Column('ensemble', Text)

    rcm_name = column_property(
        f.substr(model_id, f.char_length(institute) + 2))
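# Aside: a hedged illustration (assumption: model_id values look like
# "<institute>-<rcm_name>") of the rcm_name column_property above. SQL
# substr() is 1-indexed, so starting at char_length(institute) + 2 skips
# the institute prefix plus the separator character. Python equivalent:
def rcm_name_py(model_id, institute):
    return model_id[len(institute) + 1:]

assert rcm_name_py("SMHI-RCA4", "SMHI") == "RCA4"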
def load_microsoft_suggestions_by_lang(active_messages, language,
                                       origin_language=None):
    """ Attempt to translate all the messages to a language """
    if language == 'en':
        return True, 0

    found = False
    for ms_language in microsoft_translator.languages:
        if ms_language == language:
            found = True
    if not found:
        return True, 0

    # Focus on those not available in Microsoft
    last_month = datetime.datetime.utcnow() - datetime.timedelta(days=32)
    row = db.session.query(
        func.sum(func.char_length(TranslationExternalSuggestion.value))
    ).filter(
        TranslationExternalSuggestion.origin_language == u'en',
        TranslationExternalSuggestion.engine == u'microsoft',
        TranslationExternalSuggestion.created >= last_month).first()

    # we don't have the right size of the human_key. we can use the value,
    # but it's not accurate. we have 2M per month, so we select 1.5M max
    # (which is around 1.7M real)
    if row[0] > 1500000:
        return False, 0

    return _load_generic_suggestions_by_lang(active_messages, language,
                                             origin_language, 'microsoft',
                                             translation_func=_mstranslate,
                                             bulk_messages=True)
def get_user():
    '''Get a user by id.

    Note: returns the currently logged-in user when the id does not exist.
    '''
    id = request.args.get('id', -1, type=int)
    username = request.args.get('username', '')
    keyword = request.args.get('keyword', '')
    offset = request.args.get('offset', 0, type=int)
    limit = request.args.get('limit', 10, type=int)
    if id != -1:
        user = Cache.get_user(id)
        if user is None:
            return not_found("User not found")
        return jsonify(user.to_json())
    if username != '':
        user = User.query.filter_by(username=username).first_or_404()
        return jsonify(user.to_json())
    if keyword != '':
        users = User.query.filter(User.username.like('%' + keyword + '%'))
        # shortest usernames first, so the closest matches rank highest
        users = users.order_by(func.char_length(User.username))
        users = users.offset(offset).limit(limit)
        users = [u.to_json() for u in users]
        return jsonify(users)
    return jsonify(g.user.to_json())
def iterate(self, session=None) -> Iterator[ExportFactoryTemplateContext]:
    from . import Platform, PlatformGroup
    from .enums import PlatformType

    query = session.query
    if self.iterator is ExportFactoryIterator.PLATFORMS:
        platforms = (query(Platform).filter(
            or_(
                Platform.type == PlatformType.TVOD,
                Platform.type == PlatformType.SVOD,
            )).filter(Platform.ignore_in_exports.is_(False)))
        for platform in platforms:
            yield {"platform": platform}
    elif self.iterator is ExportFactoryIterator.GROUPS:
        for group in query(PlatformGroup):
            yield {"group": group}
    elif self.iterator is ExportFactoryIterator.COUNTRIES:
        countries = (query(Platform.country).order_by(
            Platform.country).filter(
                func.char_length(Platform.country) == 2).filter(
                    or_(
                        Platform.type == PlatformType.TVOD,
                        Platform.type == PlatformType.SVOD,
                    )).filter(
                        Platform.ignore_in_exports.is_(False)).distinct())
        # SQLA tends to return tuples even when there is only one column
        for (country, ) in countries:
            yield {"country": country}
    else:
        yield {}
def attrs(attr="bra", Attr_id=None, depth=None):
    Attr = globals()[attr.capitalize()]
    Attr_weight_mergeid = "{0}_id".format(attr)

    if attr == "bra":
        Attr_weight_tbl = Yb
        Attr_weight_col = "population"
    elif attr == "cnae":
        Attr_weight_tbl = Yi
        Attr_weight_col = "num_jobs"
    elif attr == "cbo":
        Attr_weight_tbl = Yo
        Attr_weight_col = "num_jobs"
    elif attr == "hs":
        Attr_weight_tbl = Ymp
        Attr_weight_col = "export_val"
    elif attr == "wld":
        Attr_weight_tbl = Ymw
        Attr_weight_col = "export_val"
    elif attr == "course_hedu":
        Attr_weight_tbl = Yc_hedu
        Attr_weight_col = "enrolled"
    elif attr == "university":
        Attr_weight_tbl = Yu
        Attr_weight_col = "enrolled"
    elif attr == "school":
        Attr_weight_tbl = Ys
        Attr_weight_col = "enrolled"
    elif attr == "course_sc":
        Attr_weight_tbl = Yc_sc
        Attr_weight_col = "enrolled"

    depths = {}
    depths["bra"] = [1, 3, 5, 7, 9]
    depths["cnae"] = [1, 3, 6]
    depths["cbo"] = [1, 4]
    depths["hs"] = [2, 6]
    depths["wld"] = [2, 5]
    depths["course_hedu"] = [2, 6]
    depths["university"] = [5]
    depths["course_sc"] = [2, 5]
    depths["school"] = [8]

    depth = request.args.get('depth', depth)
    order = request.args.get('order', None)
    offset = request.args.get('offset', None)
    limit = request.args.get('limit', None)
    if offset:
        offset = float(offset)
        limit = limit or 50
    elif limit:
        offset = float(0)

    lang = request.args.get('lang', None) or g.locale
    ret = {}

    dataset = "rais"
    if Attr == Wld or Attr == Hs:
        dataset = "secex"
    elif Attr == Course_hedu or Attr == University:
        dataset = "hedu"
    elif Attr == Course_sc or Attr == School:
        dataset = "sc"
    elif Attr == Bra:
        dataset = "population"

    cache_id = "attrs:" + request.path + lang
    if depth:
        cache_id = cache_id + "/" + depth
    # first lets test if this query is cached
    cached_q = cached_query(cache_id)
    if cached_q and limit is None:
        ret = make_response(cached_q)
        ret.headers['Content-Encoding'] = 'gzip'
        ret.headers['Content-Length'] = str(len(ret.data))
        return ret

    # if an ID is supplied only return that
    if Attr_id:
        # the '.show.' indicates that we are looking for a specific nesting
        if ".show." in Attr_id:
            this_attr, ret["nesting_level"] = Attr_id.split(".show.")
            # filter table by requested nesting level
            attrs = Attr.query \
                .filter(Attr.id.startswith(this_attr)) \
                .filter(func.char_length(Attr.id) == ret["nesting_level"]).all()
        # the 'show.' indicates that we are looking for a specific nesting
        elif "show." in Attr_id:
            ret["nesting_level"] = Attr_id.split(".")[1]
            # filter table by requested nesting level
            attrs = Attr.query.filter(
                func.char_length(Attr.id) == ret["nesting_level"]).all()
        # the '.' here means we want to see all attrs within a certain distance
        elif "." in Attr_id:
            this_attr, distance = Attr_id.split(".")
            this_attr = Attr.query.get_or_404(this_attr)
            attrs = this_attr.get_neighbors(distance)
        else:
            attrs = [Attr.query.get_or_404(Attr_id)]
        ret["data"] = [fix_name(a.serialize(), lang) for a in attrs]

    # an ID/filter was not provided
    else:
        latest_year = __year_range__[dataset][-1]
        latest_month = False
        if "-" in latest_year:
            latest_year, latest_month = latest_year.split("-")
            latest_month = int(latest_month)
        latest_year = int(latest_year)
        conds = [
            getattr(Attr_weight_tbl, "{0}_id".format(attr)) == Attr.id,
            Attr_weight_tbl.year == latest_year
        ]
        if latest_month:
            conds.append(Attr_weight_tbl.month == latest_month)
        query = db.session.query(Attr, Attr_weight_tbl).outerjoin(
            Attr_weight_tbl, and_(*conds))

        if Attr == School:
            query = query.filter(Attr.is_vocational == 1)

        if depth:
            query = query.filter(func.char_length(Attr.id) == depth)
        else:
            query = query.filter(func.char_length(Attr.id).in_(depths[attr]))

        if order:
            direction = "asc"
            if "." in order:
                o, direction = order.split(".")
            else:
                o = order
            if o == "name":
                o = "name_{0}".format(lang)
            if o == Attr_weight_col:
                order_table = Attr_weight_tbl
            else:
                order_table = Attr
            if direction == "asc":
                query = query.order_by(asc(getattr(order_table, o)))
            elif direction == "desc":
                query = query.order_by(desc(getattr(order_table, o)))

        if limit:
            query = query.limit(limit).offset(offset)

        attrs_all = query.all()

        # just get items available in DB
        attrs_w_data = None
        if depth is None and limit is None:
            attrs_w_data = db.session.query(Attr, Attr_weight_tbl) \
                .filter(getattr(Attr_weight_tbl, Attr_weight_mergeid) == Attr.id) \
                .group_by(Attr.id)
            attrs_w_data = [a[0].id for a in attrs_w_data]

        attrs = []
        # all_planning_regions = {}
        for i, a in enumerate(attrs_all):
            b = a[0].serialize()
            if a[1]:
                c = a[1].serialize()
                if Attr_weight_col in c:
                    b[Attr_weight_col] = c[Attr_weight_col]
                else:
                    b[Attr_weight_col] = 0
            else:
                b[Attr_weight_col] = 0
            a = b
            if attrs_w_data:
                a["available"] = False
                if a["id"] in attrs_w_data:
                    a["available"] = True
            # if Attr_weight_col == "population" and len(a["id"]) == 9 and a["id"][:3] == "4mg":
            #     if not all_planning_regions:
            #         all_planning_regions = get_planning_region_map()
            #     if a["id"] in all_planning_regions:
            #         plr = all_planning_regions[a["id"]]
            #         a["plr"] = plr
            if order:
                a["rank"] = int(i + offset + 1)
            if attr == "bra" and "id_ibge" not in a:
                a["id_ibge"] = False
            attrs.append(fix_name(a, lang))

        ret["data"] = attrs

    ret = jsonify(ret)
    ret.data = gzip_data(ret.data)

    if limit is None and cached_q is None:
        cached_query(cache_id, ret.data)

    ret.headers['Content-Encoding'] = 'gzip'
    ret.headers['Content-Length'] = str(len(ret.data))

    return ret
def display_details(name):
    """Display detailed series information, ie. series show NAME"""
    from flexget.manager import Session
    with contextlib.closing(Session()) as session:
        name = normalize_series_name(name)
        # Sort by length of name, so that partial matches always show
        # shortest matching title
        matches = (session.query(Series)
                   .filter(Series._name_normalized.contains(name))
                   .order_by(func.char_length(Series.name)).all())
        if not matches:
            console('ERROR: Unknown series `%s`' % name)
            return
        # Pick the best matching series
        series = matches[0]
        console('Showing results for `%s`.' % series.name)
        if len(matches) > 1:
            console('WARNING: Multiple series match to `%s`.' % name)
            console('Be more specific to see the results of other matches:')
            for s in matches[1:]:
                console(' - %s' % s.name)

        console(' %-63s%-15s' % ('Identifier, Title', 'Quality'))
        console('-' * 79)

        # Query episodes in sane order instead of iterating from series.episodes
        episodes = session.query(Episode).filter(Episode.series_id == series.id)
        if series.identified_by == 'sequence':
            episodes = episodes.order_by(Episode.number).all()
        elif series.identified_by == 'ep':
            episodes = episodes.order_by(Episode.season, Episode.number).all()
        else:
            episodes = episodes.order_by(Episode.identifier).all()

        for episode in episodes:
            if episode.identifier is None:
                console(' None <--- Broken!')
            else:
                console(' %s (%s) - %s' % (episode.identifier,
                                           episode.identified_by or 'N/A',
                                           episode.age))
            for release in episode.releases:
                status = release.quality.name
                title = release.title
                if len(title) > 55:
                    title = title[:55] + '...'
                if release.proper_count > 0:
                    status += '-proper'
                    if release.proper_count > 1:
                        status += str(release.proper_count)
                if release.downloaded:
                    console(' * %-60s%-15s' % (title, status))
                else:
                    console('   %-60s%-15s' % (title, status))

        console('-' * 79)
        console(' * = downloaded')
        if not series.identified_by:
            console('')
            console(' Series plugin is still learning which episode numbering mode is ')
            console(' correct for this series (identified_by: auto).')
            console(' Few duplicate downloads can happen with different numbering schemes')
            console(' during this time.')
        else:
            console(' Series uses `%s` mode to identify episode numbering (identified_by).'
                    % series.identified_by)
            console(' See option `identified_by` for more information.')
        if series.begin:
            console(' Begin episode for this series set to `%s`.'
                    % series.begin.identifier)
def get_top_attr(self, tbl, val_var, attr_type, key, dataset):
    latest_year = __latest_year__[dataset]

    if key == "bra":
        length = 8
    elif key == "isic" or key == "wld":
        length = 5
    elif key == "cbo":
        length = 4
    elif key == "hs":
        length = 6

    if attr_type == "bra":
        agg = {'val_usd': func.sum,
               'eci': func.avg, 'eci_wld': func.avg, 'pci': func.avg,
               'val_usd_growth_pct': func.avg, 'val_usd_growth_pct_5': func.avg,
               'val_usd_growth_val': func.avg, 'val_usd_growth_val_5': func.avg,
               'distance': func.avg, 'distance_wld': func.avg,
               'opp_gain': func.avg, 'opp_gain_wld': func.avg,
               'rca': func.avg, 'rca_wld': func.avg,
               'wage': func.sum, 'num_emp': func.sum, 'num_est': func.sum,
               'ici': func.avg, 'oci': func.avg,
               'wage_growth_pct': func.avg, 'wage_growth_pct_5': func.avg,
               'wage_growth_val': func.avg, 'wage_growth_val_5': func.avg,
               'num_emp_growth_pct': func.avg, 'num_emp_pct_5': func.avg,
               'num_emp_growth_val': func.avg, 'num_emp_growth_val_5': func.avg,
               'importance': func.avg, 'required': func.avg}

        if self.id == "all":
            top = tbl.query
        else:
            bras = self.parse_bras(self.id)  # filter query
            if len(bras) > 1:
                col_names = ["{0}_id".format(key)]
                col_vals = [cast(agg[c](getattr(tbl, c)), Float)
                            if c in agg else getattr(tbl, c)
                            for c in col_names]
                top = tbl.query.with_entities(*col_vals).filter(
                    tbl.bra_id.in_([b["id"] for b in bras]))
            elif bras[0]["id"] != "all":
                top = tbl.query.filter(tbl.bra_id == bras[0]["id"])
    else:
        top = tbl.query.filter(getattr(tbl, attr_type + "_id") == self.id)

    top = top.filter_by(year=latest_year) \
        .filter(func.char_length(getattr(tbl, key + "_id")) == length) \
        .group_by(getattr(tbl, key + "_id")) \
        .order_by(func.sum(getattr(tbl, val_var)).desc())

    percent = 0
    if top.first() != None:
        if isinstance(top.first(), tuple):
            obj = globals()[key.title()].query.get(top.first()[0])
            percent = None
        else:
            obj = getattr(top.first(), key)
            num = float(getattr(top.first(), val_var))
            den = 0
            for x in top.all():
                value = getattr(x, val_var)
                if value:
                    den += float(value)
            percent = (num / float(den)) * 100
        return {"name": "top_{0}".format(key), "value": obj.name(),
                "percent": percent, "id": obj.id,
                "group": "{0}_stats_{1}".format(dataset, latest_year)}
    else:
        return {"name": "top_{0}".format(key), "value": "-",
                "group": "{0}_stats_{1}".format(dataset, latest_year)}
def setItem(self, release, path, item_type, data):
    """
    Set the data for the specified item.

    =============== =================================
    Parameter       Description
    =============== =================================
    release         Release path
    path            Item path inside of the structure
    item_type       The item type to create
    data            The JSON encoded data item
    =============== =================================

    ``Return:`` True on success
    """
    result = None
    session = None

    # Check if this item type is supported
    if not item_type in self._supportedItems:
        raise ValueError("unknown item type '%s'" % item_type)

    # Acquire name from path
    name = os.path.basename(path)

    try:
        session = self._manager.getSession()

        # Load parent object
        parent = self._get_parent(release, path)
        if not parent:
            raise ValueError("cannot find parent object for '%s'" % path)
        parent = session.merge(parent)

        # Check if the current path is a container for that kind of
        # item type
        if not item_type in self._supportedItems[parent.item_type]['container']:
            raise ValueError("'%s' is not allowed for container '%s'" %
                             (item_type, parent.item_type))

        # Load instance of ConfigItem
        item = self._manager._getConfigItem(name=name, item_type=item_type,
                                            release=release, add=True)
        session.commit()
        item = session.merge(item)
        item.path = path

        # Check if item will be renamed
        if "name" in data and name != data["name"]:
            item.name = data["name"]

        # Updated marker for assignable elements
        item.assignable = bool(self.getItemsAssignableElements(release, item))

        # Add us as child
        release_object = self._manager._getRelease(release)
        release_object = session.merge(release_object)
        release_object.config_items.append(item)
        parent.children.append(item)

        # Try to commit the changes
        session.commit()

        # Check if path has changed
        if "name" in data:
            newPath = os.path.dirname(path)
            if newPath != "/":
                newPath = newPath + "/"
            newPath = newPath + data['name']

            if newPath != path:
                # Update path values for the child config items.
                # Get those entries that start with 'oldB' and then
                # replace the oldB part in the path.
                oldB = path.rstrip("/")
                newB = newPath.rstrip("/")
                length = len(oldB)
                session.query(ConfigItem).filter(
                    ConfigItem.path.startswith(oldB)).update(
                        {ConfigItem.path: func.concat(
                            newB,
                            func.right(ConfigItem.path,
                                       func.char_length(ConfigItem.path) - length))},
                        synchronize_session=False)
                session.commit()

        result = True
    except:
        self.log.error("Caught unknown exception %s" % sys.exc_info()[0])
        session.rollback()
        raise
    finally:
        session.close()

    return result
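# Aside: a string-level sketch (assumption: it mirrors the SQL semantics) of
# the bulk UPDATE above. concat(newB, right(path, char_length(path) - length))
# keeps the tail of each path after the old prefix and glues the new prefix
# onto it:
def rewrite_path(path, oldB, newB):
    return newB + path[len(oldB):]

assert rewrite_path("/root/old/child", "/root/old", "/root/new") == "/root/new/child"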
def next(self):
    c = self.__class__
    return self.query.filter(c.id > self.id).filter(
        func.char_length(c.id) == len(self.id)).order_by(c.id).first()
def sections(self):
    sections = []

    ''' Trade Section '''
    if self.attr.id == "xxwld":
        export_tmap = Build("tree_map", "hs92", "export", self.attr, "all",
                            "show", self.year)

        this_yo = self.models.Yo.query.filter_by(year=self.year).all()
        export_val = sum([o.export_val for o in this_yo])
        export_subtitle = _(u"The total world trade in %(year)s was %(export_val)s.",
                            year=self.year,
                            export_val=num_format(export_val, "export_val"))
        export_subtitle += u" "

        past_yr = self.year - 5
        past_yo = self.models.Yo.query.filter_by(year=past_yr).filter(
            self.models.Yo.export_val.isnot(None)).all()
        growth_val = median([o.export_val_growth_pct_5 for o in this_yo])
        chg = "increased" if growth_val >= 0 else "decreased"
        export_subtitle += _(u"During the last five years exports have %(increased_decreased)s at a median annualized rate of %(change_rate)s%%, from $%(past_export_val)s in %(past_year)s to $%(current_export_val)s in %(current_year)s.",
                             increased_decreased=chg,
                             change_rate=num_format(growth_val * 100),
                             past_export_val=num_format(sum([o.export_val for o in past_yo])),
                             past_year=past_yr,
                             current_export_val=num_format(export_val),
                             current_year=self.year)
        export_subtitle += u" "

        top_exports = self.models.Yp.query.filter_by(
            year=self.year,
            hs92_id_len=6).order_by(desc("export_val")).limit(2).all()
        export_subtitle += _(u"The most recent exports are led by %(top_export)s which represent %(top_export_pct)s%% of the total products exported, followed by %(second_export)s, which account for %(second_export_pct)s%%.",
                             top_export=top_exports[0].product.get_profile_link(),
                             top_export_pct=num_format((top_exports[0].export_val / export_val) * 100),
                             second_export=top_exports[1].product.get_profile_link(),
                             second_export_pct=num_format((top_exports[1].export_val / export_val) * 100))

        origins_tmap = Build("tree_map", "hs92", "import", self.attr, "show",
                             "all", self.year)
        yo_exp = self.models.Yo.query.filter_by(year=self.year).order_by(
            desc("export_val")).limit(5).all()
        origin_list = self.stringify_items(yo_exp, "export_val", "country")
        origin_subtitle = _(u"The top exporters globally are %(origins)s.",
                            origins=origin_list)

        trade_section = {
            "builds": [
                {"title": _(u"Exports"), "build": export_tmap,
                 "subtitle": export_subtitle},
                {"title": _(u"Origins"), "build": origins_tmap,
                 "subtitle": origin_subtitle},
            ]
        }

    else:
        export_subtitle, import_subtitle, dest_subtitle, origin_subtitle = [None] * 4
        export_tmap = Build("tree_map", "hs92", "export", self.attr, "all",
                            "show", self.year)
        import_tmap = Build("tree_map", "hs92", "import", self.attr, "all",
                            "show", self.year)

        yop_base = self.models.Yop.query.filter_by(year=self.year,
                                                   origin=self.attr,
                                                   hs92_id_len=6)

        # get growth
        past_yr = self.year - 5
        past_yo = self.models.Yo.query.filter_by(year=past_yr,
                                                 country=self.attr).first()
        this_yo = self.models.Yo.query.filter_by(year=self.year,
                                                 country=self.attr).first()

        exp_val_stat = filter(lambda s: s["key"] == "export_val", self.stats())
        if exp_val_stat and this_yo:
            exp_val_stat = exp_val_stat.pop()
            export_subtitle = ""
            if self.attr.id != "xxwld":
                exp_rank = num_format(exp_val_stat["rank"], "ordinal") \
                    if exp_val_stat["rank"] > 1 else ""
                export_subtitle += _(u"In %(year)s %(country)s exported $%(export_val)s, making it the %(export_rank)s largest exporter in the world.",
                                     year=self.year,
                                     country=self.attr.get_name(article=True),
                                     export_val=num_format(exp_val_stat["val"]),
                                     export_rank=exp_rank)
                export_subtitle += u" "
            if past_yo and this_yo.export_val_growth_pct_5:
                chg = "increased" if this_yo.export_val_growth_pct_5 >= 0 else "decreased"
                export_subtitle += _(u"During the last five years the exports %(of_country)s have %(increased_decreased)s at an annualized rate of %(change_rate)s%%, from $%(past_export_val)s in %(past_year)s to $%(current_export_val)s in %(current_year)s.",
                                     of_country=self.attr.get_name(article="of"),
                                     increased_decreased=chg,
                                     change_rate=num_format(this_yo.export_val_growth_pct_5 * 100),
                                     past_export_val=num_format(past_yo.export_val),
                                     past_year=past_yr,
                                     current_export_val=num_format(this_yo.export_val),
                                     current_year=self.year)
                export_subtitle += u" "
            top_exports = yop_base.order_by(desc("export_val")).limit(2).all()
            if top_exports:
                export_subtitle += _(u"The most recent exports are led by %(top_export)s which represent %(top_export_pct)s%% of the total exports %(of_country)s, followed by %(second_export)s, which account for %(second_export_pct)s%%.",
                                     top_export=top_exports[0].product.get_profile_link(),
                                     top_export_pct=num_format((top_exports[0].export_val / exp_val_stat["val"]) * 100),
                                     of_country=self.attr.get_name(article="of"),
                                     second_export=top_exports[1].product.get_profile_link(),
                                     second_export_pct=num_format((top_exports[1].export_val / exp_val_stat["val"]) * 100))

        imp_val_stat = filter(lambda s: s["key"] == "import_val", self.stats())
        if imp_val_stat and this_yo:
            imp_val_stat = imp_val_stat.pop()
            import_subtitle = ""
            if self.attr.id != "xxwld":
                imp_rank = num_format(imp_val_stat["rank"], "ordinal") \
                    if imp_val_stat["rank"] > 1 else ""
                import_subtitle += _(u"In %(year)s %(country)s imported $%(import_val)s, making it the %(import_rank)s largest importer in the world.",
                                     year=self.year,
                                     country=self.attr.get_name(article=True),
                                     import_val=num_format(imp_val_stat["val"]),
                                     import_rank=imp_rank)
                import_subtitle += u" "
            if past_yo and this_yo.import_val_growth_pct_5:
                chg = "increased" if this_yo.import_val_growth_pct_5 >= 0 else "decreased"
                import_subtitle += _(u"During the last five years the imports %(of_country)s have %(increased_decreased)s at an annualized rate of %(change_rate)s%%, from $%(past_import_val)s in %(past_year)s to $%(current_import_val)s in %(current_year)s.",
                                     of_country=self.attr.get_name(article="of"),
                                     increased_decreased=chg,
                                     change_rate=num_format(this_yo.import_val_growth_pct_5 * 100),
                                     past_import_val=num_format(past_yo.import_val),
                                     past_year=past_yr,
                                     current_import_val=num_format(this_yo.import_val),
                                     current_year=self.year)
                import_subtitle += u" "
            top_imports = yop_base.order_by(desc("import_val")).limit(2).all()
            if top_imports:
                import_subtitle += _(u"The most recent imports are led by %(top_import)s which represent %(top_import_pct)s%% of the total imports %(of_country)s, followed by %(second_import)s, which account for %(second_import_pct)s%%.",
                                     top_import=top_imports[0].product.get_profile_link(),
                                     top_import_pct=num_format((top_imports[0].import_val / imp_val_stat["val"]) * 100),
                                     of_country=self.attr.get_name(article="of"),
                                     second_import=top_imports[1].product.get_profile_link(),
                                     second_import_pct=num_format((top_imports[1].import_val / imp_val_stat["val"]) * 100))

        dests_tmap = Build("tree_map", "hs92", "export", self.attr, "show",
                           "all", self.year)
        yod_exp = self.models.Yod.query.filter_by(
            year=self.year,
            origin=self.attr).order_by(desc("export_val")).limit(5).all()
        if yod_exp:
            dest_list = self.stringify_items(yod_exp, "export_val", "dest")
            dest_subtitle = _(u"The top export destinations %(of_country)s are %(destinations)s.",
                              of_country=self.attr.get_name(article="of"),
                              destinations=dest_list)

        origins_tmap = Build("tree_map", "hs92", "import", self.attr, "show",
                             "all", self.year)
        yod_imp = self.models.Yod.query.filter_by(
            year=self.year,
            dest=self.attr).order_by(desc("export_val")).limit(5).all()
        if yod_imp:
            origin_list = self.stringify_items(yod_imp, "export_val", "origin")
            origin_subtitle = _(u"The top import origins %(of_country)s are %(origins)s.",
                                of_country=self.attr.get_name(article="of"),
                                origins=origin_list)

        # trade balance viz --
        first_yo = self.models.Yo.query.filter_by(
            year=available_years["hs92"][-1], country=self.attr).first()
        tb_subtitle = ""
        tb_build = Build("line", "hs92", "show", self.attr, "all", "all",
                         available_years["hs92"])
        if first_yo:
            net_trade = this_yo.export_val - this_yo.import_val
            trade_balance = _("positive") if net_trade >= 0 else _("negative")
            trade_direction = _("exports") if net_trade >= 0 else _("imports")
            tb_subtitle = _(u"As of %(year)s %(country)s had a %(positive_negative)s trade balance of $%(net_trade)s in net %(exports_imports)s.",
                            year=self.year,
                            country=self.attr.get_name(article=True),
                            positive_negative=trade_balance,
                            net_trade=num_format(abs(net_trade)),
                            exports_imports=trade_direction)
            old_yo = self.models.Yo.query.filter_by(
                year=available_years["hs92"][0], country=self.attr).first()
            if old_yo:
                old_net_trade = old_yo.export_val - old_yo.import_val
                old_trade_balance = _("positive") if old_net_trade >= 0 else _("negative")
                old_trade_direction = _("exports") if old_net_trade >= 0 else _("imports")
                is_diff = True if old_trade_balance != trade_balance else False
                still_or_not = _("still") if old_trade_balance == trade_balance else ""
                tb_subtitle += u" "
                tb_subtitle += _(u"As compared to their trade balance in %(year)s when they %(still)s had a %(positive_negative)s trade balance of $%(net_trade)s in net %(exports_imports)s.",
                                 year=available_years["hs92"][0],
                                 still=still_or_not,
                                 positive_negative=old_trade_balance,
                                 net_trade=num_format(abs(old_net_trade)),
                                 exports_imports=old_trade_direction)

        trade_section = {
            "builds": [
                {"title": _(u"Exports"), "build": export_tmap,
                 "subtitle": export_subtitle,
                 "tour": "This is just a test", "seq": 5},
                {"title": _(u"Imports"), "build": import_tmap,
                 "subtitle": import_subtitle},
                {"title": _(u"Trade Balance"), "build": tb_build,
                 "subtitle": tb_subtitle},
                {"title": _(u"Destinations"), "build": dests_tmap,
                 "subtitle": dest_subtitle},
                {"title": _(u"Origins"), "build": origins_tmap,
                 "subtitle": origin_subtitle},
            ]
        }

    sections.append(trade_section)

    ''' Product Space Section '''
    subtitle = False
    if self.attr.id != "xxwld":
        num_exports_w_rca = db.session.query(
            func.count(self.models.Yop.hs92_id)) \
            .filter_by(year=self.year, origin=self.attr) \
            .filter(self.models.Yop.export_rca >= 1) \
            .filter(func.char_length(self.models.Yop.hs92_id) == 6) \
            .scalar()
        this_attr_yo = attrs.Yo.query.filter_by(year=self.year,
                                                country=self.attr).first()
        if this_attr_yo:
            eci = this_attr_yo.eci
            eci_rank = this_attr_yo.eci_rank
            if eci_rank:
                subtitle = _(u"The economy %(of_country)s has an Economic Complexity Index (ECI) of %(eci)s making it the %(eci_rank)s most complex country.",
                             of_country=self.attr.get_name(article="of"),
                             eci=num_format(eci),
                             eci_rank=num_format(eci_rank, "ordinal"))
                subtitle += u" "
            else:
                subtitle = ""
            subtitle += _(u"%(country)s exports %(num_of_exports)s products with revealed comparative advantage "
                          u"(meaning that its share of global exports is larger than what "
                          u"would be expected from the size of its export economy "
                          u"and from the size of a product’s global market).",
                          country=self.attr.get_name(article=True),
                          num_of_exports=num_exports_w_rca)

    product_space = Build("network", "hs92", "export", self.attr, "all",
                          "show", self.year)
    ps_text = _(u"The product space is a network connecting products that are likely to be co-exported and can be used to predict the evolution of a country’s export structure.")
    if subtitle:
        ps_text = u"{}</p><p>{}".format(ps_text, subtitle)
    ps_section = {
        "title": _(u"Economic Complexity %(of_country)s",
                   of_country=self.attr.get_name(article="of")),
        "builds": [
            {"title": _(u"Product Space"), "build": product_space,
             "subtitle": ps_text, "tour": "The product space...", "seq": 6}
        ]
    }

    ''' PGI Section '''
    if self.attr.id != "xxwld":
        pgi_product_space = Build("network", "sitc", "pgi", self.attr, "all",
                                  "show", available_years["sitc"][-1])
        subtitle = _("In this version of the product space products are colored according to their Product Gini Index, or PGI. The PGI of a product is the level of income inequality that we expect for the countries that export a product. For more information see: %(paper1)s and %(paper2)s.",
                     country=self.attr.get_name(article=True),
                     paper1="<a target='_blank' href='https://arxiv.org/abs/1505.07907'>Linking Economic Complexity, Institutions and Income Inequality</a>",
                     paper2="<a target='_blank' href='https://arxiv.org/abs/1701.03770'>The structural constraints of income inequality in Latin America</a>")
        ps_section["builds"].append({"title": _(u"Complexity and Income Inequality"),
                                     "build": pgi_product_space,
                                     "subtitle": subtitle})

    ''' ECI Ranking Section '''
    if self.attr.id == "xxwld":
        line_rankings = Build("line", "sitc", "eci", "show", "all", "all",
                              [y for y in available_years["sitc"] if y >= 1964])
        start_year = 1980
        start_year = max(1964, start_year) if start_year != 1980 else 1964
        year_range = self.year - start_year
        subtitle = _("The Economic Complexity of each country visualized over the past %(year_range)s years.",
                     year_range=year_range)
        ps_section["builds"].append({"title": _(u"Economic Complexity Ranking"),
                                     "build": line_rankings,
                                     "subtitle": subtitle})
    elif this_attr_yo and this_attr_yo.eci != None:
        line_rankings = Build("line", "sitc", "eci", "show", self.attr, "all",
                              [y for y in available_years["sitc"] if y >= 1964])
        start_year = earliest_data.get(self.attr.id, 1980)
        start_year = max(1964, start_year) if start_year != 1980 else 1964
        year_range = self.year - start_year
        attr_yo_historic = attrs.Yo.query.filter_by(country=self.attr).filter(
            attrs.Yo.year == start_year).first()
        if attr_yo_historic and attr_yo_historic.eci_rank != None \
                and this_attr_yo.eci_rank != None:
            eci_delta = this_attr_yo.eci_rank - attr_yo_historic.eci_rank
            inc_dec = _('increased') if eci_delta < 0 else _('decreased')
            subtitle = _("The Economic Complexity ranking %(of_country)s has %(increased_or_decreased)s by %(rank_delta)s places over the past %(year_range)s years from %(old_eci)s in %(old_year)s to %(current_eci)s in %(current_year)s.",
                         of_country=self.attr.get_name(article="of"),
                         increased_or_decreased=inc_dec,
                         rank_delta=abs(eci_delta),
                         year_range=year_range,
                         old_eci=num_format(attr_yo_historic.eci_rank, "ordinal"),
                         old_year=start_year,
                         current_eci=num_format(this_attr_yo.eci_rank, "ordinal"),
                         current_year=self.year)
            ps_section["builds"].append({"title": _(u"Economic Complexity Ranking"),
                                         "build": line_rankings,
                                         "subtitle": subtitle})

    sections.append(ps_section)

    sections.append({
        "title": _(u"More on %(country)s from our other sites",
                   country=self.attr.get_name(article=True)),
        "source": "sisters"
    })

    ''' DataViva '''
    # dv_section = make_dv_section(self)
    # sections.append(dv_section)

    ''' Data USA '''
    if self.attr.id == "nausa":
        us_section = make_us_section()
        sections.append(us_section)

    ''' Data Africa '''
    if any(country[0] == self.attr.id for country in data_africa_countries):
        da_country = filter(lambda x: x[0] == self.attr.id,
                            data_africa_countries)
        africa_section = make_africa_section(self, da_country[0])
        sections.append(africa_section)
        # raise Exception("found dataafrican country: {}".format(da_country[0]))

    ''' Pantheon '''
    pantheon_id = "all" if self.attr.id == "xxwld" else self.attr.id_2char
    if pantheon_id:
        if self.attr.id != "xxwld":
            pantheon_id = pantheon_id.upper()
        pantheon_section = make_pantheon_section(pantheon_id, self.attr)
        sections.append(pantheon_section)

    return sections
def sections(self): sections = [] ''' Trade Section ''' export_subtitle, import_subtitle, dest_subtitle, origin_subtitle = [ None ] * 4 export_tmap = Build("tree_map", "hs92", "export", self.attr, "all", "show", self.year) import_tmap = Build("tree_map", "hs92", "import", self.attr, "all", "show", self.year) yop_base = self.models.Yop.query.filter_by(year=self.year, origin=self.attr, hs92_id_len=6) # get growth past_yr = self.year - 5 past_yo = self.models.Yo.query.filter_by(year=past_yr, country=self.attr).first() this_yo = self.models.Yo.query.filter_by(year=self.year, country=self.attr).first() exp_val_stat = filter(lambda s: s["key"] == "export_val", self.stats()) if exp_val_stat: exp_val_stat = exp_val_stat.pop() exp_rank = num_format( exp_val_stat["rank"], "ordinal") if exp_val_stat["rank"] > 1 else "" export_subtitle = _( u"In %(year)s %(country)s exported $%(export_val)s, making it the %(export_rank)s largest exporter in the world. ", year=self.year, country=self.attr.get_name(article=True), export_val=num_format(exp_val_stat["val"]), export_rank=exp_rank) if past_yo: chg = "increased" if this_yo.export_val_growth_pct_5 >= 0 else "decreased" export_subtitle += _(u"During the last five years the exports %(of_country)s have %(increased_decreased)s at an annualized rate of %(change_rate)s%%, from $%(past_export_val)s in %(past_year)s to $%(current_export_val)s in %(current_year)s. ", of_country=self.attr.get_name(article="of"), increased_decreased=chg, change_rate=num_format(this_yo.export_val_growth_pct_5*100), \ past_export_val=num_format(past_yo.export_val), past_year=past_yr, current_export_val=num_format(this_yo.export_val), current_year=self.year) top_exports = yop_base.order_by(desc("export_val")).limit(2).all() if top_exports: # raise Exception(top_exports[0].product.get_profile_link(), num_format((top_exports[0].export_val/exp_val_stat["val"])*100), self.attr.get_name(article="of"), top_exports[1].product.get_profile_link(), num_format((top_exports[1].export_val/exp_val_stat["val"])*100)) export_subtitle += _(u"The most recent exports are led by %(top_export)s which represent %(top_export_pct)s%% of the total exports %(of_country)s, followed by %(second_export)s, which account for %(second_export_pct)s%%.", top_export=top_exports[0].product.get_profile_link(), top_export_pct=num_format((top_exports[0].export_val/exp_val_stat["val"])*100), \ of_country=self.attr.get_name(article="of"), second_export=top_exports[1].product.get_profile_link(), second_export_pct=num_format((top_exports[1].export_val/exp_val_stat["val"])*100)) imp_val_stat = filter(lambda s: s["key"] == "import_val", self.stats()) if imp_val_stat: imp_val_stat = imp_val_stat.pop() imp_rank = num_format( imp_val_stat["rank"], "ordinal") if imp_val_stat["rank"] > 1 else "" import_subtitle = _( u"In %(year)s %(country)s imported $%(import_val)s, making it the %(import_rank)s largest importer in the world. ", year=self.year, country=self.attr.get_name(article=True), import_val=num_format(imp_val_stat["val"]), import_rank=imp_rank) if past_yo: chg = "increased" if this_yo.import_val_growth_pct_5 >= 0 else "decreased" import_subtitle += _(u"During the last five years the imports %(of_country)s have %(increased_decreased)s at an annualized rate of %(change_rate)s%%, from $%(past_import_val)s in %(past_year)s to $%(current_import_val)s in %(current_year)s. 
", of_country=self.attr.get_name(article="of"), increased_decreased=chg, change_rate=num_format(this_yo.import_val_growth_pct_5*100), \ past_import_val=num_format(past_yo.import_val), past_year=past_yr, current_import_val=num_format(this_yo.import_val), current_year=self.year) top_imports = yop_base.order_by(desc("import_val")).limit(2).all() if top_imports: import_subtitle += _(u"The most recent imports are led by %(top_import)s which represent %(top_import_pct)s%% of the total imports %(of_country)s, followed by %(second_import)s, which account for %(second_import_pct)s%%.", top_import=top_imports[0].product.get_profile_link(), top_import_pct=num_format((top_imports[0].import_val/imp_val_stat["val"])*100), \ of_country=self.attr.get_name(article="of"), second_import=top_imports[1].product.get_profile_link(), second_import_pct=num_format((top_imports[1].import_val/imp_val_stat["val"])*100)) dests_tmap = Build("tree_map", "hs92", "export", self.attr, "show", "all", self.year) yod_exp = self.models.Yod.query.filter_by( year=self.year, origin=self.attr).order_by(desc("export_val")).limit(5).all() if yod_exp: dest_list = self.stringify_items(yod_exp, "export_val", "dest") dest_subtitle = _( u"The top export destinations %(of_country)s are %(destinations)s.", of_country=self.attr.get_name(article="of"), destinations=dest_list) origins_tmap = Build("tree_map", "hs92", "import", self.attr, "show", "all", self.year) yod_imp = self.models.Yod.query.filter_by( year=self.year, dest=self.attr).order_by(desc("export_val")).limit(5).all() if yod_imp: origin_list = self.stringify_items(yod_imp, "export_val", "origin") origin_subtitle = _( u"The top import origins %(of_country)s are %(origins)s.", of_country=self.attr.get_name(article="of"), origins=origin_list) # trade balance viz -- first_yo = self.models.Yo.query.filter_by( year=available_years["hs92"][-1], country=self.attr).first() tb_subtitle = "" tb_build = Build("line", "hs92", "show", self.attr, "all", "all", available_years["hs92"]) if first_yo: net_trade = this_yo.export_val - this_yo.import_val trade_balance = _("positive") if net_trade >= 0 else _("negative") trade_direction = _("exports") if net_trade >= 0 else _("imports") tb_subtitle = _( u"As of %(year)s %(country)s had a %(positive_negative)s trade balance of $%(net_trade)s in net %(exports_imports)s.", year=self.year, country=self.attr.get_name(article=True), positive_negative=trade_balance, net_trade=num_format(abs(net_trade)), exports_imports=trade_direction) old_yo = self.models.Yo.query.filter_by( year=available_years["hs92"][0], country=self.attr).first() if old_yo: old_net_trade = old_yo.export_val - old_yo.import_val old_trade_balance = _("positive") if old_net_trade >= 0 else _( "negative") old_trade_direction = _( "exports") if old_net_trade >= 0 else _("imports") is_diff = True if old_trade_balance != trade_balance else False still_or_not = _( "still") if old_trade_balance == trade_balance else "" tb_subtitle += _( u" As compared to their trade balance in %(year)s when they %(still)s had a %(positive_negative)s trade balance of $%(net_trade)s in net %(exports_imports)s.", year=available_years["hs92"][0], still=still_or_not, positive_negative=old_trade_balance, net_trade=num_format(abs(old_net_trade)), exports_imports=old_trade_direction) trade_section = { "builds": [ { "title": _(u"Exports"), "build": export_tmap, "subtitle": export_subtitle, "tour": "This is just a test", "seq": 5 }, { "title": _(u"Imports"), "build": import_tmap, "subtitle": import_subtitle }, { "title": 
_(u"Trade Balance"), "build": tb_build, "subtitle": tb_subtitle }, { "title": _(u"Destinations"), "build": dests_tmap, "subtitle": dest_subtitle }, { "title": _(u"Origins"), "build": origins_tmap, "subtitle": origin_subtitle }, ] } sections.append(trade_section) ''' Product Space Section ''' num_exports_w_rca = db.session.query(func.count(self.models.Yop.hs92_id)) \ .filter_by(year = self.year, origin = self.attr) \ .filter(self.models.Yop.export_rca >= 1) \ .filter(func.char_length(self.models.Yop.hs92_id)==6) \ .scalar() this_attr_yo = attrs.Yo.query.filter_by(year=self.year, country=self.attr).first() if this_attr_yo: eci = this_attr_yo.eci eci_rank = this_attr_yo.eci_rank if eci_rank: subtitle = _( u"The economy %(of_country)s has an Economic Complexity Index (ECI) of %(eci)s making it the %(eci_rank)s most complex country. ", of_country=self.attr.get_name(article="of"), eci=num_format(eci), eci_rank=num_format(eci_rank, "ordinal")) else: subtitle = "" subtitle += _(u"%(country)s exports %(num_of_exports)s products with revealed comparative advantage " \ u"(meaning that its share of global exports is larger than what " \ u"would be expected from the size of its export economy " \ u"and from the size of a product’s global market).", country=self.attr.get_name(article=True), num_of_exports=num_exports_w_rca) else: subtitle = "" product_space = Build("network", "hs92", "export", self.attr, "all", "show", self.year) ps_text = _( u"The product space is a network connecting products that are likely to be co-exported and can be used to predict the evolution of a country’s export structure." ) ps_text = u"{}</p><p>{}".format(ps_text, subtitle) ps_section = { "title": _(u"Economic Complexity %(of_country)s", of_country=self.attr.get_name(article="of")), "builds": [{ "title": _(u"Product Space"), "build": product_space, "subtitle": ps_text, "tour": "The product space...", "seq": 6 }] } ''' ECI Ranking Section ''' if this_attr_yo and this_attr_yo.eci != None: line_rankings = Build( "line", "sitc", "eci", "show", self.attr, "all", [y for y in available_years["sitc"] if y >= 1964]) start_year = earliest_data.get(self.attr.id, 1980) start_year = max(1964, start_year) if start_year != 1980 else 1964 year_range = self.year - start_year attr_yo_historic = attrs.Yo.query.filter_by( country=self.attr).filter(attrs.Yo.year == start_year).first() if attr_yo_historic.eci_rank: eci_delta = this_attr_yo.eci_rank - attr_yo_historic.eci_rank inc_dec = _('increased') if eci_delta < 0 else _('decreased') subtitle = _( """The Economic Complexity ranking %(of_country)s has %(increased_or_decreased)s by %(rank_delta)s places over the past %(year_range)s years from %(old_eci)s in %(old_year)s to %(current_eci)s in %(current_year)s.""", of_country=self.attr.get_name(article="of"), increased_or_decreased=inc_dec, rank_delta=abs(eci_delta), year_range=year_range, old_eci=num_format(attr_yo_historic.eci_rank, "ordinal"), old_year=start_year, current_eci=num_format(this_attr_yo.eci_rank, "ordinal"), current_year=self.year) ps_section["builds"].append({ "title": _(u"Economic Complexity Ranking"), "build": line_rankings, "subtitle": subtitle }) sections.append(ps_section) sections.append({ "title": _(u"More on %(country)s from our sister sites", country=self.attr.get_name(article=True)), "source": "sisters" }) ''' DataViva ''' if self.attr.id == "sabra": dv_geo_map = "http://en.dataviva.info/apps/embed/geo_map/secex/all/all/all/bra/?color=export_val&controls=false&year=2013" dv_wages = 
"http://en.dataviva.info/apps/embed/bar/rais/all/all/all/bra/?controls=false&year=2013" dv_geo_map_subtitle = _(u""" This map shows the exports of Brazil by state.<br /><br /> DataViva is a visualization tool that provides official data on trade, industries, and education throughout Brazil. If you would like more info or to create a similar site get in touch with us at <a href='mailto:[email protected]'>[email protected]</a>. </p><p><a target='_blank' href='http://en.dataviva.info/apps/builder/geo_map/secex/all/all/all/bra/?color=export_val&controls=false&year=2013'>Explore on DataViva <i class='fa fa-external-link'></i></a> """) dv_wages_subtitle = _(u""" This bar chart shows the wage distribution for the working population in Brazil. </p><p><a target='_blank' href='http://en.dataviva.info/apps/builder/bar/rais/all/all/all/bra/?controls=false&year=2013'>Explore on DataViva <i class='fa fa-external-link'></i></a> """) dv_section = { "title": u"<a href='http://dataviva.info/' target='_blank'><img src='http://en.dataviva.info/static/img/nav/DataViva.png' /></a>", "source": "dataviva", "builds": [ { "title": _(u"State Exports"), "iframe": dv_geo_map, "subtitle": dv_geo_map_subtitle, "tour": "Profile pages also contain visualizations from other websites created by member of the OEC team. The following are 2 embeded visualization from DataViva, a similar visualization platorm centered around Brazilian data.", "seq": 7 }, { "title": _(u"Wage Distribution"), "iframe": dv_wages, "subtitle": dv_wages_subtitle }, ] } else: dv_country_id = "asrus" if self.attr.id == "eurus" else self.attr.id dv_munic_dest_iframe = "http://dataviva.info/apps/embed/tree_map/secex/all/all/{}/bra/?size=import_val&controls=false".format( dv_country_id) dv_munic_origin_iframe = "http://dataviva.info/apps/embed/tree_map/secex/all/all/{}/bra/?size=export_val&controls=false".format( dv_country_id) dv_munic_dest_subtitle = _( u""" This treemap shows the municipalities in Brazil that imported products from %(country)s.<br /><br /> DataViva is a visualization tool that provides official data on trade, industries, and education throughout Brazil. If you would like more info or to create a similar site get in touch with us at <a href='mailto:[email protected]'>[email protected]</a>. </p><p><a target='_blank' href='http://dataviva.info/apps/builder/tree_map/secex/all/all/%(dv_country_id)s/bra/?size=import_val&controls=false'>Explore on DataViva <i class='fa fa-external-link'></i></a> """, country=self.attr.get_name(article=True), dv_country_id=dv_country_id) dv_munic_origin_subtitle = _( u""" This treemap shows the municipalities in Brazil that exported products to %(country)s. </p><p><a target='_blank' href='http://dataviva.info/apps/builder/tree_map/secex/all/all/%(dv_country_id)s/bra/?size=export_val&controls=false'>Explore on DataViva <i class='fa fa-external-link'></i></a> """, country=self.attr.get_name(article=True), dv_country_id=dv_country_id) dv_section = { "title": u"<a href='http://dataviva.info/' target='_blank'><img src='http://en.dataviva.info/static/img/nav/DataViva.png' /></a>", "source": "dataviva", "builds": [ { "title": _(u"Brazilian Municipalities that import from %(country)s", country=self.attr.get_name(article=True)), "iframe": dv_munic_dest_iframe, "subtitle": dv_munic_dest_subtitle, "tour": "Profile pages also contain visualizations from other websites created by member of the OEC team. 
The following are 2 embeded visualization from DataViva, a similar visualization platorm centered around Brazilian data.", "seq": 7 }, { "title": _(u"Brazilian Municipalities that export to %(country)s", country=self.attr.get_name(article=True)), "iframe": dv_munic_origin_iframe, "subtitle": dv_munic_origin_subtitle }, ] } sections.append(dv_section) ''' Pantheon ''' if self.attr.id_2char: pantheon_iframe_fields = "http://pantheon.media.mit.edu/treemap/country_exports/{}/all/-4000/2010/H15/pantheon/embed".format( self.attr.id_2char.upper()) pantheon_link_fields = "<a target='_blank' href='http://pantheon.media.mit.edu/treemap/country_exports/{}/all/-4000/2010/H15/pantheon/'>Explore on Pantheon <i class='fa fa-external-link'></i></a>".format( self.attr.id_2char.upper()) pantheon_iframe_cities = "http://pantheon.media.mit.edu/treemap/country_by_city/all/{}/-4000/2010/H15/pantheon/embed".format( self.attr.id_2char.upper()) pantheon_link_cities = "<a target='_blank' href='http://pantheon.media.mit.edu/treemap/country_by_city/{}/all/-4000/2010/H15/pantheon/'>Explore on Pantheon <i class='fa fa-external-link'></i></a>".format( self.attr.id_2char.upper()) pantheon_section = { "title": "<a target='_blank' href='http://pantheon.media.mit.edu'><img src='http://pantheon.media.mit.edu/pantheon_logo.png' />", "source": "pantheon", "builds": [ { "title": _(u"Globally Famous People %(of_country)s", of_country=self.attr.get_name(article="of")), "iframe": pantheon_iframe_fields, "subtitle": _(u"This treemap shows the cultural exports %(of_country)s, as proxied by the production of globally famous historical characters.</p><p>%(pantheon_link)s", of_country=self.attr.get_name(article="of"), pantheon_link=pantheon_link_fields), "tour": "Pantheon...", "seq": 8 }, { "title": _(u"Globally Famous People %(of_country)s by City", of_country=self.attr.get_name(article="of")), "iframe": pantheon_iframe_cities, "subtitle": _(u"This treemap shows the cultural exports %(of_country)s by city, as proxied by the production of globally famous historical characters.</p><p>%(pantheon_link)s", of_country=self.attr.get_name(article="of"), pantheon_link=pantheon_link_cities) }, ] } sections.append(pantheon_section) return sections
def get_top_attr(self, tbl, val_var, attr_type, key, dataset):
    latest_year = __latest_year__[dataset]
    if key == "bra":
        length = 8
    elif key == "isic" or key == "wld":
        length = 5
    elif key == "cbo":
        length = 4
    elif key == "hs":
        length = 6
    if attr_type == "bra":
        # how to aggregate each column when rolling locations up
        # (the original literal repeated 'distance', 'opp_gain' and 'rca';
        # the duplicates are dropped here since later keys silently win)
        agg = {
            'val_usd': func.sum, 'wage': func.sum, 'num_emp': func.sum,
            'num_est': func.sum,
            'eci': func.avg, 'eci_wld': func.avg, 'pci': func.avg,
            'ici': func.avg, 'oci': func.avg,
            'val_usd_growth_pct': func.avg, 'val_usd_growth_pct_5': func.avg,
            'val_usd_growth_val': func.avg, 'val_usd_growth_val_5': func.avg,
            'wage_growth_pct': func.avg, 'wage_growth_pct_5': func.avg,
            'wage_growth_val': func.avg, 'wage_growth_val_5': func.avg,
            'num_emp_growth_pct': func.avg, 'num_emp_pct_5': func.avg,
            'num_emp_growth_val': func.avg, 'num_emp_growth_val_5': func.avg,
            'distance': func.avg, 'distance_wld': func.avg,
            'opp_gain': func.avg, 'opp_gain_wld': func.avg,
            'rca': func.avg, 'rca_wld': func.avg,
            'importance': func.avg, 'required': func.avg,
        }
        if self.id == "all":
            top = tbl.query
        else:
            bras = self.parse_bras(self.id)  # filter query
            if len(bras) > 1:
                col_names = ["{0}_id".format(key)]
                col_vals = [
                    cast(agg[c](getattr(tbl, c)), Float) if c in agg
                    else getattr(tbl, c) for c in col_names
                ]
                top = tbl.query.with_entities(*col_vals).filter(
                    tbl.bra_id.in_([b["id"] for b in bras]))
            elif bras[0]["id"] != "all":
                top = tbl.query.filter(tbl.bra_id == bras[0]["id"])
    else:
        top = tbl.query.filter(getattr(tbl, attr_type + "_id") == self.id)
    top = top.filter_by(year=latest_year) \
        .filter(func.char_length(getattr(tbl, key + "_id")) == length) \
        .group_by(getattr(tbl, key + "_id")) \
        .order_by(func.sum(getattr(tbl, val_var)).desc())
    percent = 0
    if top.first() is not None:
        if isinstance(top.first(), tuple):
            obj = globals()[key.title()].query.get(top.first()[0])
            percent = None
        else:
            obj = getattr(top.first(), key)
            num = float(getattr(top.first(), val_var))
            den = 0
            for x in top.all():
                value = getattr(x, val_var)
                if value:
                    den += float(value)
            percent = (num / float(den)) * 100
        return {
            "name": "top_{0}".format(key),
            "value": obj.name(),
            "percent": percent,
            "id": obj.id,
            "group": "{0}_stats_{1}".format(dataset, latest_year),
        }
    else:
        return {
            "name": "top_{0}".format(key),
            "value": "-",
            "group": "{0}_stats_{1}".format(dataset, latest_year),
        }
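# A hedged sketch (illustrative names, not the real dataviva schema) of the
# core query shape get_top_attr() builds above: group rows by an attribute id
# at a fixed hierarchy depth, sum the value column, take the largest group.
from sqlalchemy import func

def top_by_sum(session, tbl, id_col, val_col, year, id_len):
    # returns one (id, total) row: the attribute with the largest summed value
    return (
        session.query(id_col, func.sum(val_col).label("total"))
        .filter(tbl.year == year)
        .filter(func.char_length(id_col) == id_len)
        .group_by(id_col)
        .order_by(func.sum(val_col).desc())
        .first()
    )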
def get_val(self, tbl, val_var, attr_type, dataset, latest_year=None):
    if latest_year is None:
        latest_year = __latest_year__[dataset]
    if val_var == "wage_avg":
        calc_var = val_var
        val_var = "wage"
    else:
        calc_var = None
    if attr_type == "bra":
        # how to aggregate each column when rolling locations up
        # (the original literal repeated 'distance', 'opp_gain' and 'rca';
        # the duplicates are dropped here since later keys silently win)
        agg = {
            'population': func.sum, 'val_usd': func.sum, 'wage': func.sum,
            'num_emp': func.sum, 'num_est': func.sum,
            'eci': func.avg, 'eci_wld': func.avg, 'pci': func.avg,
            'ici': func.avg, 'oci': func.avg,
            'val_usd_growth_pct': func.avg, 'val_usd_growth_pct_5': func.avg,
            'val_usd_growth_val': func.avg, 'val_usd_growth_val_5': func.avg,
            'wage_growth_pct': func.avg, 'wage_growth_pct_5': func.avg,
            'wage_growth_val': func.avg, 'wage_growth_val_5': func.avg,
            'num_emp_growth_pct': func.avg, 'num_emp_pct_5': func.avg,
            'num_emp_growth_val': func.avg, 'num_emp_growth_val_5': func.avg,
            'distance': func.avg, 'distance_wld': func.avg,
            'opp_gain': func.avg, 'opp_gain_wld': func.avg,
            'rca': func.avg, 'rca_wld': func.avg,
            'importance': func.avg, 'required': func.avg,
        }
        if self.id == "all":
            col_names = [val_var]
            col_vals = [
                cast(agg[c](getattr(tbl, c)), Float) if c in agg
                else getattr(tbl, c) for c in col_names
            ]
            total = tbl.query.with_entities(*col_vals)
            if dataset == "rais":
                total = total.filter(func.char_length(getattr(tbl, "isic_id")) == 1)
            elif dataset == "secex":
                total = total.filter(func.char_length(getattr(tbl, "hs_id")) == 2)
            elif dataset == "population":
                total = total.filter(func.char_length(getattr(tbl, "bra_id")) == 2)
        else:
            bras = self.parse_bras(self.id)  # filter query
            if len(bras) > 1:
                col_names = [val_var]
                col_vals = [
                    cast(agg[c](getattr(tbl, c)), Float) if c in agg
                    else getattr(tbl, c) for c in col_names
                ]
                total = tbl.query.with_entities(*col_vals).filter(
                    tbl.bra_id.in_([b["id"] for b in bras]))
            elif bras[0]["id"] != "all":
                total = tbl.query.filter(tbl.bra_id == bras[0]["id"])
    else:
        total = tbl.query.filter(getattr(tbl, attr_type + "_id") == self.id)
    total = total.filter_by(year=latest_year).first()
    if total is not None:
        if isinstance(total, tuple):
            val = total[0]
        else:
            val = getattr(total, val_var)
        if calc_var == "wage_avg":
            val = float(val) / getattr(total, "num_emp")
    else:
        val = 0
    if val_var == "population":
        group = ""
        name = "population_{0}".format(latest_year)
    else:
        group = "{0}_stats_{1}".format(dataset, latest_year)
        if calc_var:
            name = calc_var
        else:
            name = "total_{0}".format(val_var)
    return {"name": name, "value": val, "group": group}
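# get_val() derives wage_avg as total wages over total employment rather than
# averaging per-row wages. A minimal sketch of that derivation, assuming a
# mapped table with numeric `wage` and `num_emp` columns (illustrative names):
from sqlalchemy import func

def wage_avg(session, tbl, year):
    total_wage, total_emp = (
        session.query(func.sum(tbl.wage), func.sum(tbl.num_emp))
        .filter(tbl.year == year)
        .one()
    )
    # guard against an empty year: SUM over zero rows yields NULL/None
    return float(total_wage) / total_emp if total_emp else None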
def _get_field_config(Key, Value, name, config, language, mandatory=False): # Determine XType xtype = 'textfield' predefined = None try: if config['type'] == 'Number': xtype = 'numberfield' elif config['type'] == 'Date': xtype = 'datefield' elif config['type'] == 'File': xtype = 'filefield' elif 'predefined' in config: xtype = 'combobox' predefined = config['predefined'] except KeyError: # Values from local YAML are one level deeper try: if 'predefined' in config['values']: #print "***" xtype = 'combobox' predefined = config['values']['predefined'] except KeyError: pass if language.id == 1: # English: no database query needed because YAML is in English by # default fieldLabel = fieldName = name if predefined: # For combobox, sort the values before returning it store = [] for i in sorted(predefined): store.append([i, i]) else: # Not English: Prepare query to translated keys keyTranslated = Session.query( Key.fk_key.label('original_id'), Key.key.label('translated') ).\ filter(Key.language == language).\ subquery() keys = Session.query( Key.key.label('original'), keyTranslated.c.translated.label('translated'), # Use column 'keyorvalue' to separate keys (0) from values (1) func.char_length('').label('keyorvalue') ).\ filter(Key.key == name).\ filter(Key.original == None).\ outerjoin(keyTranslated, keyTranslated.c.original_id == Key.id) query = keys # If predefined values are available, query these values as well (use # union to perform only one query) if predefined: # Collect values all_vals = [] for val in predefined: all_vals.append(val) valuesTranslated = Session.query( Value.fk_value.label('original_id'), Value.value.label('translated') ).\ filter(Value.language == language).\ subquery() values = Session.query( Value.value.label('original'), valuesTranslated.c.translated.label('translated'), # Use column 'keyorvalue' to separate keys (0) from values (1) func.char_length(' ').label('keyorvalue') ).\ filter(Value.value.in_(all_vals)).\ filter(Value.original == None).\ outerjoin(valuesTranslated, valuesTranslated.c.original_id == Value.id) query = keys.union(values) # Collect the values store = [] for x in query.all(): if x.keyorvalue == 0: # Key fieldName = x.original fieldLabel = (x.translated if x.translated is not None else x.original) elif x.keyorvalue == 1: # Value: First value is internal (original) value, second is # external (translated) value s = [] s.append(x.original) if x.translated is not None: s.append(x.translated) else: s.append(x.original) store.append(s) # Sort store by their display (translated) value if len(store) > 0: store = sorted(store, key=lambda value: value[1]) # Don't try to put together fieldConfig if fieldLabel or fieldName are not # there yet. This happens when initially loading application without any # Keys or Values in database. try: fieldLabel fieldName except: return None # Prepare return object fieldConfig = { 'allowBlank': not mandatory, 'fieldLabel': fieldLabel, 'xtype': xtype, 'name': fieldName } if xtype == 'combobox': fieldConfig['store'] = store try: fieldConfig['validator'] = config['validator'] except KeyError: pass return fieldConfig
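# The func.char_length('') and func.char_length(' ') calls above are a trick:
# they evaluate to the constants 0 and 1 and survive the UNION as the
# 'keyorvalue' discriminator column. A more explicit spelling of the same
# idea, reusing the snippet's own Session/Key/Value names, is
# sqlalchemy.literal (a hedged fragment, not a drop-in replacement):
from sqlalchemy import literal

keys = Session.query(
    Key.key.label('original'),
    literal(0).label('keyorvalue'),    # 0 = this row is a key
).filter(Key.original == None)
values = Session.query(
    Value.value.label('original'),
    literal(1).label('keyorvalue'),    # 1 = this row is a value
).filter(Value.original == None)
query = keys.union(values)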
def collection_by_depth(base, depth=None):
    # every attribute at one level of the id hierarchy; note that leaving
    # depth as None compiles to `char_length(id) IS NULL`, which matches
    # nothing, so callers should always pass an explicit depth
    return db.session.query(base).filter(
        func.char_length(base.id) == depth)
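# A self-contained toy (hypothetical model, in-memory SQLite, SQLAlchemy 1.4+
# style) of the depth-by-id-length pattern above. SQLite spells the ANSI
# char_length() function as length(), so func.length is used here; the
# func.char_length seen throughout these snippets is the portable name on
# MySQL/PostgreSQL backends.
from sqlalchemy import create_engine, Column, String, func
from sqlalchemy.orm import declarative_base, Session

Base = declarative_base()

class Bra(Base):  # toy stand-in for the attribute models above
    __tablename__ = "bra"
    id = Column(String, primary_key=True)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([Bra(id="4mg"), Bra(id="4mg010101"), Bra(id="4sp")])
    session.commit()
    # states have 3-character ids, municipalities 9-character ids
    states = session.query(Bra).filter(func.length(Bra.id) == 3).all()
    print(sorted(b.id for b in states))  # ['4mg', '4sp']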
def prev(self):
    # previous sibling: the closest smaller id at the same hierarchy depth
    c = self.__class__
    return self.query.filter(c.id < self.id) \
        .filter(func.char_length(c.id) == len(self.id)) \
        .order_by(c.id.desc()).first()
def parse_filter(kwargs, id_type, query, data_table, ret): from dataviva.attrs.models import Bra, Isic, Cbo, Hs, Wld query = query.group_by(getattr(data_table, id_type)) cat = id_type.split("_")[0] table = locals()[cat.title()] ids = kwargs[id_type].split("_") id_list = [] depth = None for id in ids: split_obj = id.split(".") kms = None if split_obj[0] != "all" and split_obj[0] != "show": obj_id = split_obj[0] if len(split_obj) > 1 and split_obj[1] != "show": kms = split_obj[1] ret_obj = table.query.get_or_404(obj_id).serialize() elif split_obj[0] == "all": obj_id = "all" if cat == "bra": ret_obj = Wld.query.get_or_404("sabra").serialize() ret_obj["id"] = "all" else: ret_obj = None else: obj_id = None ret_obj = None split_depth = id.split("show.") if len(split_depth) > 1: obj_depth = int(split_depth[1]) else: obj_depth = None if obj_id: if kms: neighbors = table.query.get(obj_id).get_neighbors(kms) obj_list = [] for m in neighbors: if m.bra_id_dest == obj_id: obj_list.append(m.bra_id_origin) else: obj_list.append(m.bra_id_dest) if "show" not in id: ret_obj["aggregates"] = obj_list ret["aggregate"] = True id_list = id_list + obj_list elif obj_depth and obj_depth > len(obj_id): if "plr" in obj_id: obj_list = table.query.get(obj_id).pr.all() obj_list = [m.id for m in obj_list] id_list = id_list + obj_list else: obj_list = table.query.filter(\ and_(func.char_length(getattr(table,"id")) == obj_depth, \ getattr(table,"id").startswith(obj_id))) munic_list = [d.id for d in obj_list.all()] id_list = id_list + munic_list elif obj_id == "all": if cat == "bra" or cat == "hs" or cat == "wld": parent_depth = 2 else: parent_depth = 1 obj_list = table.query.filter( func.char_length(getattr(table, "id")) == parent_depth) obj_list = [d.id for d in obj_list.all()] ret_obj["aggregates"] = obj_list ret["aggregate"] = True id_list = id_list + obj_list else: id_list.append(obj_id) elif obj_depth: depth = obj_depth if ret_obj: if cat not in ret: ret[cat] = [] ret[cat].append(ret_obj) if len(id_list) > 0: query = query.filter(getattr(data_table, id_type).in_(id_list)) elif depth: query = query.filter( func.char_length(getattr(data_table, id_type)) == depth) if cat == "bra" and obj_id: if len(ret[cat]) == 0: ret[cat].append(Wld.query.get_or_404("sabra").serialize()) ret = location_values(ret, cat) return {"query": query, "ret": ret}
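# parse_filter() resolves "<id>.show.<depth>" by combining a prefix match with
# an exact length check. A hedged sketch of that one building block, assuming
# any mapped class with a string `id` column (names are illustrative):
from sqlalchemy import and_, func

def children_at_depth(session, table, parent_id, depth):
    # descendants share the parent's id prefix; the level is the id length
    return session.query(table).filter(
        and_(func.char_length(table.id) == depth,
             table.id.startswith(parent_id))
    ).all()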
async def do_roll(expression, session, character=None, output=[]): ''' Does the variable replacement and dice rolling ''' expression = expression.strip() match = re.match(r'^(.*)\s+((?:dis)?adv|dis|(?:dis)?advantage)$', expression) if match: expression = match.group(1) if match.group(2) in ['adv', 'advantage']: adv = 1 elif match.group(2) in ['dis', 'disadv', 'disadvantage']: adv = -1 else: raise Exception('Invalid adv/disadv operator') else: adv = 0 original_expression = expression # Set up operations def roll_dice(a, b, *, silent=False): rolls = [] for _ in range(a): if b > 0: n = random.randint(1, b) elif b < 0: n = random.randint(b, -1) else: n = 0 rolls.append(n) value = sum(rolls) if not silent: output.append('{}d{}: {} = {}'.format(a, b, ' + '.join(map(str, rolls)), value)) return value def great_weapon_fighting(a, b, low=2, *, silent=False): rolls = [] rerolls = [] value = 0 for _ in range(a): n = roll_dice(1, b, silent=True) rolls.append(n) if n <= low: n2 = random.randint(1, b) rerolls.append(n2) value += n2 else: value += n if not silent: rolled = ' + '.join(map(str, rolls)) if rerolls: rerolled = list(filter(lambda a: a > low, rolls)) rerolled.extend(rerolls) rerolled = ' + '.join(map(str, rerolled)) output.append('{}g{}: {}, rerolled: {} = {}'.format( a, b, rolled, rerolled, value)) else: output.append('{}g{}: {} = {}'.format(a, b, rolled, value)) return value def roll_advantage(a, b, *, silent=False): if a == 1 and b == 20: first = roll_dice(a, b, silent=True) second = roll_dice(a, b, silent=True) out = max(first, second) if not silent: output.append('{}d{}: max({}, {}) = {}'.format( a, b, first, second, out)) else: out = roll_dice(a, b, silent=silent) return out def roll_disadvantage(a, b, *, silent=False): if a == 1 and b == 20: first = roll_dice(a, b, silent=True) second = roll_dice(a, b, silent=True) out = min(first, second) if not silent: output.append('{}d{}: min({}, {}) = {}'.format( a, b, first, second, out)) else: out = roll_dice(a, b, silent=silent) return out operations = equations.operations.copy() operations.append({'>': max, '<': min}) dice = {} if adv == 0: dice['d'] = roll_dice elif adv > 0: dice['d'] = roll_advantage else: dice['d'] = roll_disadvantage dice['D'] = dice['d'] dice['g'] = great_weapon_fighting dice['G'] = dice['g'] operations.append(dice) unary = equations.unary.copy() unary['!'] = lambda a: a // 2 - 5 output.append('`{}`'.format(expression)) if character: # replace rolls rolls = session.query(m.Roll)\ .filter_by(character=character)\ .order_by(func.char_length(m.Roll.name).desc()) rep = {roll.name: '({})'.format(roll.expression) for roll in rolls} expr = re.compile('|'.join( map(re.escape, sorted(rep.keys(), key=len, reverse=True)))) for _ in range(3): expression = expr.sub(lambda m: rep[m.group(0)], expression) temp = '`{}`'.format(expression) if temp != output[-1]: output.append(temp) else: break # replace variables variables = session.query(m.Variable)\ .filter_by(character=character)\ .order_by(func.char_length(m.Variable.name).desc()) rep = {var.name: '({})'.format(var.value) for var in variables} expr = re.compile('|'.join( map(re.escape, sorted(rep.keys(), key=len, reverse=True)))) expression = expr.sub(lambda m: rep[m.group(0)], expression) temp = '`{}`'.format(expression) if temp != output[-1]: output.append(temp) # validate for token in re.findall(r'[a-zA-Z]+', expression): if token not in chain(*operations) and token not in unary: search = r'[a-zA-Z]*({})[a-zA-Z]*'.format(re.escape(token)) search = re.search(search, 
original_expression) if search: token = search.group(1) raise equations.EquationError('\n{}\nCould not find: `{}`'.format( '\n'.join(output), token)) # do roll roll = equations.solve(expression, operations=operations, unary=unary) if roll % 1 == 0: roll = int(roll) if character: output.append('{} rolled {}'.format(str(character), roll)) else: output.append('You rolled {}'.format(roll)) return roll
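# One caveat worth flagging in do_roll() above: `output=[]` is a mutable
# default argument, so every call that omits `output` shares (and keeps
# appending to) the same list. A minimal demonstration of the pitfall and
# the usual None-sentinel fix:
def log_buggy(entry, output=[]):   # the default list is created only once
    output.append(entry)
    return output

print(log_buggy("1d20: 14"))  # ['1d20: 14']
print(log_buggy("1d6: 3"))    # ['1d20: 14', '1d6: 3'] -- leaked state

def log_fixed(entry, output=None):
    if output is None:
        output = []                # a fresh list on every call
    output.append(entry)
    return output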
def sections(self): sections = [] ''' Trade Section ''' if self.attr.id == "xxwld": export_tmap = Build("tree_map", "hs92", "export", self.attr, "all", "show", self.year) this_yo = self.models.Yo.query.filter_by(year = self.year).all() export_val = sum([o.export_val for o in this_yo]) export_subtitle = _(u"The total world trade in %(year)s was %(export_val)s. ", year=self.year, export_val=num_format(export_val, "export_val")) past_yr = self.year - 5 past_yo = self.models.Yo.query.filter_by(year = past_yr).all() growth_val = median([o.export_val_growth_pct_5 for o in this_yo]) chg = "increased" if growth_val >= 0 else "decreased" export_subtitle += _(u"During the last five years exports have %(increased_decreased)s at a median annualized rate of %(change_rate)s%%, from $%(past_export_val)s in %(past_year)s to $%(current_export_val)s in %(current_year)s. ", increased_decreased=chg, change_rate=num_format(growth_val*100), \ past_export_val=num_format(sum([o.export_val for o in past_yo])), past_year=past_yr, current_export_val=num_format(export_val), current_year=self.year) top_exports = self.models.Yp.query.filter_by(year = self.year, hs92_id_len=6).order_by(desc("export_val")).limit(2).all() export_subtitle += _(u"The most recent exports are led by %(top_export)s which represent %(top_export_pct)s%% of the total products exported, followed by %(second_export)s, which account for %(second_export_pct)s%%. ", top_export=top_exports[0].product.get_profile_link(), top_export_pct=num_format((top_exports[0].export_val/export_val)*100), \ second_export=top_exports[1].product.get_profile_link(), second_export_pct=num_format((top_exports[1].export_val/export_val)*100)) origins_tmap = Build("tree_map", "hs92", "import", self.attr, "show", "all", self.year) yo_exp = self.models.Yo.query.filter_by(year = self.year).order_by(desc("export_val")).limit(5).all() origin_list = self.stringify_items(yo_exp, "export_val", "country") origin_subtitle = _(u"The top exporters globally are %(origins)s.", origins=origin_list) trade_section = { "builds": [ {"title": _(u"Exports"), "build": export_tmap, "subtitle": export_subtitle, "tour":"This is just a test", "seq":5}, {"title": _(u"Origins"), "build": origins_tmap, "subtitle": origin_subtitle}, ] } else: export_subtitle, import_subtitle, dest_subtitle, origin_subtitle = [None]*4 export_tmap = Build("tree_map", "hs92", "export", self.attr, "all", "show", self.year) import_tmap = Build("tree_map", "hs92", "import", self.attr, "all", "show", self.year) yop_base = self.models.Yop.query.filter_by(year = self.year, origin = self.attr, hs92_id_len=6) # get growth past_yr = self.year - 5 past_yo = self.models.Yo.query.filter_by(year = past_yr, country = self.attr).first() this_yo = self.models.Yo.query.filter_by(year = self.year, country = self.attr).first() exp_val_stat = filter(lambda s: s["key"] == "export_val", self.stats()) if exp_val_stat: exp_val_stat = exp_val_stat.pop() export_subtitle = "" if self.attr.id != "xxwld": exp_rank = num_format(exp_val_stat["rank"], "ordinal") if exp_val_stat["rank"] > 1 else "" export_subtitle += _(u"In %(year)s %(country)s exported $%(export_val)s, making it the %(export_rank)s largest exporter in the world. 
", year=self.year, country=self.attr.get_name(article=True), export_val=num_format(exp_val_stat["val"]), export_rank=exp_rank) if past_yo: chg = "increased" if this_yo.export_val_growth_pct_5 >= 0 else "decreased" export_subtitle += _(u"During the last five years the exports %(of_country)s have %(increased_decreased)s at an annualized rate of %(change_rate)s%%, from $%(past_export_val)s in %(past_year)s to $%(current_export_val)s in %(current_year)s. ", of_country=self.attr.get_name(article="of"), increased_decreased=chg, change_rate=num_format(this_yo.export_val_growth_pct_5*100), \ past_export_val=num_format(past_yo.export_val), past_year=past_yr, current_export_val=num_format(this_yo.export_val), current_year=self.year) top_exports = yop_base.order_by(desc("export_val")).limit(2).all() if top_exports: # raise Exception(top_exports[0].product.get_profile_link(), num_format((top_exports[0].export_val/exp_val_stat["val"])*100), self.attr.get_name(article="of"), top_exports[1].product.get_profile_link(), num_format((top_exports[1].export_val/exp_val_stat["val"])*100)) export_subtitle += _(u"The most recent exports are led by %(top_export)s which represent %(top_export_pct)s%% of the total exports %(of_country)s, followed by %(second_export)s, which account for %(second_export_pct)s%%.", top_export=top_exports[0].product.get_profile_link(), top_export_pct=num_format((top_exports[0].export_val/exp_val_stat["val"])*100), \ of_country=self.attr.get_name(article="of"), second_export=top_exports[1].product.get_profile_link(), second_export_pct=num_format((top_exports[1].export_val/exp_val_stat["val"])*100)) imp_val_stat = filter(lambda s: s["key"] == "import_val", self.stats()) if imp_val_stat: imp_val_stat = imp_val_stat.pop() import_subtitle = "" if self.attr.id != "xxwld": imp_rank = num_format(imp_val_stat["rank"], "ordinal") if imp_val_stat["rank"] > 1 else "" import_subtitle += _(u"In %(year)s %(country)s imported $%(import_val)s, making it the %(import_rank)s largest importer in the world. ", year=self.year, country=self.attr.get_name(article=True), import_val=num_format(imp_val_stat["val"]), import_rank=imp_rank) if past_yo: chg = "increased" if this_yo.import_val_growth_pct_5 >= 0 else "decreased" import_subtitle += _(u"During the last five years the imports %(of_country)s have %(increased_decreased)s at an annualized rate of %(change_rate)s%%, from $%(past_import_val)s in %(past_year)s to $%(current_import_val)s in %(current_year)s. 
", of_country=self.attr.get_name(article="of"), increased_decreased=chg, change_rate=num_format(this_yo.import_val_growth_pct_5*100), \ past_import_val=num_format(past_yo.import_val), past_year=past_yr, current_import_val=num_format(this_yo.import_val), current_year=self.year) top_imports = yop_base.order_by(desc("import_val")).limit(2).all() if top_imports: import_subtitle += _(u"The most recent imports are led by %(top_import)s which represent %(top_import_pct)s%% of the total imports %(of_country)s, followed by %(second_import)s, which account for %(second_import_pct)s%%.", top_import=top_imports[0].product.get_profile_link(), top_import_pct=num_format((top_imports[0].import_val/imp_val_stat["val"])*100), \ of_country=self.attr.get_name(article="of"), second_import=top_imports[1].product.get_profile_link(), second_import_pct=num_format((top_imports[1].import_val/imp_val_stat["val"])*100)) dests_tmap = Build("tree_map", "hs92", "export", self.attr, "show", "all", self.year) yod_exp = self.models.Yod.query.filter_by(year = self.year, origin = self.attr).order_by(desc("export_val")).limit(5).all() if yod_exp: dest_list = self.stringify_items(yod_exp, "export_val", "dest") dest_subtitle = _(u"The top export destinations %(of_country)s are %(destinations)s.", of_country=self.attr.get_name(article="of"), destinations=dest_list) origins_tmap = Build("tree_map", "hs92", "import", self.attr, "show", "all", self.year) yod_imp = self.models.Yod.query.filter_by(year = self.year, dest = self.attr).order_by(desc("export_val")).limit(5).all() if yod_imp: origin_list = self.stringify_items(yod_imp, "export_val", "origin") origin_subtitle = _(u"The top import origins %(of_country)s are %(origins)s.", of_country=self.attr.get_name(article="of"), origins=origin_list) # trade balance viz -- first_yo = self.models.Yo.query.filter_by(year = available_years["hs92"][-1], country = self.attr).first() tb_subtitle = "" tb_build = Build("line", "hs92", "show", self.attr, "all", "all", available_years["hs92"]) if first_yo: net_trade = this_yo.export_val - this_yo.import_val trade_balance = _("positive") if net_trade >= 0 else _("negative") trade_direction = _("exports") if net_trade >= 0 else _("imports") tb_subtitle = _(u"As of %(year)s %(country)s had a %(positive_negative)s trade balance of $%(net_trade)s in net %(exports_imports)s.", year=self.year, country=self.attr.get_name(article=True), positive_negative=trade_balance, net_trade=num_format(abs(net_trade)), exports_imports=trade_direction) old_yo = self.models.Yo.query.filter_by(year = available_years["hs92"][0], country = self.attr).first() if old_yo: old_net_trade = old_yo.export_val - old_yo.import_val old_trade_balance = _("positive") if old_net_trade >= 0 else _("negative") old_trade_direction = _("exports") if old_net_trade >= 0 else _("imports") is_diff = True if old_trade_balance != trade_balance else False still_or_not = _("still") if old_trade_balance == trade_balance else "" tb_subtitle += _(u" As compared to their trade balance in %(year)s when they %(still)s had a %(positive_negative)s trade balance of $%(net_trade)s in net %(exports_imports)s.", year=available_years["hs92"][0], still=still_or_not, positive_negative=old_trade_balance, net_trade=num_format(abs(old_net_trade)), exports_imports=old_trade_direction) trade_section = { "builds": [ {"title": _(u"Exports"), "build": export_tmap, "subtitle": export_subtitle, "tour":"This is just a test", "seq":5}, {"title": _(u"Imports"), "build": import_tmap, "subtitle": import_subtitle}, {"title": 
_(u"Trade Balance"), "build": tb_build, "subtitle": tb_subtitle}, {"title": _(u"Destinations"), "build": dests_tmap, "subtitle": dest_subtitle}, {"title": _(u"Origins"), "build": origins_tmap, "subtitle": origin_subtitle}, ] } sections.append(trade_section) ''' Product Space Section ''' subtitle = False if self.attr.id != "xxwld": num_exports_w_rca = db.session.query(func.count(self.models.Yop.hs92_id)) \ .filter_by(year = self.year, origin = self.attr) \ .filter(self.models.Yop.export_rca >= 1) \ .filter(func.char_length(self.models.Yop.hs92_id)==6) \ .scalar() this_attr_yo = attrs.Yo.query.filter_by(year = self.year, country = self.attr).first() if this_attr_yo: eci = this_attr_yo.eci eci_rank = this_attr_yo.eci_rank if eci_rank: subtitle = _(u"The economy %(of_country)s has an Economic Complexity Index (ECI) of %(eci)s making it the %(eci_rank)s most complex country. ", of_country=self.attr.get_name(article="of"), eci=num_format(eci), eci_rank=num_format(eci_rank, "ordinal")) else: subtitle = "" subtitle += _(u"%(country)s exports %(num_of_exports)s products with revealed comparative advantage " \ u"(meaning that its share of global exports is larger than what " \ u"would be expected from the size of its export economy " \ u"and from the size of a product’s global market).", country=self.attr.get_name(article=True), num_of_exports=num_exports_w_rca) product_space = Build("network", "hs92", "export", self.attr, "all", "show", self.year) ps_text = _(u"The product space is a network connecting products that are likely to be co-exported and can be used to predict the evolution of a country’s export structure.") if subtitle: ps_text = u"{}</p><p>{}".format(ps_text, subtitle) ps_section = { "title": _(u"Economic Complexity %(of_country)s", of_country=self.attr.get_name(article="of")), "builds": [ {"title": _(u"Product Space"), "build": product_space, "subtitle": ps_text, "tour":"The product space...", "seq":6} ] } ''' ECI Ranking Section ''' if self.attr.id == "xxwld": line_rankings = Build("line", "sitc", "eci", "show", "all", "all", [y for y in available_years["sitc"] if y >= 1964]) start_year = 1980 start_year = max(1964, start_year) if start_year != 1980 else 1964 year_range = self.year - start_year subtitle = _("""The Economic Complexities of each country visualized over the past %(year_range)s years.""", year_range=year_range) ps_section["builds"].append({"title": _(u"Economic Complexity Ranking"), "build": line_rankings, "subtitle": subtitle}) elif this_attr_yo and this_attr_yo.eci != None: line_rankings = Build("line", "sitc", "eci", "show", self.attr, "all", [y for y in available_years["sitc"] if y >= 1964]) start_year = earliest_data.get(self.attr.id, 1980) start_year = max(1964, start_year) if start_year != 1980 else 1964 year_range = self.year - start_year attr_yo_historic = attrs.Yo.query.filter_by(country=self.attr).filter(attrs.Yo.year == start_year).first() if attr_yo_historic.eci_rank: eci_delta = this_attr_yo.eci_rank - attr_yo_historic.eci_rank inc_dec = _('increased') if eci_delta < 0 else _('decreased') subtitle = _("""The Economic Complexity ranking %(of_country)s has %(increased_or_decreased)s by %(rank_delta)s places over the past %(year_range)s years from %(old_eci)s in %(old_year)s to %(current_eci)s in %(current_year)s.""", of_country=self.attr.get_name(article="of"), increased_or_decreased=inc_dec, rank_delta=abs(eci_delta), year_range=year_range, old_eci=num_format(attr_yo_historic.eci_rank, "ordinal"), old_year=start_year, 
current_eci=num_format(this_attr_yo.eci_rank, "ordinal"), current_year=self.year) ps_section["builds"].append({"title": _(u"Economic Complexity Ranking"), "build": line_rankings, "subtitle": subtitle}) sections.append(ps_section) sections.append({ "title": _(u"More on %(country)s from our sister sites", country=self.attr.get_name(article=True)), "source": "sisters" }) ''' DataViva ''' if self.attr.id == "xxwld": dv_munic_dest_iframe = "http://dataviva.info/apps/embed/tree_map/secex/all/all/all/bra/?size=import_val&controls=false" dv_munic_dest_subtitle = _(u""" This treemap shows the municipalities in Brazil that imported products internationally.<br /><br /> DataViva is a visualization tool that provides official data on trade, industries, and education throughout Brazil. If you would like more info or to create a similar site get in touch with us at <a href='mailto:[email protected]'>[email protected]</a>. </p><p><a target='_blank' href='http://dataviva.info/apps/builder/tree_map/secex/all/all/all/bra/?size=import_val&controls=false'>Explore on DataViva <i class='fa fa-external-link'></i></a> """, country=self.attr.get_name(article=True)) dv_section = { "title": u"<a href='http://dataviva.info/' target='_blank'><img src='http://en.dataviva.info/static/img/nav/DataViva.png' /></a>", "source": "dataviva", "builds": [ {"title": _(u"Imports of Brazil by Municipality"), "iframe": dv_munic_dest_iframe, "subtitle": dv_munic_dest_subtitle, "tour":"Profile pages also contain visualizations from other websites created by members of the OEC team. The following are two embedded visualizations from DataViva, a similar visualization platform centered around Brazilian data.", "seq":7} ] } elif self.attr.id == "sabra": dv_geo_map = "http://en.dataviva.info/apps/embed/geo_map/secex/all/all/all/bra/?color=export_val&controls=false&year=2013" dv_wages = "http://en.dataviva.info/apps/embed/bar/rais/all/all/all/bra/?controls=false&year=2013" dv_geo_map_subtitle = _(u""" This map shows the exports of Brazil by state.<br /><br /> DataViva is a visualization tool that provides official data on trade, industries, and education throughout Brazil. If you would like more info or to create a similar site get in touch with us at <a href='mailto:[email protected]'>[email protected]</a>. </p><p><a target='_blank' href='http://en.dataviva.info/apps/builder/geo_map/secex/all/all/all/bra/?color=export_val&controls=false&year=2013'>Explore on DataViva <i class='fa fa-external-link'></i></a> """) dv_wages_subtitle = _(u""" This bar chart shows the wage distribution for the working population in Brazil. </p><p><a target='_blank' href='http://en.dataviva.info/apps/builder/bar/rais/all/all/all/bra/?controls=false&year=2013'>Explore on DataViva <i class='fa fa-external-link'></i></a> """) dv_section = { "title": u"<a href='http://dataviva.info/' target='_blank'><img src='http://en.dataviva.info/static/img/nav/DataViva.png' /></a>", "source": "dataviva", "builds": [ {"title": _(u"State Exports"), "iframe": dv_geo_map, "subtitle": dv_geo_map_subtitle, "tour":"Profile pages also contain visualizations from other websites created by members of the OEC team. 
The following are two embedded visualizations from DataViva, a similar visualization platform centered around Brazilian data.", "seq":7}, {"title": _(u"Wage Distribution"), "iframe": dv_wages, "subtitle": dv_wages_subtitle}, ] } else: dv_country_id = "asrus" if self.attr.id == "eurus" else self.attr.id dv_munic_dest_iframe = "http://dataviva.info/apps/embed/tree_map/secex/all/all/{}/bra/?size=import_val&controls=false".format(dv_country_id) dv_munic_origin_iframe = "http://dataviva.info/apps/embed/tree_map/secex/all/all/{}/bra/?size=export_val&controls=false".format(dv_country_id) dv_munic_dest_subtitle = _(u""" This treemap shows the municipalities in Brazil that imported products from %(country)s.<br /><br /> DataViva is a visualization tool that provides official data on trade, industries, and education throughout Brazil. If you would like more info or to create a similar site get in touch with us at <a href='mailto:[email protected]'>[email protected]</a>. </p><p><a target='_blank' href='http://dataviva.info/apps/builder/tree_map/secex/all/all/%(dv_country_id)s/bra/?size=import_val&controls=false'>Explore on DataViva <i class='fa fa-external-link'></i></a> """, country=self.attr.get_name(article=True), dv_country_id=dv_country_id) dv_munic_origin_subtitle = _(u""" This treemap shows the municipalities in Brazil that exported products to %(country)s. </p><p><a target='_blank' href='http://dataviva.info/apps/builder/tree_map/secex/all/all/%(dv_country_id)s/bra/?size=export_val&controls=false'>Explore on DataViva <i class='fa fa-external-link'></i></a> """, country=self.attr.get_name(article=True), dv_country_id=dv_country_id) dv_section = { "title": u"<a href='http://dataviva.info/' target='_blank'><img src='http://en.dataviva.info/static/img/nav/DataViva.png' /></a>", "source": "dataviva", "builds": [ {"title": _(u"Brazilian Municipalities that import from %(country)s", country=self.attr.get_name(article=True)), "iframe": dv_munic_dest_iframe, "subtitle": dv_munic_dest_subtitle, "tour":"Profile pages also contain visualizations from other websites created by members of the OEC team. 
The following are two embedded visualizations from DataViva, a similar visualization platform centered around Brazilian data.", "seq":7}, {"title": _(u"Brazilian Municipalities that export to %(country)s", country=self.attr.get_name(article=True)), "iframe": dv_munic_origin_iframe, "subtitle": dv_munic_origin_subtitle}, ] } sections.append(dv_section) ''' Pantheon ''' pantheon_id = "all" if self.attr.id == "xxwld" else self.attr.id_2char if pantheon_id: if self.attr.id != "xxwld": pantheon_id = pantheon_id.upper() pantheon_iframe_fields = "http://pantheon.media.mit.edu/treemap/country_exports/{}/all/-4000/2010/H15/pantheon/embed".format(pantheon_id) pantheon_link_fields = "<a target='_blank' href='http://pantheon.media.mit.edu/treemap/country_exports/{}/all/-4000/2010/H15/pantheon/'>Explore on Pantheon <i class='fa fa-external-link'></i></a>".format(pantheon_id) pantheon_iframe_cities = "http://pantheon.media.mit.edu/treemap/country_by_city/all/{}/-4000/2010/H15/pantheon/embed".format(pantheon_id) pantheon_link_cities = "<a target='_blank' href='http://pantheon.media.mit.edu/treemap/country_by_city/{}/all/-4000/2010/H15/pantheon/'>Explore on Pantheon <i class='fa fa-external-link'></i></a>".format(pantheon_id) pantheon_section = { "title": "<a target='_blank' href='http://pantheon.media.mit.edu'><img src='http://pantheon.media.mit.edu/pantheon_logo.png' /></a>", "source": "pantheon", "builds": [ {"title": _(u"Globally Famous People %(of_country)s", of_country=self.attr.get_name(article="of")), "iframe": pantheon_iframe_fields, "subtitle": _(u"This treemap shows the cultural exports %(of_country)s, as proxied by the production of globally famous historical characters.</p><p>%(pantheon_link)s", of_country=self.attr.get_name(article="of"), pantheon_link=pantheon_link_fields), "tour":"Pantheon...", "seq":8 }, {"title": _(u"Globally Famous People %(of_country)s by City", of_country=self.attr.get_name(article="of")), "iframe": pantheon_iframe_cities, "subtitle": _(u"This treemap shows the cultural exports %(of_country)s by city, as proxied by the production of globally famous historical characters.</p><p>%(pantheon_link)s", of_country=self.attr.get_name(article="of"), pantheon_link=pantheon_link_cities) }, ] } sections.append(pantheon_section) return sections
def attrs(attr="bra",Attr_id=None, depth=None): Attr = globals()[attr.capitalize()] Attr_weight_mergeid = "{0}_id".format(attr) if attr == "bra": Attr_weight_tbl = Yb Attr_weight_col = "population" elif attr == "cnae": Attr_weight_tbl = Yi Attr_weight_col = "num_jobs" elif attr == "cbo": Attr_weight_tbl = Yo Attr_weight_col = "num_jobs" elif attr == "hs": Attr_weight_tbl = Ymp Attr_weight_col = "export_val" elif attr == "wld": Attr_weight_tbl = Ymw Attr_weight_col = "export_val" elif attr == "course_hedu": Attr_weight_tbl = Yc_hedu Attr_weight_col = "enrolled" elif attr == "university": Attr_weight_tbl = Yu Attr_weight_col = "enrolled" elif attr == "school": Attr_weight_tbl = Ys Attr_weight_col = "enrolled" elif attr == "course_sc": Attr_weight_tbl = Yc_sc Attr_weight_col = "enrolled" depths = {} depths["bra"] = [1, 3, 5, 7, 9] depths["cnae"] = [1, 3, 6] depths["cbo"] = [1, 4] depths["hs"] = [2, 6] depths["wld"] = [2, 5] depths["course_hedu"] = [2, 6] depths["university"] = [5] depths["course_sc"] = [2, 5] depths["school"] = [8] depth = request.args.get('depth', depth) order = request.args.get('order', None) offset = request.args.get('offset', None) limit = request.args.get('limit', None) if offset: offset = float(offset) limit = limit or 50 elif limit: offset = float(0) lang = request.args.get('lang', None) or g.locale ret = {} dataset = "rais" if Attr == Wld or Attr == Hs: dataset = "secex" elif Attr == Course_hedu or Attr == University: dataset = "hedu" elif Attr == Course_sc or Attr == School: dataset = "sc" elif Attr == Bra: dataset = "population" cache_id = "attrs:" + request.path + lang if depth: cache_id = cache_id + "/" + depth # first lets test if this query is cached cached_q = cached_query(cache_id) if cached_q and limit is None: ret = make_response(cached_q) ret.headers['Content-Encoding'] = 'gzip' ret.headers['Content-Length'] = str(len(ret.data)) return ret # if an ID is supplied only return that if Attr_id: # the '.show.' indicates that we are looking for a specific nesting if ".show." in Attr_id: this_attr, ret["nesting_level"] = Attr_id.split(".show.") # filter table by requested nesting level attrs = Attr.query \ .filter(Attr.id.startswith(this_attr)) \ .filter(func.char_length(Attr.id) == ret["nesting_level"]).all() # the 'show.' indicates that we are looking for a specific nesting elif "show." in Attr_id: ret["nesting_level"] = Attr_id.split(".")[1] # filter table by requested nesting level attrs = Attr.query.filter(func.char_length(Attr.id) == ret["nesting_level"]).all() # the '.' here means we want to see all attrs within a certain distance elif "." 
in Attr_id: this_attr, distance = Attr_id.split(".") this_attr = Attr.query.get_or_404(this_attr) attrs = this_attr.get_neighbors(distance) else: attrs = [Attr.query.get_or_404(Attr_id)] ret["data"] = [fix_name(a.serialize(), lang) for a in attrs] # an ID/filter was not provided else: latest_year = __year_range__[dataset][-1] latest_month = False if "-" in latest_year: latest_year, latest_month = latest_year.split("-") latest_month = int(latest_month) latest_year = int(latest_year) conds = [getattr(Attr_weight_tbl,"{0}_id".format(attr)) == Attr.id, Attr_weight_tbl.year == latest_year] if latest_month: conds.append(Attr_weight_tbl.month == latest_month) query = db.session.query(Attr,Attr_weight_tbl).outerjoin(Attr_weight_tbl, and_(*conds)) if Attr == School: query = query.filter(Attr.is_vocational == 1) if depth: query = query.filter(func.char_length(Attr.id) == depth) else: query = query.filter(func.char_length(Attr.id).in_(depths[attr])) if order: direction = "asc" if "." in order: o, direction = order.split(".") else: o = order if o == "name": o = "name_{0}".format(lang) if o == Attr_weight_col: order_table = Attr_weight_tbl else: order_table = Attr if direction == "asc": query = query.order_by(asc(getattr(order_table,o))) elif direction == "desc": query = query.order_by(desc(getattr(order_table,o))) if limit: query = query.limit(limit).offset(offset) attrs_all = query.all() # just get items available in DB attrs_w_data = None if depth is None and limit is None: attrs_w_data = db.session.query(Attr, Attr_weight_tbl) \ .filter(getattr(Attr_weight_tbl, Attr_weight_mergeid) == Attr.id) \ .group_by(Attr.id) attrs_w_data = [a[0].id for a in attrs_w_data] attrs = [] # all_planning_regions = {} for i, a in enumerate(attrs_all): b = a[0].serialize() if a[1]: c = a[1].serialize() if Attr_weight_col in c: b[Attr_weight_col] = c[Attr_weight_col] else: b[Attr_weight_col] = 0 else: b[Attr_weight_col] = 0 a = b if attrs_w_data: a["available"] = False if a["id"] in attrs_w_data: a["available"] = True # if Attr_weight_col == "population" and len(a["id"]) == 9 and a["id"][:3] == "4mg": # if not all_planning_regions: # all_planning_regions = get_planning_region_map() # if a["id"] in all_planning_regions: # plr = all_planning_regions[a["id"]] # a["plr"] = plr if order: a["rank"] = int(i+offset+1) if attr == "bra" and "id_ibge" not in a: a["id_ibge"] = False attrs.append(fix_name(a, lang)) ret["data"] = attrs ret = jsonify(ret) ret.data = gzip_data(ret.data) if limit is None and cached_q is None: cached_query(cache_id, ret.data) ret.headers['Content-Encoding'] = 'gzip' ret.headers['Content-Length'] = str(len(ret.data)) return ret
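# The attrs() endpoint above overloads its Attr_id path segment with several
# conventions. A hypothetical helper (names are illustrative) that makes the
# dispatch explicit:
def parse_attr_id(attr_id):
    if ".show." in attr_id:          # "<id>.show.<n>": descendants of <id> at level n
        prefix, level = attr_id.split(".show.")
        return {"kind": "nested", "prefix": prefix, "level": int(level)}
    if attr_id.startswith("show."):  # "show.<n>": every attribute at level n
        return {"kind": "level", "level": int(attr_id.split(".")[1])}
    if "." in attr_id:               # "<id>.<km>": neighbors within a distance
        this_attr, distance = attr_id.split(".")
        return {"kind": "neighbors", "id": this_attr, "distance": distance}
    return {"kind": "lookup", "id": attr_id}

print(parse_attr_id("4mg.show.9"))   # {'kind': 'nested', 'prefix': '4mg', 'level': 9}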
def intro(self):
    all_paragraphs = []

    ''' Paragraph #2 '''
    # get total world trade rank
    this_yp = self.models.Yp.query.filter_by(
        year=self.year, product=self.attr).first()
    all_yp = self.models.Yp.query.filter_by(year=self.year) \
        .filter(func.char_length(getattr(self.models.Yp, "{}_id".format(
            self.classification))) == len(self.attr.id)) \
        .order_by(desc("export_val")).all()
    if this_yp:
        econ_rank = num_format(all_yp.index(this_yp) + 1, "ordinal") \
            if all_yp.index(this_yp) else ""
        # get PCI ranking
        p2 = _(u"%(product)s the %(economic_rank)s most traded product",
               product=self.attr.get_name(verb=True), economic_rank=econ_rank)
        pci_rank = this_yp.pci_rank
        if pci_rank:
            pci_rank = num_format(pci_rank, "ordinal") if pci_rank > 1 else ""
            p2 += _(u" and the %(pci_rank)s most complex product according to the <a href='/en/rankings/hs92/'>Product Complexity Index (PCI)</a>",
                    pci_rank=pci_rank)
        p2 += "."
        all_paragraphs.append(p2)

    ''' Paragraph #3 '''
    yop_exp = self.models.Yop.query.filter_by(year=self.year, product=self.attr) \
        .filter(self.models.Yop.export_val != None) \
        .order_by(desc("export_val")).limit(5).all()
    if yop_exp:
        exporters = self.stringify_items(yop_exp, "export_val", "origin")
        yop_imp = self.models.Yop.query.filter_by(year=self.year, product=self.attr) \
            .filter(self.models.Yop.import_val != None) \
            .order_by(desc("import_val")).limit(5).all()
        importers = self.stringify_items(yop_imp, "import_val", "origin")
        p3 = _(u"The top exporters of %(product)s are %(exporters)s. The top importers are %(importers)s.",
               product=self.attr.get_name(), exporters=exporters, importers=importers)
        all_paragraphs.append(p3)

    ''' Paragraph #4 '''
    p4 = []
    # find the countries for which this product is the #1 export/import
    countries_top = self.models.Yo.query.filter_by(year=self.year)
    if len(self.attr.id) == 6:
        countries_top_export = countries_top.filter_by(top_export=self.attr.id) \
            if self.classification == "sitc" \
            else countries_top.filter_by(top_export_hs4=self.attr.id)
        countries_top_import = countries_top.filter_by(top_import=self.attr.id) \
            if self.classification == "sitc" \
            else countries_top.filter_by(top_import_hs4=self.attr.id)
    elif len(self.attr.id) == 8:
        countries_top_export = countries_top.filter_by(top_export_hs6=self.attr.id)
        countries_top_import = countries_top.filter_by(top_import_hs6=self.attr.id)
    countries_top_export = countries_top_export.order_by(desc('export_val')).limit(10).all()
    countries_top_import = countries_top_import.order_by(desc('import_val')).limit(10).all()
    if countries_top_export:
        countries_top_export = self.stringify_items(countries_top_export, None, "country")
        p4.append(_(u"%(product)s the top export of %(countries)s.",
                    product=self.attr.get_name(verb=True), countries=countries_top_export))
    if countries_top_import:
        countries_top_import = self.stringify_items(countries_top_import, None, "country")
        p4.append(_(u"%(product)s the top import of %(countries)s.",
                    product=self.attr.get_name(verb=True), countries=countries_top_import))
    if p4:
        all_paragraphs = all_paragraphs + p4

    ''' Paragraph #5 '''
    keywords = self.attr.get_keywords()
    if keywords:
        all_paragraphs.append(_(u"%(product)s also known as %(keywords)s.",
                                product=self.attr.get_name(verb=True), keywords=keywords))

    ''' Paragraph #1 '''
    p1 = _(u"%(product)s a %(product_id_length)s digit %(classification)s product.",
           product=self.attr.get_name(verb=True),
           product_id_length=len(self.attr.get_display_id()),
           classification=self.classification.upper())
    all_paragraphs.append(p1)
    return all_paragraphs
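# The ordinal rank above is computed client-side via list.index(), which loads
# every row for the year. A hedged alternative (same result, assuming a
# backend with window functions such as PostgreSQL or MySQL 8+; Yp, session,
# year and product_id mirror the snippet's context) ranks in SQL instead:
from sqlalchemy import desc, func

rank_col = func.row_number().over(order_by=desc(Yp.export_val)).label("rank")
ranked = session.query(Yp.hs92_id, rank_col).filter(Yp.year == year).subquery()
my_rank = session.query(ranked.c.rank).filter(ranked.c.hs92_id == product_id).scalar()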
def prev(self):
    attr_class = getattr(attrs, self.type.capitalize())
    return attr_class.query \
        .filter(attr_class.id < self.attr.id) \
        .filter(func.char_length(attr_class.id) == len(self.attr.id)) \
        .order_by(attr_class.id.desc()).first()
def compute_stats(metric, shows, limit=None, offset=None, sort="desc",
                  depth=None, filters=None):
    cache_key = CAROUSEL_NS + "".join(
        ([metric] + shows) + ([str(limit), str(offset), sort, str(depth)]))
    prev = cached_query(cache_key)
    if prev:
        return pickle.loads(prev)
    kwargs = {metric: "dummy"}
    kwargs[shows[0]] = 'show'
    for show in shows[1:]:
        kwargs[show] = "dummy"
    table = table_helper.select_best_table(kwargs, allowed_when_not,
                                           possible_tables)
    if not table:
        raise Exception("No Valid Table Available!")
    # NB: the original accepted a `filters=[]` argument but immediately
    # rebound the name here, so incoming filters were always ignored (and a
    # mutable default is a pitfall in any case); kept as a fresh list
    filters = []
    show_columns = [getattr(table, show) for show in shows]
    metric_col = getattr(table, metric)
    for show, show_column in zip(shows, show_columns):
        if table in no_length_column:
            depth_val = depth or max_depth[show]
            filters.append(func.char_length(show_column) == depth_val)
        elif show in max_depth:
            depth_val = depth or max_depth[show]
            filters.append(getattr(table, show + table_helper.LEN) == depth_val)
    if table in filters_map:
        filters += filters_map[table]
    growth_regex = re.match('(num_emp)_growth(_5)?', metric)
    VAL_THRESHOLD = 10000
    if growth_regex:
        orig_col_name = growth_regex.group(1)
        orig_col = getattr(table, orig_col_name)
        filters.append(orig_col >= VAL_THRESHOLD)
    elif metric == "wage_avg" and len(shows) == 1 and shows[0] == "bra_id":
        # when looking at wage_avg for cities, only look at places
        # with >= 50k people
        cities = cities_by_pop(50000)
        filters.append(table.bra_id.in_(cities))
    columns = show_columns + [metric_col]
    results = query_helper.query_table(table, columns, filters, order=metric,
                                       limit=limit, sort=sort, offset=offset)
    cached_query(cache_key, pickle.dumps(results))
    return results
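# compute_stats() prefers a precomputed length column and only falls back to
# char_length() for tables that lack one; the stored column can be indexed
# while the function call is evaluated per row, so the fallback is the slow
# path. A hedged sketch of that branch (table_helper.LEN is assumed here to
# be the "_len" suffix seen on columns like hs92_id_len elsewhere in these
# snippets):
from sqlalchemy import func

def depth_filter(table, show, depth, no_length_column, len_suffix="_len"):
    if table in no_length_column:
        # computed per row; a plain index on the id column cannot be used
        return func.char_length(getattr(table, show)) == depth
    # precomputed at load time, e.g. hs92_id_len == 6; index-friendly
    return getattr(table, show + len_suffix) == depth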
def next(self):
    # next sibling at the same hierarchy depth; the ascending order_by is
    # needed so .first() returns the immediate successor rather than an
    # arbitrary larger id (the original query had no ordering)
    attr_class = getattr(attrs, self.type.capitalize())
    return attr_class.query \
        .filter(func.char_length(attr_class.id) == len(self.attr.id)) \
        .filter(attr_class.id > self.attr.id) \
        .order_by(attr_class.id.asc()).first()
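# For symmetry with prev()/next() above, the same navigation written as
# standalone helpers (a sketch assuming any mapped class with a string `id`
# and a Flask-SQLAlchemy-style `query` attribute):
from sqlalchemy import func

def prev_of(cls, current_id):
    return (cls.query.filter(cls.id < current_id)
            .filter(func.char_length(cls.id) == len(current_id))
            .order_by(cls.id.desc()).first())

def next_of(cls, current_id):
    return (cls.query.filter(cls.id > current_id)
            .filter(func.char_length(cls.id) == len(current_id))
            .order_by(cls.id.asc()).first())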
def _get_admin_scan(Key, Value, name, config, language, mandatory, local=False): # Create fieldConfig with some obvious return values fieldConfig = { 'language': language.english_name, 'mandatory': mandatory, 'local': local } # Keys # Prepare subquery for translations keyTranslated = Session.query( Key.fk_key.label('original_id'), Key.key.label('translated') ).\ filter(Key.language == language).\ subquery() # Query keys keys = Session.query( Key.key.label('original'), keyTranslated.c.translated.label('translated'), # Use column 'keyorvalue' to separate keys (0) from values (1) func.char_length('').label('keyorvalue') ).\ filter(Key.key == name).\ filter(Key.original == None).\ outerjoin(keyTranslated, keyTranslated.c.original_id == Key.id) query = keys # Predefined values available or not? store = False try: if 'predefined' in config: store = True predefined = config['predefined'] # Local predefined are one level further down elif 'predefined' in config['values']: store = True predefined = config['values']['predefined'] except KeyError: pass if store: # Values value_store = [] # Collect values first all_vals = [] for val in predefined: all_vals.append(val) # Prepare subquery for translations valuesTranslated = Session.query( Value.fk_value.label('original_id'), Value.value.label('translated') ).\ filter(Value.language == language).\ subquery() # Query values values = Session.query( Value.value.label('original'), valuesTranslated.c.translated.label('translated'), # Use column 'keyorvalue' to separate keys (0) from values (1) func.char_length(' ').label('keyorvalue') ).\ filter(Value.value.in_(all_vals)).\ filter(Value.original == None).\ outerjoin(valuesTranslated, valuesTranslated.c.original_id == Value.id) # Union with keys query = keys.union(values) # Go through each key/value for x in query.all(): if x.keyorvalue == 0: # Key fieldConfig['keyvalue'] = 'key' fieldConfig['exists'] = True fieldConfig['value'] = x.original fieldConfig['iconCls'] = 'ico-key' if language.id == 1: # Already in english fieldConfig['translation'] = 0 elif x.translated is None: # Not yet translated fieldConfig['translation'] = 1 else: fieldConfig['translation'] = x.translated else: # Value val = {} val['keyvalue'] = 'value' val['exists'] = True val['value'] = x.original val['mandatory'] = mandatory val['language'] = language.english_name val['local'] = local val['leaf'] = True val['iconCls'] = 'ico-value' if language.id == 1: # Already in english val['translation'] = 0 elif x.translated is None: # Not yet translated val['translation'] = 1 else: val['translation'] = x.translated value_store.append(val) try: # Remove value from array with all possible values all_vals.remove(x.original) except: pass # Key is not yet in database try: fieldConfig['keyvalue'] except KeyError: fieldConfig['keyvalue'] = 'key' fieldConfig['exists'] = False fieldConfig['value'] = name fieldConfig['iconCls'] = 'ico-key' fieldConfig['translation'] = 1 # Not yet translated if store: # Add values which are not yet inserted to store for i in all_vals: val = {} val['keyvalue'] = 'value' val['exists'] = False val['value'] = i val['mandatory'] = mandatory val['language'] = language.english_name val['local'] = local val['leaf'] = True val['iconCls'] = 'ico-value' val['translation'] = 1 # Not yet translated value_store.append(val) fieldConfig['children'] = value_store else: fieldConfig['leaf'] = True return fieldConfig
def sections(self):
    sections = []

    ''' Trade Section '''
    if self.attr.id == "xxwld":
        export_tmap = Build("tree_map", "hs92", "export", self.attr, "all",
                            "show", self.year)

        this_yo = self.models.Yo.query.filter_by(year=self.year).all()
        export_val = sum([o.export_val for o in this_yo])
        export_subtitle = _(
            u"The total world trade in %(year)s was %(export_val)s.",
            year=self.year, export_val=num_format(export_val, "export_val"))
        export_subtitle += u" "

        past_yr = self.year - 5
        past_yo = self.models.Yo.query.filter_by(year=past_yr).all()
        growth_val = median([o.export_val_growth_pct_5 for o in this_yo])
        chg = "increased" if growth_val >= 0 else "decreased"
        export_subtitle += _(
            u"During the last five years exports have %(increased_decreased)s at a median annualized rate of %(change_rate)s%%, from $%(past_export_val)s in %(past_year)s to $%(current_export_val)s in %(current_year)s.",
            increased_decreased=chg, change_rate=num_format(growth_val * 100),
            past_export_val=num_format(sum([o.export_val for o in past_yo])),
            past_year=past_yr, current_export_val=num_format(export_val),
            current_year=self.year)
        export_subtitle += u" "

        top_exports = self.models.Yp.query.filter_by(
            year=self.year, hs92_id_len=6).order_by(desc("export_val")).limit(2).all()
        export_subtitle += _(
            u"The most recent exports are led by %(top_export)s which represent %(top_export_pct)s%% of the total products exported, followed by %(second_export)s, which account for %(second_export_pct)s%%.",
            top_export=top_exports[0].product.get_profile_link(),
            top_export_pct=num_format((top_exports[0].export_val / export_val) * 100),
            second_export=top_exports[1].product.get_profile_link(),
            second_export_pct=num_format((top_exports[1].export_val / export_val) * 100))

        origins_tmap = Build("tree_map", "hs92", "import", self.attr, "show",
                             "all", self.year)
        yo_exp = self.models.Yo.query.filter_by(year=self.year).order_by(
            desc("export_val")).limit(5).all()
        origin_list = self.stringify_items(yo_exp, "export_val", "country")
        origin_subtitle = _(u"The top exporters globally are %(origins)s.",
                            origins=origin_list)

        trade_section = {
            "builds": [
                {"title": _(u"Exports"), "build": export_tmap, "subtitle": export_subtitle},
                {"title": _(u"Origins"), "build": origins_tmap, "subtitle": origin_subtitle},
            ]
        }
    else:
        export_subtitle, import_subtitle, dest_subtitle, origin_subtitle = [None] * 4
        export_tmap = Build("tree_map", "hs92", "export", self.attr, "all",
                            "show", self.year)
        import_tmap = Build("tree_map", "hs92", "import", self.attr, "all",
                            "show", self.year)

        yop_base = self.models.Yop.query.filter_by(year=self.year,
                                                   origin=self.attr,
                                                   hs92_id_len=6)

        # get growth
        past_yr = self.year - 5
        past_yo = self.models.Yo.query.filter_by(year=past_yr,
                                                 country=self.attr).first()
        this_yo = self.models.Yo.query.filter_by(year=self.year,
                                                 country=self.attr).first()

        exp_val_stat = filter(lambda s: s["key"] == "export_val", self.stats())
        if exp_val_stat and this_yo:
            exp_val_stat = exp_val_stat.pop()
            export_subtitle = ""
            if self.attr.id != "xxwld":
                exp_rank = num_format(exp_val_stat["rank"], "ordinal") if exp_val_stat["rank"] > 1 else ""
                export_subtitle += _(
                    u"In %(year)s %(country)s exported $%(export_val)s, making it the %(export_rank)s largest exporter in the world.",
                    year=self.year, country=self.attr.get_name(article=True),
                    export_val=num_format(exp_val_stat["val"]), export_rank=exp_rank)
                export_subtitle += u" "
            if past_yo and this_yo.export_val_growth_pct_5:
                chg = "increased" if this_yo.export_val_growth_pct_5 >= 0 else "decreased"
                export_subtitle += _(
                    u"During the last five years the exports %(of_country)s have %(increased_decreased)s at an annualized rate of %(change_rate)s%%, from $%(past_export_val)s in %(past_year)s to $%(current_export_val)s in %(current_year)s.",
                    of_country=self.attr.get_name(article="of"),
                    increased_decreased=chg,
                    change_rate=num_format(this_yo.export_val_growth_pct_5 * 100),
                    past_export_val=num_format(past_yo.export_val),
                    past_year=past_yr,
                    current_export_val=num_format(this_yo.export_val),
                    current_year=self.year)
                export_subtitle += u" "
            top_exports = yop_base.order_by(desc("export_val")).limit(2).all()
            if top_exports:
                export_subtitle += _(
                    u"The most recent exports are led by %(top_export)s which represent %(top_export_pct)s%% of the total exports %(of_country)s, followed by %(second_export)s, which account for %(second_export_pct)s%%.",
                    top_export=top_exports[0].product.get_profile_link(),
                    top_export_pct=num_format((top_exports[0].export_val / exp_val_stat["val"]) * 100),
                    of_country=self.attr.get_name(article="of"),
                    second_export=top_exports[1].product.get_profile_link(),
                    second_export_pct=num_format((top_exports[1].export_val / exp_val_stat["val"]) * 100))

        imp_val_stat = filter(lambda s: s["key"] == "import_val", self.stats())
        if imp_val_stat and this_yo:
            imp_val_stat = imp_val_stat.pop()
            import_subtitle = ""
            if self.attr.id != "xxwld":
                imp_rank = num_format(imp_val_stat["rank"], "ordinal") if imp_val_stat["rank"] > 1 else ""
                import_subtitle += _(
                    u"In %(year)s %(country)s imported $%(import_val)s, making it the %(import_rank)s largest importer in the world.",
                    year=self.year, country=self.attr.get_name(article=True),
                    import_val=num_format(imp_val_stat["val"]), import_rank=imp_rank)
                import_subtitle += u" "
            if past_yo and this_yo.import_val_growth_pct_5:
                chg = "increased" if this_yo.import_val_growth_pct_5 >= 0 else "decreased"
                import_subtitle += _(
                    u"During the last five years the imports %(of_country)s have %(increased_decreased)s at an annualized rate of %(change_rate)s%%, from $%(past_import_val)s in %(past_year)s to $%(current_import_val)s in %(current_year)s.",
                    of_country=self.attr.get_name(article="of"),
                    increased_decreased=chg,
                    change_rate=num_format(this_yo.import_val_growth_pct_5 * 100),
                    past_import_val=num_format(past_yo.import_val),
                    past_year=past_yr,
                    current_import_val=num_format(this_yo.import_val),
                    current_year=self.year)
                import_subtitle += u" "
            top_imports = yop_base.order_by(desc("import_val")).limit(2).all()
            if top_imports:
                import_subtitle += _(
                    u"The most recent imports are led by %(top_import)s which represent %(top_import_pct)s%% of the total imports %(of_country)s, followed by %(second_import)s, which account for %(second_import_pct)s%%.",
                    top_import=top_imports[0].product.get_profile_link(),
                    top_import_pct=num_format((top_imports[0].import_val / imp_val_stat["val"]) * 100),
                    of_country=self.attr.get_name(article="of"),
                    second_import=top_imports[1].product.get_profile_link(),
                    second_import_pct=num_format((top_imports[1].import_val / imp_val_stat["val"]) * 100))

        dests_tmap = Build("tree_map", "hs92", "export", self.attr, "show",
                           "all", self.year)
        yod_exp = self.models.Yod.query.filter_by(
            year=self.year, origin=self.attr).order_by(desc("export_val")).limit(5).all()
        if yod_exp:
            dest_list = self.stringify_items(yod_exp, "export_val", "dest")
            dest_subtitle = _(
                u"The top export destinations %(of_country)s are %(destinations)s.",
                of_country=self.attr.get_name(article="of"),
                destinations=dest_list)

        origins_tmap = Build("tree_map", "hs92", "import", self.attr, "show",
                             "all", self.year)
        yod_imp = self.models.Yod.query.filter_by(
            year=self.year, dest=self.attr).order_by(desc("export_val")).limit(5).all()
        if yod_imp:
            origin_list = self.stringify_items(yod_imp, "export_val", "origin")
            origin_subtitle = _(
                u"The top import origins %(of_country)s are %(origins)s.",
                of_country=self.attr.get_name(article="of"),
                origins=origin_list)

        # trade balance viz --
        first_yo = self.models.Yo.query.filter_by(
            year=available_years["hs92"][-1], country=self.attr).first()
        tb_subtitle = ""
        tb_build = Build("line", "hs92", "show", self.attr, "all", "all",
                         available_years["hs92"])
        if first_yo:
            net_trade = this_yo.export_val - this_yo.import_val
            trade_balance = _("positive") if net_trade >= 0 else _("negative")
            trade_direction = _("exports") if net_trade >= 0 else _("imports")
            tb_subtitle = _(
                u"As of %(year)s %(country)s had a %(positive_negative)s trade balance of $%(net_trade)s in net %(exports_imports)s.",
                year=self.year, country=self.attr.get_name(article=True),
                positive_negative=trade_balance,
                net_trade=num_format(abs(net_trade)),
                exports_imports=trade_direction)
            old_yo = self.models.Yo.query.filter_by(
                year=available_years["hs92"][0], country=self.attr).first()
            if old_yo:
                old_net_trade = old_yo.export_val - old_yo.import_val
                old_trade_balance = _("positive") if old_net_trade >= 0 else _("negative")
                old_trade_direction = _("exports") if old_net_trade >= 0 else _("imports")
                is_diff = True if old_trade_balance != trade_balance else False
                still_or_not = _("still") if old_trade_balance == trade_balance else ""
                tb_subtitle += u" "
                tb_subtitle += _(
                    u"As compared to their trade balance in %(year)s when they %(still)s had a %(positive_negative)s trade balance of $%(net_trade)s in net %(exports_imports)s.",
                    year=available_years["hs92"][0], still=still_or_not,
                    positive_negative=old_trade_balance,
                    net_trade=num_format(abs(old_net_trade)),
                    exports_imports=old_trade_direction)

        trade_section = {
            "builds": [
                {"title": _(u"Exports"), "build": export_tmap, "subtitle": export_subtitle, "tour": "This is just a test", "seq": 5},
                {"title": _(u"Imports"), "build": import_tmap, "subtitle": import_subtitle},
                {"title": _(u"Trade Balance"), "build": tb_build, "subtitle": tb_subtitle},
                {"title": _(u"Destinations"), "build": dests_tmap, "subtitle": dest_subtitle},
                {"title": _(u"Origins"), "build": origins_tmap, "subtitle": origin_subtitle},
            ]
        }

    sections.append(trade_section)

    ''' Product Space Section '''
    subtitle = False
    if self.attr.id != "xxwld":
        num_exports_w_rca = db.session.query(func.count(self.models.Yop.hs92_id)) \
            .filter_by(year=self.year, origin=self.attr) \
            .filter(self.models.Yop.export_rca >= 1) \
            .filter(func.char_length(self.models.Yop.hs92_id) == 6) \
            .scalar()
        this_attr_yo = attrs.Yo.query.filter_by(year=self.year,
                                                country=self.attr).first()
        if this_attr_yo:
            eci = this_attr_yo.eci
            eci_rank = this_attr_yo.eci_rank
            if eci_rank:
                subtitle = _(
                    u"The economy %(of_country)s has an Economic Complexity Index (ECI) of %(eci)s making it the %(eci_rank)s most complex country.",
                    of_country=self.attr.get_name(article="of"),
                    eci=num_format(eci),
                    eci_rank=num_format(eci_rank, "ordinal"))
                subtitle += u" "
            else:
                subtitle = ""
            subtitle += _(u"%(country)s exports %(num_of_exports)s products with revealed comparative advantage "
                          u"(meaning that its share of global exports is larger than what "
                          u"would be expected from the size of its export economy "
                          u"and from the size of a product’s global market).",
                          country=self.attr.get_name(article=True),
                          num_of_exports=num_exports_w_rca)

    product_space = Build("network", "hs92", "export", self.attr, "all",
                          "show", self.year)
    ps_text = _(
        u"The product space is a network connecting products that are likely to be co-exported and can be used to predict the evolution of a country’s export structure."
    )
    if subtitle:
        ps_text = u"{}</p><p>{}".format(ps_text, subtitle)
    ps_section = {
        "title": _(u"Economic Complexity %(of_country)s",
                   of_country=self.attr.get_name(article="of")),
        "builds": [{
            "title": _(u"Product Space"),
            "build": product_space,
            "subtitle": ps_text,
            "tour": "The product space...",
            "seq": 6
        }]
    }

    ''' PGI Section '''
    if self.attr.id != "xxwld":
        pgi_product_space = Build("network", "sitc", "pgi", self.attr, "all",
                                  "show", available_years["sitc"][-1])
        subtitle = _(
            "In this version of the product space products are colored according to their Product Gini Index, or PGI. The PGI of a product is the level of income inequality that we expect for the countries that export a product. For more information see: %(paper1)s and %(paper2)s.",
            country=self.attr.get_name(article=True),
            paper1="<a target='_blank' href='https://arxiv.org/abs/1505.07907'>Linking Economic Complexity, Institutions and Income Inequality</a>",
            paper2="<a target='_blank' href='https://arxiv.org/abs/1701.03770'>The structural constraints of income inequality in Latin America</a>"
        )
        ps_section["builds"].append({
            "title": _(u"Complexity and Income Inequality"),
            "build": pgi_product_space,
            "subtitle": subtitle
        })

    ''' ECI Ranking Section '''
    if self.attr.id == "xxwld":
        line_rankings = Build("line", "sitc", "eci", "show", "all", "all",
                              [y for y in available_years["sitc"] if y >= 1964])
        start_year = 1980
        start_year = max(1964, start_year) if start_year != 1980 else 1964
        year_range = self.year - start_year
        subtitle = _(
            "The Economic Complexity of each country visualized over the past %(year_range)s years.",
            year_range=year_range)
        ps_section["builds"].append({
            "title": _(u"Economic Complexity Ranking"),
            "build": line_rankings,
            "subtitle": subtitle
        })
    elif this_attr_yo and this_attr_yo.eci is not None:
        line_rankings = Build("line", "sitc", "eci", "show", self.attr, "all",
                              [y for y in available_years["sitc"] if y >= 1964])
        start_year = earliest_data.get(self.attr.id, 1980)
        start_year = max(1964, start_year) if start_year != 1980 else 1964
        year_range = self.year - start_year
        attr_yo_historic = attrs.Yo.query.filter_by(
            country=self.attr).filter(attrs.Yo.year == start_year).first()
        if attr_yo_historic and attr_yo_historic.eci_rank is not None and this_attr_yo.eci_rank is not None:
            eci_delta = this_attr_yo.eci_rank - attr_yo_historic.eci_rank
            inc_dec = _('increased') if eci_delta < 0 else _('decreased')
            subtitle = _(
                "The Economic Complexity ranking %(of_country)s has %(increased_or_decreased)s by %(rank_delta)s places over the past %(year_range)s years from %(old_eci)s in %(old_year)s to %(current_eci)s in %(current_year)s.",
                of_country=self.attr.get_name(article="of"),
                increased_or_decreased=inc_dec,
                rank_delta=abs(eci_delta), year_range=year_range,
                old_eci=num_format(attr_yo_historic.eci_rank, "ordinal"),
                old_year=start_year,
                current_eci=num_format(this_attr_yo.eci_rank, "ordinal"),
                current_year=self.year)
            ps_section["builds"].append({
                "title": _(u"Economic Complexity Ranking"),
                "build": line_rankings,
                "subtitle": subtitle
            })

    sections.append(ps_section)

    sections.append({
        "title": _(u"More on %(country)s from our other sites",
                   country=self.attr.get_name(article=True)),
        "source": "sisters"
    })

    ''' DataViva '''
    # dv_section = make_dv_section(self)
    # sections.append(dv_section)

    ''' Data USA '''
    if self.attr.id == "nausa":
        us_section = make_us_section()
        sections.append(us_section)

    ''' Data Africa '''
    if any(country[0] == self.attr.id for country in data_africa_countries):
        da_country = filter(lambda x: x[0] == self.attr.id, data_africa_countries)
        africa_section = make_africa_section(self, da_country[0])
        sections.append(africa_section)

    ''' Pantheon '''
    pantheon_id = "all" if self.attr.id == "xxwld" else self.attr.id_2char
    if pantheon_id:
        if self.attr.id != "xxwld":
            pantheon_id = pantheon_id.upper()
        pantheon_section = make_pantheon_section(pantheon_id, self.attr)
        sections.append(pantheon_section)

    return sections
def cities_by_pop(value):
    Ybs = attrs.Ybs
    filters = [
        Ybs.stat_id == 'pop',
        Ybs.stat_val >= value,
        Ybs.year == __latest_year__['stats'],
        func.char_length(Ybs.bra_id) == 9
    ]
    res = Ybs.query.filter(*filters).with_entities(Ybs.bra_id).all()
    if res:
        return [row[0] for row in res]
    return res
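For example, given the filters above (latest 'pop' statistics, 9-character bra ids), a call might look like this; the threshold is illustrative:

# Example: ids of every municipality with at least 100,000 residents in the
# latest year for which population statistics are loaded.
big_city_ids = cities_by_pop(100000)  # list of 9-character bra_id strings, or []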
def intro(self):
    all_paragraphs = []

    ''' Paragraph #2 '''
    # get total world trade rank
    this_yp = self.models.Yp.query.filter_by(year=self.year,
                                             product=self.attr).first()
    all_yp = self.models.Yp.query.filter_by(year=self.year) \
        .filter(func.char_length(getattr(self.models.Yp, "{}_id".format(self.classification))) == len(self.attr.id)) \
        .order_by(desc("export_val")).all()
    if this_yp:
        econ_rank = num_format(all_yp.index(this_yp) + 1, "ordinal") if all_yp.index(this_yp) else ""
        # get PCI ranking
        p2 = _(u"%(product)s the %(economic_rank)s most traded product",
               product=self.attr.get_name(verb=True), economic_rank=econ_rank)
        pci_rank = this_yp.pci_rank
        if pci_rank:
            pci_ranking_link = u"<a href='/en/rankings/hs92/'>{} (PCI)</a>".format(
                _(u"Product Complexity Index"))
            pci_rank = num_format(pci_rank, "ordinal") if pci_rank > 1 else ""
            p2 += u" "
            p2 += _(u"and the %(pci_rank)s most complex product according to the %(pci_ranking_link)s",
                    pci_rank=pci_rank, pci_ranking_link=pci_ranking_link)
        p2 += "."
        all_paragraphs.append(p2)

    ''' Paragraph #3 '''
    yop_exp = self.models.Yop.query.filter_by(
        year=self.year, product=self.attr).filter(
            self.models.Yop.export_val != None).order_by(
                desc("export_val")).limit(5).all()
    if yop_exp:
        exporters = self.stringify_items(yop_exp, "export_val", "origin")
        yop_imp = self.models.Yop.query.filter_by(
            year=self.year, product=self.attr).filter(
                self.models.Yop.import_val != None).order_by(
                    desc("import_val")).limit(5).all()
        importers = self.stringify_items(yop_imp, "import_val", "origin")
        p3 = _(u"The top exporters of %(product)s are %(exporters)s. The top importers are %(importers)s.",
               product=self.attr.get_name(), exporters=exporters,
               importers=importers)
        all_paragraphs.append(p3)

    ''' Paragraph #4 '''
    p4 = []
    # find out which countries this product is their #1 export/import
    countries_top = self.models.Yo.query.filter_by(year=self.year)
    if len(self.attr.id) == 6:
        countries_top_export = countries_top.filter_by(top_export=self.attr.id) \
            if self.classification == "sitc" \
            else countries_top.filter_by(top_export_hs4=self.attr.id)
        countries_top_import = countries_top.filter_by(top_import=self.attr.id) \
            if self.classification == "sitc" \
            else countries_top.filter_by(top_import_hs4=self.attr.id)
    elif len(self.attr.id) == 8:
        countries_top_export = countries_top.filter_by(top_export_hs6=self.attr.id)
        countries_top_import = countries_top.filter_by(top_import_hs6=self.attr.id)
    countries_top_export = countries_top_export.order_by(desc('export_val')).limit(10).all()
    countries_top_import = countries_top_import.order_by(desc('import_val')).limit(10).all()
    if countries_top_export:
        countries_top_export = self.stringify_items(countries_top_export, None, "country")
        p4.append(_(u"%(product)s the top export of %(countries)s.",
                    product=self.attr.get_name(verb=True),
                    countries=countries_top_export))
    if countries_top_import:
        countries_top_import = self.stringify_items(countries_top_import, None, "country")
        p4.append(_(u"%(product)s the top import of %(countries)s.",
                    product=self.attr.get_name(verb=True),
                    countries=countries_top_import))
    if p4:
        all_paragraphs = all_paragraphs + p4

    ''' Paragraph #5 '''
    keywords = self.attr.get_keywords()
    if keywords:
        all_paragraphs.append(_(u"%(product)s also known as %(keywords)s.",
                                product=self.attr.get_name(verb=True),
                                keywords=keywords))

    ''' Paragraph #1 '''
    p1 = _(u"%(product)s a %(product_id_length)s digit %(classification)s product.",
           product=self.attr.get_name(verb=True),
           product_id_length=len(self.attr.get_display_id()),
           classification=self.classification.upper())
    all_paragraphs.append(p1)

    return all_paragraphs
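A hypothetical caller could join the returned paragraphs for display; note that "Paragraph #1" is deliberately appended last in the code above, so the list order is not the comment numbering. The product_profile name below is an assumption:

# Hypothetical rendering of intro(): each entry is a translated paragraph string.
paragraphs = product_profile.intro()
intro_html = u"".join(u"<p>{}</p>".format(p) for p in paragraphs)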
def attrs(attr="bra",Attr_id=None): Attr = globals()[attr.title()] Attr_weight_mergeid = "{0}_id".format(attr) if attr == "bra": Attr_weight_tbl = Yb Attr_weight_col = "population" elif attr == "isic": Attr_weight_tbl = Yi Attr_weight_col = "num_emp" elif attr == "cbo": Attr_weight_tbl = Yo Attr_weight_col = "num_emp" elif attr == "hs": Attr_weight_tbl = Yp Attr_weight_col = "val_usd" elif attr == "wld": Attr_weight_tbl = Yw Attr_weight_col = "val_usd" depths = {} depths["bra"] = [2,4,7,8] depths["isic"] = [1,3,5] depths["cbo"] = [1,2,4] depths["hs"] = [2,4,6] depths["wld"] = [2,5] depth = request.args.get('depth', None) order = request.args.get('order', None) offset = request.args.get('offset', None) limit = request.args.get('limit', None) if offset: offset = float(offset) limit = limit or 50 lang = request.args.get('lang', None) or g.locale ret = {} dataset = "rais" if Attr == Cbo or Attr == Hs: dataset = "secex" latest_year = __latest_year__[dataset] cache_id = request.path + lang if depth: cache_id = cache_id + "/" + depth # first lets test if this query is cached cached_q = cached_query(cache_id) if cached_q and limit is None: ret = make_response(cached_q) ret.headers['Content-Encoding'] = 'gzip' ret.headers['Content-Length'] = str(len(ret.data)) return ret # if an ID is supplied only return that if Attr_id: # the '.show.' indicates that we are looking for a specific nesting if ".show." in Attr_id: this_attr, ret["nesting_level"] = Attr_id.split(".show.") # filter table by requested nesting level attrs = Attr.query \ .filter(Attr.id.startswith(this_attr)) \ .filter(func.char_length(Attr.id) == ret["nesting_level"]).all() # the 'show.' indicates that we are looking for a specific nesting elif "show." in Attr_id: ret["nesting_level"] = Attr_id.split(".")[1] # filter table by requested nesting level attrs = Attr.query.filter(func.char_length(Attr.id) == ret["nesting_level"]).all() # the '.' here means we want to see all attrs within a certain distance elif "." in Attr_id: this_attr, distance = Attr_id.split(".") this_attr = Attr.query.get_or_404(this_attr) attrs = this_attr.get_neighbors(distance) else: attrs = [Attr.query.get_or_404(Attr_id)] ret["data"] = [fix_name(a.serialize(), lang) for a in attrs] # an ID/filter was not provided else: query = db.session.query(Attr,Attr_weight_tbl) \ .outerjoin(Attr_weight_tbl, and_(getattr(Attr_weight_tbl,"{0}_id".format(attr)) == Attr.id, Attr_weight_tbl.year == latest_year)) if depth: query = query.filter(func.char_length(Attr.id) == depth) else: query = query.filter(func.char_length(Attr.id).in_(depths[attr])) if order: direction = "asc" if "." 
in order: o, direction = order.split(".") else: o = order if o == "name": o = "name_{0}".format(lang) if o == Attr_weight_col: order_table = Attr_weight_tbl else: order_table = Attr if direction == "asc": query = query.order_by(asc(getattr(order_table,o))) elif direction == "desc": query = query.order_by(desc(getattr(order_table,o))) if limit: query = query.limit(limit).offset(offset) attrs_all = query.all() # just get items available in DB attrs_w_data = None if depth is None and limit is None: attrs_w_data = db.session.query(Attr, Attr_weight_tbl) \ .filter(getattr(Attr_weight_tbl, Attr_weight_mergeid) == Attr.id) \ .group_by(Attr.id) # raise Exception(attrs_w_data.all()) attrs_w_data = [a[0].id for a in attrs_w_data] attrs = [] for i, a in enumerate(attrs_all): b = a[0].serialize() if a[1]: b[Attr_weight_col] = a[1].serialize()[Attr_weight_col] else: b[Attr_weight_col] = 0 a = b if attrs_w_data: a["available"] = False if a["id"] in attrs_w_data: a["available"] = True if Attr_weight_col == "population" and len(a["id"]) == 8 and a["id"][:2] == "mg": plr = Bra.query.get_or_404(a["id"]).pr2.first() if plr: a["plr"] = plr.id if order: a["rank"] = int(i+offset+1) attrs.append(fix_name(a, lang)) ret["data"] = attrs ret = jsonify(ret) ret.data = gzip_data(ret.data) if limit is None and cached_q is None: cached_query(cache_id, ret.data) ret.headers['Content-Encoding'] = 'gzip' ret.headers['Content-Length'] = str(len(ret.data)) return ret
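The view answers a few URL and query-string shapes; a hypothetical Flask test-client session illustrates them (the route prefix and client setup are assumptions, not taken from this codebase):

# Hypothetical calls against this view, assuming it is mounted under /attrs/:
client = app.test_client()
client.get("/attrs/bra/")                             # all ids at the default depths
client.get("/attrs/bra/?depth=8")                     # only 8-character bra ids
client.get("/attrs/hs/?order=val_usd.desc&limit=10")  # top 10 products by trade value
client.get("/attrs/bra/mg.show.8")                    # all 8-character regions within "mg"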