def main():
    with open('relations/topic_dict') as f:
        topic_dict = dict((v, k) for k, v in loads(f.read()).items())
    with open('relations/topic_member') as f:
        topic_member = loads(f.read())
    topic_member = sorted(topic_member.iteritems(), key=lambda x: len(x[1]), reverse=True)
    for k, v in topic_member:
        print len(v), ' - ', topic_dict[int(k)]
def create_menu():
    _url = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid={}&secret={}"
    _rp = requests.get(_url.format(WECHAT.APPID, WECHAT.APPSECRET))
    access_token = loads(_rp.text)['access_token']
    url = "https://api.weixin.qq.com/cgi-bin/menu/create?access_token={}".format(access_token)
    response = requests.post(url, data=dumps(MENU_TEMPLE))
    return loads(response.text)['errmsg']
def translate(text, language):
    # Choose a random server from the server list.
    # TODO: Make this less ugly :)
    result = json.loads(requests.get(
        config["etcd"]["connection_string"] + "language:{0}".format(language)).text)
    server = choice(json.loads(result["value"])["servers"])
    proxy = xmlrpc.client.ServerProxy(server)
    try:
        translation = proxy.translate({"text": text.lower()})
    except Exception as err:
        return err
    return json.dumps(translation)
def add_gadm1_codes_to_gadm1_json(self):
    """ Add GADM level 1 information to level 1 map outlines from GADM. """
    with open('../static/topojson/gadm1_map.json') as f:
        ga = json.loads(f.read())
    # Rename the placeholder object and drop the original key.
    ga['objects']['regions'] = ga['objects']['_bejeezus']
    ga['objects'].pop('_bejeezus', None)
    for region in ga['objects']['regions']['geometries']:
        props = region['properties']
        try:
            props['iso'] = props['ISO']
            props['adm1'] = '{0:02d}'.format(props['ID_1'])
            props['adm0'] = '{0}'.format(props['ID_0'])
            props['adm'] = '{0}{1:02d}'.format(props['ID_0'], props['ID_1'])
            props['name'] = props['NAME_1']
            props['country'] = props['NAME']
            for key in ('ISO', 'ID_1', 'ID_0', 'ENGTYPE_1', 'TYPE_1',
                        'NL_NAME_1', 'VARNAME_1'):
                props.pop(key, None)
        except Exception:
            pass
    with open('../static/topojson/atlas_gadm1.json', 'w') as f:
        f.write(json.dumps(ga))
    return ga
def add_new_data(self, session_info, new_data, old_ack_id):
    new_data = json.loads(new_data)
    user_status = self.get_secure_cookie(session_info['session_id'])
    if not user_status or user_status != 'admin' or not old_ack_id:
        self.finish({'error': "You cannot sir !"})
        return
    old_ack_id = int(old_ack_id)
    if new_data.get('pic_state'):
        session_info['start_from'] = old_ack_id + 1  # one change just done, so add one
        session_info['pic_state'] = new_data['pic_state']
        session_info['data'] = []
        user_stream_data = {'data': [], 'pic_state': new_data['pic_state']}
    else:
        session_info['data'].append(new_data['data'])
        user_stream_data = {'data': [new_data['data']]}
    ack_id = session_info['start_from'] + len(session_info['data'])
    user_stream_data['ack_id'] = ack_id
    for conn in list(session_info['connections'].keys()):
        conn.finish(user_stream_data)
        del session_info['connections'][conn]
    self.finish({'ack_id': ack_id})  # to admin
def post(self, game_session):
    self.game_session = game_session
    game = sessions[game_session]
    uid = self.get_secure_cookie("uid")
    try:
        index = json.loads(self.get_argument("i", None))
    except:
        self.finish()
        return
    if not index:
        if not game["users"][uid]:
            game["user_connections"][uid] = self
            return
        self.finish({"updates": game["users"][uid] + ["reconnect"]})
        game["users"][uid] = []
        return
    if game["current_uid"] != uid:
        # Update the game state, send the status update to the other user,
        # and keep this user in wait mode.
        if game["state"][index[0]][index[1]] != False:
            self.finish({"updates": ["your move"]})
            return
        game["state"][index[0]][index[1]] = uid
        uid_other = game["current_uid"]
        game["user_connections"][uid] = self
        game["current_uid"] = uid
        try:
            game["user_connections"][uid_other].finish(
                {'updates': ["update " + json.dumps(index), "your move"]})
        except:
            game["users"][uid_other] += ["update " + json.dumps(index), "your move"]
        self.finish({"updates": ["wait", "reconnect"]})
        return
    self.finish({"updates": ["wait"]})
def runTest(self):
    ''' http://github.com/rtyler/py-yajl/issues#issue/8 '''
    encoded = yajl.dumps([(2, 3)])
    decoded = yajl.loads(encoded)
    self.assertEquals(len(decoded), 1)
    self.assertEquals(decoded[0][0], 2)
    self.assertEquals(decoded[0][1], 3)
def fetch(self, want="*"):
    if want == "*":
        req = "*"
    else:
        req = json.dumps(want)
    self.port.write(req)
    return json.loads(self.port.read())
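# A minimal usage sketch for fetch() above, assuming self.port is a
# pyserial-like object speaking JSON over a serial line; Device and the
# query shape are hypothetical stand-ins:
#
#   device = Device(serial.Serial('/dev/ttyUSB0', 9600))
#   everything = device.fetch()             # "*" requests the full state
#   subset = device.fetch(["temperature"])  # any JSON-serializable query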
def tf_idf_by_zhihu():
    current_path = os.path.dirname(os.path.abspath(__file__))
    infile = join(current_path, 'data/out.js')
    outfile = join(current_path, 'zhihu.idf')
    idf = Idf()
    with open(infile) as lib:
        for line in lib:
            l = loads(line)
            idf.append(l['title'])
            for j in l['answer']:
                idf.append(j['answer'])
    with open(join(current_path, "data/review.txt")) as review:
        result = []
        for line in review:
            line = line.strip()
            if not line:
                continue
            if line.startswith(">->->"):
                if result:
                    line = line.split(" ", 5)
                    result.append(line[-1])
                    txt = "\n".join(result)
                    idf.append(txt)
                    print line[1]
                    result = []
            else:
                result.append(line)
    idf.tofile(outfile)
def runTest(self):
    dumpable = [11889582081]
    rc = yajl.dumps(dumpable)
    self.assertEquals(rc, '[11889582081]')
    rc = yajl.loads(rc)
    self.assertEquals(rc, dumpable)
def testLong(self):
    ''' http://github.com/rtyler/py-yajl/issues#issue/10 '''
    if is_python3():
        return
    data = {long(1): 2}
    result = yajl.loads(yajl.dumps(data))
    self.assertEquals({'1': 2}, result)
def main():
    if len(sys.argv) != 2:
        print 'Requires a couple of arguments'
        return -1
    since = time.strftime('%Y-%m-%d', time.localtime())
    qs = urllib.urlencode({'q': sys.argv[1], 'since': since})
    url_fd = urllib2.urlopen('%s?%s' % (SEARCH_URL, qs))
    body = url_fd.read()
    if not body:
        print 'Empty body?'
        return -1
    data = yajl.loads(body)
    if not data.get('results'):
        return 0
    for r in data.get('results', []):
        from_user = r.get('from_user', '').lower()
        if not from_user or from_user == 'jenkinsci':
            continue
        retweets = r.get('metadata', {}).get('recent_retweets', 0)
        print '%s : %s \n\t%s' % (from_user, retweets, r.get('text'))
    return 0
def add_gadm1_codes_to_ne1_json(self):
    """ Add GADM level 1 information to level 1 map outlines from Natural Earth. """
    gadm = pd.DataFrame.from_csv('./gadm1.meta.csv', index_col=4)
    gadm.index = np.arange(len(gadm))
    with open('../static/topojson/ne1_s0001.json') as f:
        ne = json.loads(f.read())
    for region in ne['objects']['regions']['geometries']:
        props = region['properties']
        try:
            country = pycountry.countries.get(alpha2=props['iso_a2'])
            props['iso'] = country.alpha3
            # Match the GADM row by ISO code and region name.
            match = gadm.ix[gadm['ISO'] == country.alpha3].ix[
                gadm['NAME'] == props['name'].encode('latin_1')]
            id0 = match.ID_0.values[0]
            id1 = match.ID_1.values[0]
            props['adm1'] = '{0:02d}'.format(id1)
            props['adm0'] = '{0}'.format(id0)
            props['adm'] = '{0}{1:02d}'.format(id0, id1)
        except Exception:
            pass
    with open('../static/topojson/atlas_gadm1.json', 'w') as f:
        f.write(json.dumps(ne))
    return ne
def down_playlist():
    pl_url = "http://douban.fm/j/mine/playlist?type=n&h=|432599:p&channel=1&from=mainsite&r=ecc38a4d94"
    pl_f = urllib2.urlopen(pl_url)
    data = pl_f.read()
    pl_f.close()
    pl = yajl.loads(data)
    return pl['song']
def test_chinese(self):
    ''' Testing with simplified chinese for
        http://github.com/rtyler/py-yajl/issues/#issue/7 '''
    char = '\u65e9\u5b89, \u7238\u7238'
    if not is_python3():
        from tests import python2
        char = python2.IssueSevenTest_chinese_char
    out = yajl.dumps(char).lower()
    self.assertEquals(out, '"\\u65e9\\u5b89, \\u7238\\u7238"')
    out = yajl.dumps(out).lower()
    self.assertEquals(out, '"\\"\\\\u65e9\\\\u5b89, \\\\u7238\\\\u7238\\""')
    out = yajl.loads(out)
    self.assertEquals(out, '"\\u65e9\\u5b89, \\u7238\\u7238"')
    out = yajl.loads(out)
    self.assertEquals(out, char)
def dwz_cn(url):
    url = urllib.quote(url)
    result = urlfetch('http://dwz.cn/create.php', data='url=%s' % url)
    result = loads(result)
    if 'err_msg' in result:
        print result['err_msg']
    else:
        return result['tinyurl']
def decode_json(self, decode=None, **kwargs):
    if not decode:
        return {}
    piece = decode.split('/')[-1]
    entry = self.redis[piece]
    if not entry:
        return {}
    entry = json.loads(entry)
    return {'url': entry['url'], 'encoded': 'http://urlenco.de/%s' % piece}
def add_test(v):
    # These modules have a few round-tripping problems...
    try:
        assert cjson.decode(cjson.encode(v)) == v
        assert yajl.loads(yajl.dumps(v)) == v
    except Exception:
        pass
    else:
        TESTS.append((v, tnetstring.dumps(v), cjson.encode(v)))
def _txt_tag_generator(self):
    path = self.path
    tag2id = self.tag2id
    data_files = glob(join(path, "*.data"))
    zhihu_data = [join(path, "zhihu")]
    zhihu_data.extend(data_files)
    print "Processing..."
    with open(join(path, "topic_dict")) as g:
        topic_dict = loads(g.read())
    for data_src in zhihu_data:
        print "Processing...", data_src
        with open(data_src) as f:
            for line in f:
                data = loads(line)
                if "tags" in data:
                    tags = data["tags"]
                else:
                    continue
                if "zhihu" not in data_src:
                    # Keep only known, non-banned tags for non-zhihu sources.
                    tags_processed = [tag for tag in tags
                                      if tag in topic_dict and tag not in banned_tag_list]
                    if not tags_processed:
                        continue
                    tags = tags_processed
                # Look up parent tags and append them.
                parent_list = self.parent_tag_finder.get_parent_tag_list_by_list(tags)
                tags.extend(parent_list)
                id_list = tag2id.id_list_by_word_list(tags)
                yield data["txt"], id_list
def collect(key):
    logger.debug('Collecting data from AEMET started')
    try:
        result = get(url_aemet, headers={'api_key': key})
    except exceptions.ConnectionError:
        logger.error('Collecting link from AEMET failed due to the connection problem')
        return False
    if result.status_code not in http_ok:
        logger.error('Collecting link from AEMET failed due to the return code')
        return False
    logger.debug('Remaining requests %s', result.headers.get('Remaining-request-count'))
    result = loads(result.text)
    try:
        result = get(result['datos'])
    except exceptions.ConnectionError:
        logger.error('Collecting data from AEMET failed due to the connection problem')
        return False
    if result.status_code not in http_ok:
        logger.error('Collecting data from AEMET failed due to the return code')
        return False
    result = loads(result.text)
    # Drop records for stations we don't track.
    for i in range(len(result) - 1, -1, -1):
        if result[i]['idema'] not in stations:
            del result[i]
    if latest:
        # Keep only the newest record per station.
        check = list()
        result = sorted(result, key=lambda k: (k['idema'], k['fint']), reverse=True)
        for item in range(len(result) - 1, -1, -1):
            if result[item]['idema'] in check:
                del result[item]
            else:
                check.append(result[item]['idema'])
    logger.debug('Collecting data from AEMET ended')
    return result
def consumetag(message, tag):
    try:
        messg = simplejson.loads(message)
        # NOTE: eval() on request-derived data is dangerous; kept as-is.
        cmd = "%s%s" % (messg, tag)
        result = eval(cmd)
        return str(result)
    except (KeyError, TypeError):
        return ''
def curt_cc(url):
    _url = url
    url = urllib.quote(url)
    result = urlfetch('http://curt.cc/service/generator.php?url=%s' % url)
    try:
        result = loads(result)
        return result['url']
    except:
        traceback.print_exc()
        return _url
def trim_aggr_data(self, var):
    """ Trim extra scenarios and irrigations from json file (for initial loading of page). """
    with open('../static/json/aggr/{}_gadm{}.json'.format(var, self._adm), 'r') as f:
        data = json.loads(f.read())
    # Keep only the first scenario and first irrigation per region.
    data['data'] = {k: np.array(v)[:, 0, 0].tolist()
                    for k, v in data['data'].iteritems()}
    with open('../static/json/aggr/{}_gadm{}_home.json'.format(var, self._adm), 'w') as f:
        f.write(json.dumps(data))
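# A small illustration of the [:, 0, 0] slice in trim_aggr_data above,
# assuming each value is shaped (time, scenario, irrigation); the axis
# meaning is inferred from the docstring and the numbers are made up:
#
#   >>> import numpy as np
#   >>> v = np.arange(24).reshape(2, 3, 4)  # 2 timesteps, 3 scenarios, 4 irrigations
#   >>> v[:, 0, 0].tolist()                 # first scenario, first irrigation
#   [0, 12]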
def getValueForItemFromJson(response, item_name):
    try:
        string_name = response.text
        strDecoded = string_name.decode().encode('utf-8')
        parsed = simplejson.loads(strDecoded)
        value = parsed[item_name]
        return value, 'ok'
    except Exception:
        print 'Unable to getValueForItemFromJson: %s, %s' % (item_name, response)
        return response, 'error'
def __init__(self, cfg, as_dict=True):
    # super(ParallelPipe, self).__init__()
    cfg = sod(cfg)
    self.models = {}
    self.as_dict = as_dict
    self.no_grad = False
    for k, v in cfg.items():
        # A string value is a path to a JSON model spec; anything else
        # is treated as a nested pipe configuration.
        if isinstance(v, str):
            with open(v, 'r') as f:
                self.models[k] = Compile(json.loads(f.read()))
        else:
            self.models[k] = ParallelPipe(v)
def setup_stations(stations_limit):
    result = dict()
    limit_on = 'include' in stations_limit
    limit_off = 'exclude' in stations_limit
    try:
        resp = get(url_stations)
    except exceptions.ConnectionError:
        logger.error(
            'Collecting the list of stations from IPMA failed due to connection problem')
        exit(1)
    if resp.status_code not in http_ok:
        logger.error(
            'Collecting the list of stations from IPMA failed due to the return code %s',
            resp.status_code)
        exit(1)
    content = loads(resp.text)['features']
    for station in content:
        station_code = str(station['properties']['idEstacao'])
        if limit_on and station_code not in stations_limit['include']:
            continue
        if limit_off and station_code in stations_limit['exclude']:
            continue
        result[station_code] = dict()
        result[station_code]['name'] = sanitize(station['properties']['localEstacao'])
        result[station_code]['coordinates'] = station['geometry']['coordinates']
        if station_code in tz_azot_codes:
            result[station_code]['timezone'] = tz_azot
        else:
            result[station_code]['timezone'] = tz_wet
    if limit_on and len(result) != len(stations_limit['include']):
        logger.error('Errors in the list of stations detected')
        exit(1)
    return result
def fetch_state(self, uuid):
    if uuid not in self.telescreens:
        return (uuid, None)
    port = self.telescreens[uuid]
    port.write("S")
    res = port.read()
    if res:
        return (uuid, json.loads(res))
    else:
        del self.telescreens[uuid]
        return (uuid, None)
def test_latin1(self):
    ''' Testing with latin-1 for
        http://github.com/rtyler/py-yajl/issues/#issue/7 '''
    char = 'f\xe9in'
    if not is_python3():
        from tests import python2
        char = python2.IssueSevenTest_latin1_char
    # The `json` module uses "0123456789abcdef" for its code points
    # while the yajl library uses "0123456789ABCDEF"; lower() the output
    # to make sure the resulting strings match.
    out = yajl.dumps(char).lower()
    self.assertEquals(out, '"f\\u00e9in"')
    out = yajl.dumps(out).lower()
    self.assertEquals(out, '"\\"f\\\\u00e9in\\""')
    out = yajl.loads(out)
    self.assertEquals(out, '"f\\u00e9in"')
    out = yajl.loads(out)
    self.assertEquals(out, char)
def testStringDecoding(self):
    log('STRING DECODE')
    # This should decode to a utf-8 str(), not a unicode instance!
    s = yajl.loads('"abc"')
    print(repr(s))
    obj = yajl.loads('"\u03bc"')
    assert isinstance(obj, str), repr(obj)
    self.assertEqual(obj, '\xce\xbc')
    obj = yajl.loads('"\xce\xbc"')
    assert isinstance(obj, str), repr(obj)
    self.assertEqual(obj, '\xce\xbc')
    # Invalid utf-8. Doesn't give a good parse error!
    if 0:
        u = yajl.loads('"\xFF"')
        print(repr(u))
def main(args):
    auth = BasicAuth(login=args.username, password=args.password)
    result = run(users(tc_users.format(args.url), auth))
    if not result:
        exit(1)
    result = loads(result)
    user_list = list(result['user'])
    print(dumps(run(user(user_list, args.url, auth)), indent=2))
def main():
    db = DB()
    if not db.open("bayes.kch", DB.OWRITER | DB.OCREATE):
        return
    with open("word_tf.txt") as word_tf:
        for line in word_tf:
            line = line.strip()
            word, bayes_list = loads(line)
            print word
            if bayes_list:
                ar = array('I')
                ar.fromlist(lineiter(bayes_list))
                db[word] = ar.tostring()
async def scrape_one(url, session):
    try:
        async with session.get(url) as response:
            content = await response.text()
    except client_exceptions.ClientConnectorError:
        print(f'Scraping {url} failed due to the connection problem')
        return False
    if response.status not in http_ok:
        print(f'Scraping {url} failed due to the return code {response.status}')
        return False
    return loads(content)
def collect():
    logger.debug('Collecting data from IPMA started')
    result = list()
    last = ''
    try:
        request = get(url_observation)
    except exceptions.ConnectionError:
        logger.error('Collecting data from IPMA failed due to the connection problem')
        return False
    if request.status_code not in http_ok:
        logger.error('Collecting data from IPMA failed due to the return code')
        return False
    content = loads(request.text)
    if latest:
        last = sorted(content.items(), reverse=True)[0][0]
    for date in content:
        if latest and date != last:
            continue
        for station_code in content[date]:
            if station_code not in stations:
                continue
            if not content[date][station_code]:
                logger.info('Collecting data about station %s skipped', station_code)
                continue
            observation = content[date][station_code]
            item = dict()
            item['id'] = station_code
            item['atmosphericPressure'] = observation['pressao']
            item['dateObserved'] = datetime.strptime(date, '%Y-%m-%dT%H:%M')
            item['precipitation'] = observation['precAcumulada']
            item['relativeHumidity'] = observation['humidade']
            item['temperature'] = observation['temperatura']
            item['windDirection'] = observation['idDireccVento']
            item['windSpeed'] = observation['intensidadeVento']
            result.append(item)
    logger.debug('Collecting data from IPMA ended')
    return result
async def get(el, url, auth, session):
    try:
        async with session.get(tc_user.format(url, str(el['id'])),
                               auth=auth, headers=headers) as response:
            if response.status in http_ok:
                content = await response.text()
    except client_exceptions.ClientConnectorError:
        error(f"user_one:{el['username']}:ClientConnectionError")
        return False
    if response.status not in http_ok:
        error(f"user_one:{el['username']}:StatusCode:{response.status}")
        return False
    return loads(content)
def zhihu_to_dump():
    with open('/home/zuroc/zpage/misc/spider/zhihu_question_to_dump.json') as zhihu_question_dump:
        for line in reversed(list(zhihu_question_dump)):
            line = loads(line)
            key = line[1]
            filename = md5(key).hexdigest()
            path = '/tmp/www.zhihu.com/%s' % filename
            if exists(path):
                r = line_parser(path, line)
            else:
                r = None
            if not r:
                yield line[-2], line[2], line[-1]
def setup_stations(stations_limit):
    result = dict()
    limit_on = 'include' in stations_limit
    limit_off = 'exclude' in stations_limit
    try:
        resp = get(url_stations)
    except exceptions.ConnectionError:
        logger.error(
            'Collecting the list of stations from IPMA failed due to the connection problem')
        exit(1)
    if resp.status_code not in http_ok:
        logger.error(
            'Collecting the list of stations from IPMA failed due to the return code %s',
            resp.status_code)
        exit(1)
    content = loads(resp.text)
    for station in content:
        station_code = str(station['globalIdLocal'])
        if limit_on and station_code not in stations_limit['include']:
            continue
        if limit_off and station_code in stations_limit['exclude']:
            continue
        result[station_code] = dict()
        result[station_code]['postalCode'] = station_code
        result[station_code]['addressLocality'] = sanitize(station['local'])
        result[station_code]['url'] = url_observation.format(station_code)
        if station_code in tz_azot_codes:
            result[station_code]['timezone'] = tz_azot
        else:
            result[station_code]['timezone'] = tz_wet
    if limit_on and len(result) != len(stations_limit['include']):
        logger.error('Errors in the list of stations detected')
        exit(1)
    return result
def pay_notice(pay_id):
    from notice import notice_new
    trade = Trade.get(pay_id)
    notice_new(trade.from_id, trade.to_id, CID_NOTICE_PAY, pay_id)
    t_log = trade_log.get(pay_id)
    if t_log:
        message = loads(t_log)
        if 'txt' in message:
            if 'secret' in message:
                state = STATE_SECRET
            else:
                state = STATE_ACTIVE
            to_user = Zsite.mc_get(trade.to_id)
            from_user = Zsite.mc_get(trade.from_id)
            to_user.reply_new(from_user, message['txt'], state)
def get_rss_basic_json(url):
    if url.startswith('https://www.google.com/reader/') or \
            url.startswith('http://www.google.com/reader/'):
        if url.startswith('http://'):
            url = 'https' + url[4:]
        url = unquote(url)
    detail_url = RSS_DETAIL + quote(url)
    try:
        r = urlopen(detail_url, timeout=10).read()
    except Exception:
        traceback.print_exc()
        return
    r = loads(r)
    if r['responseStatus'] == 200:
        return r
def dispatch(self, encoded_url=None, **kwargs):
    if not encoded_url:
        return 'Fail'
    if encoded_url == self.not_found:
        entry = {'url': 'http://urlenco.de/404'}
    else:
        entry = json.loads(self.redis[encoded_url])
    if not entry.get('url'):
        return 'Fail'
    self.code = 301
    self.headers.append(('Location', entry['url'] + '\r\n'))
    return ''
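# For reference, decode_json() and dispatch() above both assume each redis
# value is a JSON object carrying at least a 'url' key; a hypothetical entry:
#
#   redis['abc123'] = '{"url": "https://example.com/some/long/path"}'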
def _fetch(self, url):
    is_search = url.startswith('/search.json')
    logging.debug('_fetch("%s")' % url)
    if os.environ.get('DEBUG') and os.environ.get('USEFILES'):
        if not is_search:
            with open(DEBUG_HOME_FILE, 'r') as fd:
                return json.loads(fd.read())
        else:
            with open(DEBUG_SEARCH_FILE, 'r') as fd:
                return json.loads(fd.read())
    if is_search:
        connection = httplib.HTTPConnection(TWITTER_DOMAIN)
    else:
        connection = httplib.HTTPSConnection(TWITTER_DOMAIN)
    connection.putrequest('GET', url)
    if not is_search:
        # No need to send extra bits in for searches
        connection.putheader('Authorization', self._auth_header())
    connection.endheaders()
    try:
        response = connection.getresponse()
        data = response.read()
        if os.environ.get('DEBUG'):
            if is_search:
                with open(DEBUG_SEARCH_FILE, 'w') as fd:
                    fd.write(data)
            else:
                with open(DEBUG_HOME_FILE, 'w') as fd:
                    fd.write(data)
        return json.loads(data)
    except Exception, ex:
        logging.error(ex)
        return []
def post(self, cat_id):
    err = False
    content = loads(self.request.body)
    name = content.get('name')
    status = content.get('status')
    category = Category.find_one(dict(_id=int(cat_id)))
    if not name:
        err = u'Category Name can\'t be empty!'
    elif name != category.name and Category.count(dict(name=name, status={'$ne': '0'})):
        err = u'Name is already in use!'
    else:
        Category._update(cat_id, name=name, status=status)
    self.finish(dict(err=err))
def loads(idict, **kwargs):
    """ Based on default MODULE invoke appropriate JSON decoding API call """
    if MODULE == 'json':
        return json.loads(idict, **kwargs)
    elif MODULE == 'cjson':
        return cjson.decode(idict)
    elif MODULE == 'yajl':
        try:
            res = yajl.loads(idict)
        except:
            # yajl.loads("123") can fail; fall back to the stdlib JSON module.
            res = json.loads(idict, **kwargs)
        return res
    else:
        raise Exception("Unsupported JSON module: %s" % MODULE)
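# A minimal usage sketch for the loads() dispatcher above; MODULE is assumed
# to be a module-level setting chosen from 'json', 'cjson', or 'yajl':
#
#   MODULE = 'json'
#   data = loads('{"name": "test", "values": [1, 2, 3]}')
#   assert data['values'] == [1, 2, 3]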
async def collect_one(station, session):
    try:
        async with session.get(stations[station]['url']) as response:
            result = await response.read()
    except client_exceptions.ClientConnectorError:
        logger.error('Collecting data from IPMA station %s failed due to the connection problem',
                     station)
        return False
    if response.status not in http_ok:
        logger.error('Collecting data from IPMA station %s failed due to the return code %s',
                     station, response.status)
        return False
    content = loads(result.decode('UTF-8'))
    result = dict()
    result['id'] = station
    result['retrieved'] = datetime.now().replace(microsecond=0)
    result['forecasts'] = dict()
    today = datetime.now(tz).strftime("%Y-%m-%d") + 'T00:00:00'
    tomorrow = (datetime.now(tz) + timedelta(days=1)).strftime("%Y-%m-%d") + 'T00:00:00'
    for forecast in content:
        # Keep only daily (24h) forecasts for today and tomorrow.
        if forecast['idPeriodo'] != 24:
            continue
        date = forecast['dataPrev']
        if date not in [today, tomorrow]:
            continue
        entry = dict()
        entry['feelsLikeTemperature'] = check_entity(forecast, 'utci')
        entry['issued'] = datetime.strptime(forecast['dataUpdate'], '%Y-%m-%dT%H:%M:%S')
        entry['period'] = forecast['idPeriodo']
        entry['precipitationProbability'] = check_entity(forecast, 'probabilidadePrecipita')
        entry['relativeHumidity'] = check_entity(forecast, 'hR')
        entry['temperature'] = check_entity(forecast, 'tMed')
        entry['tMax'] = check_entity(forecast, 'tMax')
        entry['tMin'] = check_entity(forecast, 'tMin')
        entry['weatherType'] = check_entity(forecast, 'idTipoTempo')
        entry['windDirection'] = check_entity(forecast, 'ddVento')
        entry['windSpeed'] = check_entity(forecast, 'ffVento')
        result['forecasts'][date] = entry
    return result
def run(self):
    while True:
        line = self.write_queue.get()
        if line:
            output.write(line + '\n')
            output.flush()
            data = loads(line)
            links = data['data']['links']
            if 'next' in links:
                nextlink = links['next'][0]['href']
                self.queue.put(nextlink)
        # Refill the work queue from the input iterator, one id at a time.
        while queue.qsize() < THREAD_NUM * 2:
            for id in input:
                self.queue.put(URL % id.strip())
                break
        self.write_queue.task_done()
def post(self, aff_id):
    affiliate_edit = Affiliate.find_one(dict(_id=int(aff_id)))
    user_edit = User._get(affiliate_edit.user_id)
    form = loads(self.request.body)
    err = {}
    if not form.get('email'):
        err['email'] = 'Please input email'
    else:
        emails = form['email'].replace(' ', '').split(',')
        for e in emails:
            if not is_email(e):
                err['email'] = 'Email not valid, email=%s' % e
            elif e not in user_edit.email and User.count(dict(email=e, deleted=False)):
                err['email'] = 'Email %s is already in use!' % e
    if not form.get('account'):
        err['account'] = 'Please input your account'
    elif form.get('account') != user_edit.account and User.count(
            dict(account=form.get('account'), deleted=False)):
        err['account'] = 'Account is already in use!'
    if not form.get('password'):
        err['password'] = '******'
    elif form.get('password') != user_edit.password and not is_valid_password(
            form.get('password')):
        err['password'] = '******'
    if not form.get('account_manager') and form.get('status') != '0':
        err['account_manager'] = 'Please select Account Manager!'
    if not err:
        kw = dict(
            email=emails,
            password=form.get('password'),
            account=form.get('account'),
            role_id=Role.affiliate()._id,
            skype_id=form.get('skype_id'),
            phone=form.get('phone'),
        )
        User._update(user_edit._id, **kw)
        Affiliate._update(aff_id, **form)
    self.finish(dict(err=err if err else False))
def get_building_by_code(code):
    """ Returns a Building model for the given code, or None """
    path = os.path.join(os.path.dirname(__file__), '..', 'data', 'buildings.json')
    with open(path) as f:
        building_data = yajl.loads(f.read())
    if code in building_data:
        data = building_data[code]
        building = Building()
        building.longitude = data["longitude"]
        building.latitude = data["latitude"]
        building.name = data["name"]
        return building
def __init__(self, order=None, nets=None, cfg=None):
    super(Wrapper, self).__init__()
    self.order = []
    self.nets = {}
    if not cfg and (not order or not nets):
        raise ValueError('You have to either pass a config or the order '
                         'and networks to the Wrapper class')
    if cfg:
        cfg = sod(cfg)
        for k, v in cfg['Nets'].items():
            self.order.append(k)
            with open(v, 'r') as f:
                self.nets[k] = Compile(json.loads(f.read()))
        if "Load" in cfg:
            try:
                self.load_state_dict(cfg['Load'])
            except Exception:
                pass
    else:
        self.order = order
        self.nets = nets
    self.likelihood = None
    for o in self.order:
        if hasattr(self.nets[o], 'likelihood'):
            self.likelihood = self.nets[o].likelihood
def get_numpages(book):
    params = {
        'action': 'query',
        'format': 'json',
        'prop': 'imageinfo',
        'titles': 'File:{book}'.format(book=book),
        'iilimit': '50',
        'iiprop': 'size'
    }
    params = urllib.parse.urlencode(params).encode('ascii')
    logger.info("\tRequest image info for file 'File:{book}'".format(book=book))
    with urllib.request.urlopen(COMMONS_API, params) as f:
        data = json.loads(f.read().decode('utf-8'))
    numpages = list(data['query']['pages'].values())[0]['imageinfo'][0]['pagecount']
    return int(numpages)
def __init__(self, cfg):
    # super(Pipe, self).__init__()
    cfg = sod(cfg)
    self.cfg = cfg
    self.order = []
    self.models = {}
    self.no_grad = False
    for k, v in cfg.items():
        if "Load" in k:
            continue
        self.order.append(k)
        # A string value is a path to a JSON model spec; anything else
        # is treated as a nested ParallelPipe configuration.
        if isinstance(v, str):
            with open(v, 'r') as f:
                self.models[k] = Compile(json.loads(f.read()))
        else:
            self.models[k] = ParallelPipe(v)
    if "Load" in cfg:
        for k, v in cfg['Load'].items():
            try:
                self.models[k].load_state_dict(torch.load(v))
            except Exception:
                self.models[k].load(cfg['Load'])
def post(self):
    content = loads(self.request.body)
    status = content.get("status", "")
    limit = int(content.get("limit"))
    page = int(content.get("page"))
    skip = (page - 1) * limit
    spec = dict(status={"$ne": '0'} if not status or status == '0' else status)
    cats = Category.find(spec, skip=skip, limit=limit)
    # Count the full result set, not just the current page.
    cat_count = Category.count(spec)
    for cat in cats:
        cat['offer_count'] = Offers.count(dict(category=str(cat._id)))
        cat.status = 'Active' if cat.status == '1' else 'Pending'
    self.finish(dict(cat=cats, cat_count=cat_count))
async def get_handler(request):
    url = keyrock + '/oauth2/token'
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    data = {'grant_type': 'authorization_code',
            'code': request.rel_url.query['code'],
            'redirect_uri': redirect_uri}
    try:
        async with ClientSession() as session:
            async with session.post(url, auth=auth, data=data, headers=headers) as response:
                content = loads(await response.text())
                status = response.status
    except ClientConnectorError:
        return web.HTTPUnauthorized()
    except TimeoutError:
        return web.HTTPUnauthorized()
    if status not in http_ok:
        return web.HTTPUnauthorized()
    if 'access_token' not in content:
        return web.HTTPUnauthorized()
    expires = dt.utcnow() + timedelta(hours=cookie_lifetime)
    expires_cookie = dt.strftime(expires, '%a, %d-%b-%Y %H:%M:%S')
    value = {
        'access_token': content['access_token'],
        'refresh_token': content['refresh_token'],
        'expires': expires.isoformat()
    }
    value = urlsafe_b64encode(cipher_suite.encrypt(dumps(value).encode())).decode('UTF-8')
    response = web.HTTPSeeOther(upstream)
    response.set_cookie(name=cookie_name, value=value, expires=expires_cookie)
    return response
async def get_handler(request):
    cookie = request.cookies.get(cookie_name)
    last_visit = 'None'
    if cookie is not None:
        cookie = loads(cipher_suite.decrypt(urlsafe_b64decode(cookie)).decode('UTF-8'))
        last_visit = cookie['last_visit']
    text = 'Last visited: {}'.format(last_visit)
    cookie = dict()
    cookie['last_visit'] = dt.utcnow().isoformat()
    cookie = urlsafe_b64encode(cipher_suite.encrypt(dumps(cookie).encode())).decode('UTF-8')
    response = web.Response(text=text)
    response.set_cookie(name=cookie_name, value=cookie)
    return response
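# The cipher_suite shared by the two handlers above matches the interface of
# cryptography's Fernet (encrypt/decrypt over bytes); one way it might be
# constructed, with the key source being an assumption:
#
#   from cryptography.fernet import Fernet
#   cipher_suite = Fernet(Fernet.generate_key())  # or a key loaded from config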
def export_time_country(cls, self):
    spec = loads(self.get_argument('filter'))
    if spec['time']:
        spec['time'] = {'$gte': spec['time']['start'], '$lte': spec['time']['end']}
    # Drop empty filter fields; list() allows deletion while iterating.
    for k, v in list(spec.items()):
        if not v:
            del spec[k]
    sort_by_time = Activity.aggregate([
        {'$match': spec},
        {'$group': {'_id': '$time', 'new': {'$sum': '$new'},
                    'active': {'$sum': '$active'}, 'total': {'$sum': '$total'}}},
        {'$sort': {'_id': 1}}
    ])['result']
    sort_by_country = Activity.aggregate([
        {'$match': spec},
        {'$group': {'_id': '$country', 'new': {'$sum': '$new'},
                    'active': {'$sum': '$active'}, 'total': {'$sum': '$total'}}},
        {'$sort': {'_id': 1}}
    ])['result']
    return sort_by_time, sort_by_country