def test_array_numpy_labelled(self):
    # Labelled decode of an empty dict: empty values, the key as label.
    decoded = ujson.loads(ujson.dumps({"a": []}), numpy=True, labelled=True)
    assert (np.empty((1, 0)) == decoded[0]).all()
    assert (np.array(["a"]) == decoded[1]).all()
    assert decoded[2] is None

    # Labelled decode of a list of dicts: labels land in slot 2.
    decoded = ujson.loads(ujson.dumps([{"a": 42}]), numpy=True, labelled=True)
    assert (np.array([u("a")]) == decoded[2]).all()
    assert (np.array([42]) == decoded[0]).all()
    assert decoded[1] is None

    # see gh-10837: write out the dump explicitly
    # so there is no dependency on iteration order
    input_dumps = ('[{"a": 42, "b":31}, {"a": 24, "c": 99}, '
                   '{"a": 2.4, "b": 78}]')
    decoded = ujson.loads(input_dumps, numpy=True, labelled=True)
    expected_vals = np.array(
        [42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
    assert (expected_vals == decoded[0]).all()
    assert decoded[1] is None
    assert (np.array([u("a"), "b"]) == decoded[2]).all()

    input_dumps = ('{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, '
                   '"3": {"a": 2.4, "b": 78}}')
    decoded = ujson.loads(input_dumps, numpy=True, labelled=True)
    expected_vals = np.array(
        [42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
    assert (expected_vals == decoded[0]).all()
    assert (np.array(["1", "2", "3"]) == decoded[1]).all()
    assert (np.array(["a", "b"]) == decoded[2]).all()
def test_array_numpy_labelled(self):
    # Dict input: values in slot 0, keys in slot 1, no column labels.
    result = ujson.loads(ujson.dumps({"a": []}), numpy=True, labelled=True)
    assert (np.empty((1, 0)) == result[0]).all()
    assert (np.array(["a"]) == result[1]).all()
    assert result[2] is None

    # List-of-dicts input: column labels in slot 2, no row labels.
    result = ujson.loads(ujson.dumps([{"a": 42}]), numpy=True, labelled=True)
    assert (np.array(["a"]) == result[2]).all()
    assert (np.array([42]) == result[0]).all()
    assert result[1] is None

    # see gh-10837: write out the dump explicitly
    # so there is no dependency on iteration order
    input_dumps = ('[{"a": 42, "b":31}, {"a": 24, "c": 99}, '
                   '{"a": 2.4, "b": 78}]')
    result = ujson.loads(input_dumps, numpy=True, labelled=True)
    expected_vals = np.array(
        [42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
    assert (expected_vals == result[0]).all()
    assert result[1] is None
    assert (np.array(["a", "b"]) == result[2]).all()

    input_dumps = ('{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, '
                   '"3": {"a": 2.4, "b": 78}}')
    result = ujson.loads(input_dumps, numpy=True, labelled=True)
    expected_vals = np.array(
        [42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
    assert (expected_vals == result[0]).all()
    assert (np.array(["1", "2", "3"]) == result[1]).all()
    assert (np.array(["a", "b"]) == result[2]).all()
def test_encode_non_c_locale(self):
    lc_category = locale.LC_NUMERIC

    # We just need one of these locales to work.
    for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
        if not tm.can_set_locale(new_locale, lc_category):
            continue
        with tm.set_locale(new_locale, lc_category):
            assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
            assert ujson.loads("4.78", precise_float=True) == 4.78
        break
def test_encode_non_c_locale(self):
    # Round-tripping floats must not depend on the C numeric locale.
    lc_category = locale.LC_NUMERIC

    # We just need one of these locales to work.
    candidate_locales = ("it_IT.UTF-8", "Italian_Italy")
    for new_locale in candidate_locales:
        if tm.can_set_locale(new_locale, lc_category):
            with tm.set_locale(new_locale, lc_category):
                assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
                assert ujson.loads("4.78", precise_float=True) == 4.78
            break
def json_response(input_variables, endpoint, lookback):
    """POST model inputs to a prediction endpoint and return the decoded reply.

    :param input_variables: variables forwarded to ``inputs`` to build the row
    :param endpoint: URL of the prediction service
    :param lookback: lookback window forwarded to ``inputs``
    :return: the endpoint's JSON response, decoded to Python objects
    """
    # BUG FIX: build the feature row once — the original called
    # ``inputs(...)`` a second time just for the debug print.
    instance = inputs(input_variables, lookback)
    print("as", instance)
    json_res = requests.post(
        url=endpoint,
        data=json.dumps({'instances': [instance]}),
        headers={'Content-Type': 'application/json'})
    return json.loads(json_res.text)
def save_general_embeddings():
    """Build and persist embedding tensors for every word in the general dict.

    Loads the word->index map from general_word_dict.json, looks each word up
    in the general (zhihu) vectors with a fallback to the domain vectors, and
    saves two index-aligned tensors: general_embeddings.ts / domain_embeddings.ts.

    :raises ValueError: if the dictionary indexes are not contiguous from 0.
    """
    general_word_vector_dict = generate_word_vector_dict(
        "pre-word-embedding/sgns.zhihu.bigram-char")
    domain_word_vector_dict = generate_word_vector_dict(
        "datafountain/word_vectors.txt")
    # Use a context manager so the handle is closed (the original leaked it).
    dict_path = os.path.join(DATA_SET_DIR, "datafountain/general_word_dict.json")
    with open(dict_path, "r", encoding="utf-8") as general_word_dict_file:
        general_word_dict = json.loads(general_word_dict_file.readline())

    # Sort (index, word) pairs so embedding rows line up with dict indexes.
    word_tuple = list(
        sorted([(index, word) for word, index in general_word_dict.items()],
               key=lambda x: x[0]))
    i = 0
    general_embeddings = []
    domain_embeddings = []
    for index, word in word_tuple:
        if i != index:
            # BUG FIX: replace the meaningless Exception("my aaaaa") with a
            # diagnosable error; indexes must be dense or rows would shift.
            raise ValueError(
                "word dict indexes are not contiguous: expected %d, got %d"
                % (i, index))
        i += 1
        embeddings = (general_word_vector_dict[word]
                      if word in general_word_vector_dict
                      else domain_word_vector_dict[word])
        if len(embeddings) < 300:
            print(word)  # flag words whose vector is shorter than expected
        general_embeddings.append(embeddings)
        # Words absent from the domain vectors get a 300-dim zero vector.
        domain_embeddings.append(domain_word_vector_dict.get(word, [0] * 300))
    torch.save(torch.tensor(general_embeddings), "general_embeddings.ts")
    torch.save(torch.tensor(domain_embeddings), "domain_embeddings.ts")
def search_restaurant(request):
    """Fetch the remote company list and return it as JSON.

    :param request: Django request object
    :return: JsonResponse with a list of {"id", "name"} dicts, or
             {"name": "wrong"} when the remote list is empty.
    """
    response = []
    result = []
    information = {'state': '1'}
    s = requests.session()
    a = s.post(
        'http://39.98.52.189:82/api/open/companyList',
        headers={'auth': 'gAAAAABcfjh_HP3zWYfdz_j1Cs15uECzHHNt3ujXJtV5C_'
                         'mbWuk-xEeshqunTYacVQVglTqa1pIhaESn3iuqLu9b6UFIhZ-wt4V6hCqJX3vLFQuc5cPP1_'
                         'qPiQcsl3fH6y-NDE3TqS4qKXZY9_gUkHl09eDMZeHgxw==',
                 'Content-Type': 'application/json'},
        data=json.dumps(information))
    j = json.loads(a.text)
    print(j)
    for company in j['companyList']:
        response.append({"id": company['id'], "name": company['brand']})
        result.append((company['id'], company['brand']))
    print("here")
    # BUG FIX: ``response`` is a list, so the old ``response == {}`` test
    # could never be true; check emptiness instead.
    if not response:
        response = {"name": "wrong"}
    return JsonResponse(response, safe=False)
def lol(request):
    """Run the regression model on the SQUARE/RM/NOX fields of the JSON body."""
    payload = json.loads(request.body.decode('utf-8'))
    features = [payload["SQUARE"], payload["RM"], payload["NOX"]]
    result = {"value": pred(features)}
    return HttpResponse(json.dumps(result), content_type='application/json')
def world_map():
    """Build a COVID-19 world choropleth and return it as an embeddable div."""
    summary = requests.get('https://api.covid19api.com/summary')
    gapminder = px.data.gapminder().query("year==2007")
    covid = pd.DataFrame(json.loads(summary.text)['Countries'])
    gapminder.columns = [
        'Country', 'continent', 'year', 'lifeExp', 'pop', 'gdpPercap',
        'iso_alpha', 'iso_num'
    ]
    # Join on country name so each row carries the ISO code for plotting.
    covid = covid.merge(gapminder, on='Country')
    covid = covid.dropna()
    fig = px.choropleth(covid,
                        locations="iso_alpha",
                        color="TotalConfirmed",
                        hover_name="Country",
                        hover_data=[
                            'NewConfirmed', 'TotalConfirmed', 'NewDeaths',
                            'TotalDeaths', 'NewRecovered', 'TotalRecovered'
                        ],
                        color_continuous_scale=px.colors.sequential.Plasma)
    fig.update_layout(
        template='plotly_white',
        autosize=True,
        height=360,
        margin=dict(t=0, b=0, l=0, r=0),
        coloraxis_showscale=False,
    )
    config = {
        'displayModeBar': False,
        'displaylogo': False,
        'scrollZoom': True,
    }
    return plot(fig, output_type='div', include_plotlyjs=False, config=config)
def main():
    """Segment every training content line and update words.json on disk."""
    cws_model_path = os.path.join(LTP_DATA_DIR, "cws.model")  # `cws.model` segmentation model
    segmentor = Segmentor()
    segmentor.load(cws_model_path)
    train_file = os.path.join(DATA_SET_DIR, train_file_path)
    # validationset_file = os.path.join(DATA_SET_DIR, validationset_file_path)

    # content_id, content, subject, sentiment_value, sentiment_word
    lines = pd.read_csv(
        train_file,
        header=0,
        dtype={
            # Builtins instead of np.str/np.int: those aliases are deprecated
            # and were removed in NumPy 1.24; behaviour is identical.
            "content_id": str,
            "content": str,
            # "subject": str,
            "sentiment_value": int,
            "sentiment_word": str
        },
        index_col="content_id",
    )
    lines = lines["content"]

    # BUG FIX: the original never closed either file handle; ``with``
    # guarantees the read handle is released and the write is flushed.
    with open("words.json", "r") as json_file:
        word_dict = json.loads(json_file.readline())
    for line in lines:
        # Strip a single wrapping double quote, if present, before segmenting.
        words = segmentor.segment(
            line[1 if line[0] == "\"" else 0:
                 -1 if line[-1] == "\"" else len(line)])
        regist_words(word_dict, words)
    with open("words.json", "w") as json_file:
        json_file.write(json.dumps(word_dict))
def data():
    """Load the collisions CSV, clean it, and print/convert the result."""
    zip_dict = {}
    print('Lets Start')
    # Making a list of missing value types
    missing_values = ["n/a", "na", "-", ".", "NaN"]
    # df = pd.read_csv ('./data/NYPD_Motor_Vehicle_Collisions.csv', na_values=['.'])
    df = pd.read_csv('data/dummy.csv', na_values=missing_values)  # just for Bronx - elbow test
    df1 = df[[
        'ZIP CODE', 'LATITUDE', 'LONGITUDE', 'NUMBER OF PERSONS INJURED',
        'NUMBER OF PERSONS KILLED', 'NUMBER OF PEDESTRIANS INJURED',
        'NUMBER OF PEDESTRIANS KILLED', 'NUMBER OF CYCLIST INJURED',
        'NUMBER OF CYCLIST KILLED', 'NUMBER OF MOTORIST INJURED',
        'NUMBER OF MOTORIST KILLED'
    ]]
    df2 = df1.dropna(how='any')
    # FIX: parenthesized prints so this runs on Python 3 as well as 2
    # (the original used Python-2-only ``print df2.head(10)`` statements).
    print(df2.head(10))
    # FIX: convert the column of df2 (the cleaned frame) rather than df;
    # values align on index either way, but reading df was misleading.
    df2['ZIP CODE'] = pd.to_numeric(df2['ZIP CODE'], errors='coerce')
    print(df2.dtypes)
    df3 = df2.drop(['ZIP CODE', 'LATITUDE', 'LONGITUDE'], axis=1)
    print(df3.head(5))
    tweets = []
    for index, row in df2.iterrows():
        # row[0] is the ZIP code; json.loads on its string form yields a number.
        d = str(row[0])
        tweets.append(json.loads(d))
    print(tweets)
def getlocation(lat, lng):
    """Reverse-geocode a coordinate pair via the Baidu geocoder API.

    :param lat: latitude as a string, e.g. '31.809928'
    :param lng: longitude as a string, e.g. '102.537467'
    :return: the decoded JSON response (a dict)
    """
    url = ('http://api.map.baidu.com/geocoder/v2/?location=' + lat + ',' + lng
           + '&output=json&pois=1&ak=003EurM0nat1YfowgMZPIBpDtGjbGOri')
    # BUG FIX: close the HTTP response deterministically instead of leaking it.
    with urllib.request.urlopen(url) as req:
        # Decode the UTF-8 JSON body into a unicode string.
        res = req.read().decode("utf-8")
    return json.loads(res)
def test_dumps_ints_larger_than_maxsize(self, bigNum):
    # GH34395
    # BUG FIX: the original immediately clobbered the parametrized value
    # with ``bigNum = sys.maxsize + 1``, so only one case was ever tested.
    encoding = ujson.encode(bigNum)
    assert str(bigNum) == encoding

    # GH20599
    with pytest.raises(ValueError):
        assert ujson.loads(encoding) == bigNum
def test_dumps_ints_larger_than_maxsize(self, bigNum):
    # Encoding out-of-range ints works; decoding them must raise.
    encoded = ujson.encode(bigNum)
    assert encoded == str(bigNum)

    with pytest.raises(
        ValueError,
        match="Value is too big|Value is too small",
    ):
        assert ujson.loads(encoded) == bigNum
def lol(request):
    """POST-only prediction endpoint: run ``pred`` over the JSON body values.

    :param request: Django request; body must be a JSON object
    :return: JSON {"value": <prediction>}, or HTTP 405 for non-POST methods
    """
    if request.method != 'POST':
        # BUG FIX: the original returned None for non-POST requests, which
        # Django treats as an error; answer with an explicit 405 instead.
        return HttpResponse(status=405)
    json_otv = json.loads(request.body.decode('utf-8'))
    # ``dict.values(json_otv)`` was an unidiomatic spelling of the same call.
    mass = list(json_otv.values())
    kek = {"value": pred(mass)}
    return HttpResponse(json.dumps(kek), content_type='application/json')
def relevancy():
    """Score prominence/relevancy for the posted text.

    Requires the ``api_key`` header; returns the string '401' when it is
    missing or wrong (preserving the original contract).
    """
    api_key = request.headers.get('api_key')
    # FIX: the old ``api_key != '123456789' or api_key == None`` was
    # redundant — None never equals the literal key, so one test suffices.
    if api_key != '123456789':
        return '401'
    jObj = json.loads(request.data.decode("utf-8"))
    text = jObj['body']
    corefs = corefrenceResolverRelavancy(text)
    result = prominanceProbablity(request.data.decode("utf-8"), corefs)
    return result
def main():
    """Segment every review's content and update words.json on disk."""
    cws_model_path = os.path.join(LTP_DATA_DIR, "cws.model")  # `cws.model` segmentation model
    segmentor = Segmentor()
    segmentor.load(cws_model_path)
    train_file = os.path.join(DATA_SET_DIR, train_file_path)
    # validationset_file = os.path.join(DATA_SET_DIR, validationset_file_path)

    # Only "id" and "content" are loaded (usecols); the dtype map documents
    # the full CSV schema. Builtins replace the deprecated np.int/np.str
    # aliases (removed in NumPy 1.24) with identical behaviour.
    lines = pd.read_csv(train_file,
                        header=0,
                        dtype={
                            "id": int,
                            "content": str,
                            "location_traffic_convenience": int,
                            "location_distance_from_business_district": int,
                            "location_easy_to_find": int,
                            "service_wait_time": int,
                            "service_waiters_attitude": int,
                            "service_parking_convenience": int,
                            "service_serving_speed": int,
                            "price_level": int,
                            "price_cost_effective": int,
                            "price_discount": int,
                            "environment_decoration": int,
                            "environment_noise": int,
                            "environment_space": int,
                            "environment_cleaness": int,
                            "dish_portion": int,
                            "dish_taste": int,
                            "dish_look": int,
                            "dish_recommendation": int,
                            "others_overall_experience": int,
                            "others_willing_to_consume_again": int
                        },
                        index_col="id",
                        usecols=["id", "content"])
    lines = lines["content"]

    # BUG FIX: the original leaked both file handles; ``with`` closes the
    # read handle and flushes/closes the write handle.
    with open("words.json", "r") as json_file:
        word_dict = json.loads(json_file.readline())
    for line in lines:
        # Strip a single wrapping double quote, if present, before segmenting.
        words = segmentor.segment(
            line[1 if line[0] == "\"" else 0:
                 -1 if line[-1] == "\"" else len(line)])
        regist_words(word_dict, words)
    with open("words.json", "w") as json_file:
        json_file.write(json.dumps(word_dict))
def main():
    """Segment train+test contents with LTP and write them to contents.txt."""
    cws_model_path = os.path.join(LTP_DATA_DIR, "cws.model")  # `cws.model` segmentation model
    segmentor = Segmentor()
    segmentor.load(cws_model_path)

    # Each JSON file holds one dict on its first line.
    # BUG FIX: the original never closed these handles; ``with`` does.
    with open(os.path.join(DATA_SET_DIR, train_file_path), "r") as f:
        train_dict = json.loads(f.readline())
    with open(os.path.join(DATA_SET_DIR, test_file_path), "r") as f:
        test_dict = json.loads(f.readline())

    contents = []
    for value in train_dict.values():
        contents.append(" ".join(
            [word for word in segmentor.segment(value["content"])]))
    for value in test_dict.values():
        contents.append(" ".join(
            [word for word in segmentor.segment(value["content"])]))

    # ``with`` guarantees the buffer is flushed and the handle closed.
    with open("contents.txt", "w", encoding='utf-8') as contents_file:
        contents_file.write("\n".join(contents))
def main():
    """Segment each content line, map its words to indexes, and write test_public.json."""
    cws_model_path = os.path.join(LTP_DATA_DIR, "cws.model")  # `cws.model` segmentation model
    segmentor = Segmentor()
    segmentor.load(cws_model_path)
    train_file = os.path.join(DATA_SET_DIR, train_file_path)

    # BUG FIX: close the dictionary file (the original leaked the handle).
    with open("general_word_dict.json", "r") as words_dict_json:
        words_dict = json.loads(words_dict_json.readline())

    lines = pd.read_csv(
        train_file,
        header=0,
        dtype={
            # Builtins instead of the deprecated np.str/np.int aliases.
            "content_id": str,
            "content": str,
            "subject": str,
            "sentiment_value": int,
            "sentiment_word": str
        },
    )
    lines_dict = {}
    for index in lines.index:
        line = lines.loc[index]
        line_dict = {}
        # Strip a single wrapping double quote, if present.
        content = line["content"]
        line_dict["content"] = content[
            1 if content[0] == "\"" else 0:
            -1 if content[-1] == "\"" else len(content)]
        # Encode the segmented content as space-separated dictionary indexes.
        line_dict["content_indexes"] = " ".join([
            str(words_dict[word])
            for word in segmentor.segment(line_dict["content"])
        ])
        lines_dict[line["content_id"]] = line_dict

    # ``with`` flushes and closes the output handle.
    with open("test_public.json", "w") as json_file:
        json_file.write(json.dumps(lines_dict))
def find_company_id(request):
    """Look up a company's brand name by id in the remote company list."""
    res_id = request.GET.get('res_id')
    session = requests.session()
    reply = session.get(
        'http://skyhawkapi.huilab.cn/api/company/list?token=gAAAAABb7QceFcdkZOZhOcwT9MOn_r6uwmHbPPfWDe3_eY65YvK0rP2LevULA6R9eiXYk4bvYX7jonI7b6RhdaIyNGQpXjSnBKuoEvNF4rBgP01nyKlfLnBFruRN7HvX4uXDSIhV7-R-24HoTTN5lUYJQ_sph9xXQA%3D%3D'
    )
    data = json.loads(reply.text)
    response = {}
    for company in data['companyList']:
        if company['id'] == int(res_id):
            response = {"id": res_id, "name": company['brand']}
            break
    if not response:
        response = {"name": "wrong"}
    return JsonResponse(response)
def get_page_rank(self, domain_to_query):
    """Query Open PageRank for *domain_to_query* and wrap it as a SubFeature.

    :param domain_to_query: domain name to rank
    :return: SubFeature with the rank scaled by 10, or SubFeatureError on
             any failure (network, missing key, unexpected payload).
    """
    try:
        params = urlencode({'domains[]': domain_to_query})
        req = Request(OPEN_PAGE_RANK_API_URL + "?" + params)
        print(OPEN_PAGE_RANK_API_URL + "?" + params)
        req.add_header('API-OPR', OPEN_PAGE_RANK_API_KEY)
        # page_rank_decimal scaled by 10 — presumably mapping 0-10 onto
        # 0-100; confirm against the other SubFeature scales.
        result = json.loads(
            urlopen(req).read())['response'][0]['page_rank_decimal'] * 10
        return SubFeature(
            'Page Rank',
            result,
            result,
            tooltip=
            'Domain ranking based on links from and to external sources')
    except Exception:
        # BUG FIX: the bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort behaviour.
        return SubFeatureError('Page Rank')
def forSummary():
    """Summarize the posted text after coreference resolution.

    Requires a JSON body (400 otherwise) and the ``api_key`` header
    (returns '401' as a body when missing or wrong).
    """
    api_key = request.headers.get('api_key')
    if not request.get_json():
        abort(400)
    # FIX: ``api_key == None`` was redundant — None never equals the key.
    if api_key != '123456789':
        return '401'
    jObj = json.loads(request.data.decode("utf-8"))
    text = str(jObj['body'])
    corefs = corefrenceResolverSummary(text)
    restp = Summarymain(request.data.decode("utf-8"), corefs)
    return restp, 201
def load_config_file(nfile, abspath=False):
    """
    Read the configuration from a json file

    :param nfile: file name; '.json' is appended unless the name already
        contains 'json'
    :param abspath: when True, use *nfile* as given instead of prefixing './'
    :return: the decoded JSON object
    """
    ext = '.json' if 'json' not in nfile else ''
    pre = '' if abspath else './'
    # BUG FIX: read via a context manager so the handle is always closed
    # (the original leaked it and concatenated the file line by line).
    with open(pre + nfile + ext, 'r') as fp:
        return json.loads(fp.read())
def put_lbt_equipment_update(request):
    """
    :param request: request django object
    :param equipment: equipment name will be provided
    :param feed_name: feed name will be provided
    :param is_active: is_active name will be provided
    :return: json response
    """
    obj = query_params = None
    try:
        query_params = {
            EQUIPMENT: request.GET[EQUIPMENT],
            FEED_NAME: request.GET[FEED_NAME],
            IS_ACTIVE: request.GET[IS_ACTIVE]
        }
    except KeyError:
        # FIX: narrowed from a bare ``except:``; the query params are
        # optional, so fall through with query_params = None.
        pass
    try:
        if request.method == PUT_REQUEST:
            jwt_value = _TokenValidation().validate_token(request)
            if jwt_value:
                obj = lbt_equipment_update(query_params,
                                           json.loads(request.body))
                return obj.get_update_equip_query()
            else:
                return JsonResponse({MESSAGE_KEY: "FORBIDDEN ERROR"},
                                    status=HTTP_403_FORBIDDEN)
        log_debug(METHOD_NOT_ALLOWED)
        return JsonResponse({MESSAGE_KEY: METHOD_NOT_ALLOWED},
                            status=HTTP_405_METHOD_NOT_ALLOWED)
    except jwt.ExpiredSignatureError:
        # FIX: removed the dead ``token = request.META[...]`` assignment —
        # it was never used and could itself raise KeyError.
        return JsonResponse({MESSAGE_KEY: "Token Expired"},
                            status=HTTP_401_UNAUTHORIZED)
    except Exception as e:
        return error_instance(e)
    finally:
        # Release the update object promptly once the response is built.
        if obj:
            del obj
def close_restaurant(request):
    """Toggle a restaurant's open/closed state through the remote API."""
    state = request.GET.get('state')
    company_id = request.GET.get('companyId')
    session = requests.session()
    payload = {'state': state, 'companyId': company_id}
    reply = session.put(
        'http://39.98.52.189:82/api/open/companyList',
        headers={'auth': 'gAAAAABcfjh_HP3zWYfdz_j1Cs15uECzHHNt3ujXJtV5C_'
                         'mbWuk-xEeshqunTYacVQVglTqa1pIhaESn3iuqLu9b6UFIhZ-wt4V6hCqJX3vLFQuc5cPP1_'
                         'qPiQcsl3fH6y-NDE3TqS4qKXZY9_gUkHl09eDMZeHgxw==',
                 'Content-Type': 'application/json'},
        data=json.dumps(payload))
    result = json.loads(reply.text)
    print(result['msg'])
    if result['msg'] == 'success':
        response = {"res": "ok"}
    else:
        response = {"res": "wrong"}
    return JsonResponse(response, safe=False)
def handle_json(message):
    """Create a note from an incoming JSON message and broadcast it."""
    content = json.loads(str(message))
    print('Content ' + str(content))
    if not content:
        return
    notes = loads_bd()
    # Next id is one past the last note's; an empty DB starts at 0.
    # FIX: renamed from ``id`` (shadowed the builtin) and narrowed the
    # catch-all ``except Exception`` to the plausible failure modes.
    try:
        note_id = notes[-1]['id'] + 1
    except (IndexError, KeyError, TypeError):
        note_id = 0
    note = {
        'id': note_id,
        'title': content['title'],
        'body': content['body'],
        'color': content['color'],
        'fontColor': content['fontColor']
    }
    notes.append(note)
    write_db(notes)
    socketio.emit('add note', {'note': make_note_uri(note)})
def forSummary():
    """Summarize the posted JSON body after coreference resolution.

    @param body: post : the text to clean, resolve, and summarize
    @return: 201: the summary as a flask/response object
    @raise 400: misunderstood request (missing/invalid JSON body)
    """
    # FIX: the original docstring described an unrelated "book request"
    # endpoint; the dead commented-out email/title validation and the
    # unused ``data = request.get_json(force=True)`` are removed.
    if not request.get_json():
        abort(400)
    jObj = json.loads(request.data.decode("utf-8"))
    text = cleanText(str(jObj['body']))
    corefs = corefrenceResolver(text)
    restp = Summarymain(request.data.decode("utf-8"), corefs)
    return restp, 201
def test_decode_array_with_big_int(self):
    # An integer beyond the supported range must fail to decode.
    too_big = "[18446098363113800555]"
    with pytest.raises(ValueError):
        ujson.loads(too_big)
def test_loads_non_str_bytes_raises(self):
    # Only str/bytes input is accepted by ujson.loads.
    expected = "Expected 'str' or 'bytes'"
    with pytest.raises(TypeError, match=expected):
        ujson.loads(None)
def test_decode_floating_point(self, sign, float_number):
    # Apply the sign, then check the decode round-trips within tolerance.
    signed = float_number * sign
    tm.assert_almost_equal(signed, ujson.loads(str(signed)), rtol=1e-15)
def test_decode_array_with_big_int(self, value):
    # Out-of-range integers must raise with a bounds message on decode.
    expected = "Value is too big|Value is too small"
    with pytest.raises(ValueError, match=expected):
        ujson.loads(value)
def get_furnaces(self):
    """
    :return: Json Response

    Builds, per equipment tag, a dict with 'external_targets' (rows from
    the config query), 'performance_tags' (case names), and, for
    non-furnace equipment, 'performance_tags_list'. Errors are logged and
    reported as JSON rather than raised.
    """
    try:
        # Fails fast with a 500 payload when the DB connection is missing;
        # the dict becomes AssertionError.args[0] in the handler below.
        assert self._db_connection, {
            STATUS_KEY: HTTP_500_INTERNAL_SERVER_ERROR,
            MESSAGE_KEY: DB_ERROR
        }
        if self.query_params:
            # Comma-separated equipment ids from the query string.
            equipment = self.query_params[EQUIPMENT].split(",")
            """ Format the id list for SQL interpolation: a single id needs
            explicit parentheses, multiple ids use a Python tuple's repr. """
            if len(equipment) == 1:
                equipment_param = '(' + str(equipment[0]) + ')'
                # NOTE(review): duplicated assignment kept as-is — it is a
                # harmless repeat of the line above.
                equipment_param = '(' + str(equipment[0]) + ')'
            else:
                equipment_param = tuple(equipment)
            perform_list_all = []
            try:
                # Ids < 15 with is_active=true use the config query; ids > 14
                # use the non-furnace targets query — presumably ids 1..14
                # are furnaces; confirm against the ids used by callers.
                if self.query_params[IS_ACTIVE] == "true" and int(
                        equipment[0]) < 15:
                    self._psql_session.execute(
                        MULTIPLE_CONFIG_EQUIPMENT.format(
                            self.query_params[IS_ACTIVE], equipment_param,
                            self.query_params[FEED_NAME]))
                elif int(equipment[0]) > 14:
                    self._psql_session.execute(
                        NON_FURNACE_EXTERNAL_TARGETS.format(
                            equipment_param))
                else:
                    pass
                df = pd.DataFrame(self._psql_session.fetchall())
                # One JSON string of row records per equipment tag.
                dt = df.groupby('equipment_tag_name').apply(
                    lambda x: x.to_json(orient='records'))
                df.sort_values('parameter', ascending=True, inplace=True)
                obj = {}
                array = []  # NOTE(review): unused — kept for byte-compatibility
                for each_data in dt:
                    for each in json.loads(each_data):
                        # Last record's group wins for each tag name.
                        obj[each['equipment_tag_name']] = {
                            'external_targets': json.loads(each_data),
                            'performance_tags': None
                        }
                perform = []  # NOTE(review): unused — kept for byte-compatibility
                try:
                    self._psql_session.execute(
                        MULTIPLE_CONFIG_CASE_NAME_PERFORMACE_TAGS.format(
                            equipment_param))
                except Exception as e:
                    log_error(
                        'Exception due to get_furnaces Function: %s'
                        + str(e))
                # dumps/loads round-trip normalises DB rows to plain dicts.
                performance_list = json.loads(
                    json.dumps(self._psql_session.fetchall()))
                perf_list = json.loads(json.dumps(performance_list))
                try:
                    self._psql_session.execute(
                        ALL_PERF_TAGS_FOR_NON_FURNACES.format(
                            equipment_param))
                except Exception as e:
                    log_error(
                        'Exception due to get_furnaces Function: %s'
                        + str(e))
                perameter_list = json.loads(
                    json.dumps(self._psql_session.fetchall()))
                perform_list = json.loads(json.dumps(perameter_list))
                if len(perform_list) > 0:
                    for each_perform in perform_list:
                        perform_list_all.append(each_perform['result'])
                else:
                    pass
                # Attach case names; rows for unknown tags are skipped.
                for each_data in perf_list:
                    try:
                        obj[each_data["equipment_tag_name"]][
                            "performance_tags"] = each_data['case_name']
                    except Exception as err:
                        pass
                # Attach non-furnace parameter lists; missing keys skipped.
                for each_data in perform_list_all:
                    try:
                        obj[each_data["equipment_tag_name"]][
                            "performance_tags_list"] = each_data[
                                'parameter']
                    except Exception as err:
                        pass
                return JsonResponse(obj, safe=False, status=200)
            except Exception as e:
                log_error('Exception due to get_furnaces Function: %s'
                          + str(e))
                return JsonResponse({"message": str(e)}, safe=False)
    except AssertionError as e:
        log_error('Exception due to get_furnaces Function: %s' + str(e))
        # The assert payload dict carries the status and message.
        return JsonResponse({MESSAGE_KEY: e.args[0][MESSAGE_KEY]},
                            status=e.args[0][STATUS_KEY])
    except Exception as e:
        log_error(traceback.format_exc())
        return JsonResponse(
            {MESSAGE_KEY: EXCEPTION_CAUSE.format(traceback.format_exc())},
            status=HTTP_500_INTERNAL_SERVER_ERROR)
def test_decode_floating_point(self, sign, float_number):
    # Apply the sign, then verify the decode round-trips to high precision.
    signed = float_number * sign
    tm.assert_almost_equal(signed,
                           ujson.loads(str(signed)),
                           check_less_precise=15)
def test_loads_non_str_bytes_raises(self):
    # Non str/bytes input must raise a TypeError with a clear message.
    with pytest.raises(TypeError, match="Expected 'str' or 'bytes'"):
        ujson.loads(None)
def test_decode_floating_point(self, sign, float_number):
    # Round-trip the signed float through ujson within precision bounds.
    value = sign * float_number
    decoded = ujson.loads(str(value))
    tm.assert_almost_equal(value, decoded, check_less_precise=15)