def chainlink_prices(self, common_field, element, protocol_field, query_elements):
    """ Returns the chainlink price in the correct format: pairs quoted
    in ETH use 18 decimals, all other pairs use 8 decimals """
    price = dict_digger.dig(element, *query_elements[common_field])
    pair = dict_digger.dig(element, 'assetPair', 'id')
    pair_decimals = 8
    tokens = pair.split('/')
    if len(tokens) > 1 and tokens[1] == 'ETH':
        pair_decimals = 18
    return float(price) / 10**pair_decimals
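# Worked example of the decimal rule above (the element shape and field
# names are illustrative, not the project's real subgraph schema):
element = {'assetPair': {'id': 'AAVE/ETH'},
           'latestAnswer': '45000000000000000'}
query_elements = {'price': ['latestAnswer']}
# 'AAVE/ETH' is quoted in ETH, so 18 decimals apply:
#   chainlink_prices('price', element, None, query_elements) -> 0.045
# a pair such as 'BTC/USD' would use the 8-decimal default instead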
def tx_id_hyphen(self, common_field, element, protocol_field, query_elements):
    """ Removes the trailing data after the '-' in the transaction id field """
    field = dict_digger.dig(element, *query_elements[common_field])
    return field.split('-')[0]
def remove_token_prefix(self, common_field, element, protocol_field, query_elements):
    """ Removes the prefix character of the token symbol. Useful to strip
    the 'c' from cTokens or the 'a' from aTokens """
    field = dict_digger.dig(element, *query_elements[common_field])
    return field[1:]
def array_length(self, common_field, element, protocol_field, query_elements):
    """ Returns the length of a field of array type """
    array_field = dict_digger.dig(element, query_elements[common_field][0])
    return len(array_field)
def to_int(self, common_field, element, protocol_field, query_elements):
    """ Transforms the value to an integer """
    ele = dict_digger.dig(element, *query_elements[common_field])
    return int(ele)
def rates_units_comp(self, common_field, element, protocol_field, query_elements):
    """ Returns the comp rate converted to its display units (scaled by 100) """
    rate = dict_digger.dig(element, *query_elements[common_field])
    return float(rate) * 1e2
def set(self, key, value):
    """
    Will set a value at a given key path.

    :param key:
    :param value:
    :return:
    """
    if isinstance(key, (list, tuple)) and len(key) > 1:
        # dig down to the parent container, then set the final key
        result = dict_digger.dig(self.data, *key[:-1])
        result[key[-1]] = value
    else:
        self.data[key] = value
def fetch_data(self, url, path_to_data=None, path_to_next=None):
    """Fetch data from the Spotify API

    Args:
        url (str): url path for the Spotify API
        path_to_data (list): list style path to the items of interest.
            Default is to return all data as a dict instead of
            accumulating a list of data dicts
        path_to_next (list): list style path to the next url for
            pagination support
    """
    token = self.integration.access_token
    data = []
    all_data_loaded = False
    if "?" in url:
        url += f"&access_token={token}"
    else:
        url += f"?access_token={token}"
    while not all_data_loaded:
        response = requests.get(url)
        response.raise_for_status()
        if response.status_code == 204:
            return None  # no content
        response_json = response.json()
        if path_to_data:
            data += dig(response_json, *path_to_data)
            next_url = dig(response_json, *path_to_next) if path_to_next else None
            if next_url:
                url = next_url + f"&access_token={token}"
                time.sleep(0.1)
            else:
                all_data_loaded = True
        else:
            data = response_json
            all_data_loaded = True
    return data
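# Usage sketch for fetch_data above (the client object and endpoint are
# illustrative; ['items'] and ['next'] follow the shape of Spotify's
# paging object, but treat the exact call as an assumption):
playlists = client.fetch_data(
    'https://api.spotify.com/v1/me/playlists',
    path_to_data=['items'],  # accumulate each page's items
    path_to_next=['next'],   # follow the paging object's next url
)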
def transform(self, element, common_field, protocol_field, transformations, query_elements):
    """ Main function, called from the mapper. If the field to map has a
    transformation function registered in the class dictionary, the
    specified function is applied; otherwise the value is returned
    without modification. """
    if common_field in transformations:
        type_transformer = transformations[common_field]
        return self.transformers[type_transformer](common_field, element, protocol_field, query_elements)
    else:
        return dict_digger.dig(element, *protocol_field)
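# A sketch of how the dispatch above might be wired (the field names and
# transformer keys here are hypothetical, not the project's real mapping):
transformations = {
    'tx_id': 'tx_id_hyphen',      # strip everything after the '-'
    'price': 'chainlink_prices',  # rescale by the pair's decimals
}
# and, inside the mapper class, self.transformers would map those keys to
# bound methods:
#   self.transformers = {
#       'tx_id_hyphen': self.tx_id_hyphen,
#       'chainlink_prices': self.chainlink_prices,
#   }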
def index():
    try:
        content = request.get_json(force=True)
        requestPart = dict_digger.dig(content, 'request')
        if requestPart is not None:
            transformDataNdarray(requestPart)
        responsePart = dict_digger.dig(content, 'response')
        if responsePart is not None:
            transformDataNdarray(responsePart)
        # log formatted json to stdout for fluentd collection
        print(str(json.dumps(content)))
        sys.stdout.flush()
        return str(content)
    except Exception as e:
        print(e, file=sys.stderr)
        return 'Error processing input'
def get_total_credits(env, user, password):
    url1 = env_switcher(env)[0]
    url2 = env_switcher(env)[1]
    # Generate a token for the user
    genToken = gen_token_via_rest(url1, user, password)
    # Build URL from passed environment and URI for endpoint
    totalCreditsUrl = url2 + '/sms/rest/subscription/developers/subscriptionInfo'
    # Parameters for call
    totalCreditsPayload = {'token': genToken}
    # Make the GET request to the api
    totalCreditsPost = requests.get(
        totalCreditsUrl, params=totalCreditsPayload, verify=True)
    # Returned data from call
    totalCreditsData = json.loads(totalCreditsPost.content)
    # Possible known membership levels
    levels = {
        None: '50.00',
        'Level 1': '200.00',
        'Level 2': '900.00',
        'Level 3': '2000.00',
        'Level 4': '4000.00',
        'Level 5': '10000.00',
        'Level 6': '20000.00',
        'Level 7': '40000.00'
    }
    # Figure the level from the known credits
    for i in range(len(dig(totalCreditsData, 'subscriptions'))):
        subtype = dig(totalCreditsData, 'subscriptions', i, 'subscriptionSubType')
        if subtype in levels:
            return levels[subtype]
def fetch_data(self, url, path_to_data, path_to_next):
    token = self.integration.access_token
    data = []
    all_data_loaded = False
    while not all_data_loaded:
        response = requests.get(url).json()
        try:
            data += dig(response, *path_to_data)
        except (KeyError, IndexError, TypeError):
            # path not present in this response; return what we have so far
            return data
        next_url = dig(response, *path_to_next)
        if next_url:
            if token:
                url = next_url + f"&access_token={token}"
            else:
                url = next_url
        else:
            all_data_loaded = True
        time.sleep(0.1)
    return data
def concat_symbols(self, common_field, element, protocol_field, query_elements):
    """ Returns the symbols of the pool concatenated with '/' """
    tokens_concat = ''
    tokens_list = query_elements[common_field]
    for index, token_path in enumerate(tokens_list):
        token = dict_digger.dig(element, *token_path)
        tokens_concat += token
        if (index + 1) < len(tokens_list):
            tokens_concat += '/'
    return tokens_concat
def get(self, key):
    """
    Will fetch a config value.

    :param key:
    :return:
    """
    if isinstance(key, str):
        key = (key,)
    result = dict_digger.dig(self.data, *key)
    if result is None:
        if isinstance(key, (tuple, dict, list)):
            raise KeyError(
                "Could not find requested key '%s' in configuration" % '.'.join(key))
        else:
            raise KeyError(
                "Could not find requested key '%s' in configuration" % key)
    return result
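# Usage sketch for the get()/set() pair above, assuming the methods live on
# a Config class that keeps its mapping in self.data (the class name and
# sample data are hypothetical):
config = Config({'db': {'host': 'localhost', 'port': 5432}})
config.get(('db', 'host'))        # -> 'localhost'
config.set(('db', 'port'), 5433)  # digs to data['db'], then sets ['port']
config.get('missing')             # raises KeyError("Could not find ...")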
def make_posts(target_urls: List[str], proxy: ProxyHandler,
               target_date_from: datetime, target_date_to: datetime) -> List[dict]:
    # Process multiple RSS URLs
    feeds = [feedparser.parse(url, handlers=[proxy]) for url in target_urls]
    opener = build_opener(proxy)
    install_opener(opener)
    posts = []
    entries = []
    for feed in feeds:
        entries.extend(
            [entry for entry in feed['entries'] if dig(entry, 'published')])
    for entry in entries:
        rss_date_str = entry['published']  # e.g. 'Wed, 24 Mar 2021 22:33:04 GMT'
        rss_date: datetime = parse(rss_date_str).astimezone(
            timezone(timedelta(hours=9), 'JST'))
        # If the RSS entry's published date falls within the target range
        if target_date_from <= rss_date <= target_date_to:
            posts += [{
                'title': entry['title'],
                'title_link': entry['link'],
            }]
    # Remove duplicate links: build a list of just the links and skip any
    # entry whose link has already appeared earlier in the list.
    title_link_list = [elem['title_link'] for elem in posts]
    posts = [
        elem for i, elem in enumerate(posts)
        if elem['title_link'] not in title_link_list[0:i]
    ]
    return posts
def process_data(out, width, height):
    conf = []
    labels = []
    bounds = []
    temp = True
    # sort through output dictionary and use relevant information
    for i in range(len(out)):
        label = dig(out[i], 'label')
        if label == 'person':
            conf.append(dig(out[i], 'confidence'))
            bounds.append(
                ((dig(out[i], 'topleft', 'x'), dig(out[i], 'topleft', 'y')),
                 (dig(out[i], 'bottomright', 'x'), dig(out[i], 'bottomright', 'y'))))
            labels.append('person')
            temp = False
    # if no person is detected, stay at the same position
    if temp:
        return median, 1500, 1500
    # calc centers/area of images/boxes
    box_center = (int((bounds[0][0][0] + bounds[0][1][0]) / 2),
                  int((bounds[0][0][1] + bounds[0][1][1]) / 2))
    area = abs((bounds[0][0][0] - bounds[0][1][0]) *
               (bounds[0][0][1] - bounds[0][1][1]))
    img_center = (int(width / 2), int(height / 2))
    # calculate the necessary movements
    yaw, throt = process_center(box_center)
    pitch = process_area(area)
    conf = []
    bounds = []
    labels = []
    return throt, yaw, pitch
def test_end_point_miss():
    result = dict_digger.dig(TEST_HASH, 'a', 'z')
    eq_(None, result)
def test_nested_miss():
    result = dict_digger.dig(TEST_HASH, 'c', 'b')
    eq_(None, result)
def test_complex_find():
    result = dict_digger.dig(TEST_COMPLEX, 'a', 'list', 0)
    assert isinstance(result, dict)
    result = dict_digger.dig(TEST_COMPLEX, 'a', 'list', 0, 'x')
    eq_('peanut butter', result)
async def dicTest():
    await bot.say(
        dict_digger.dig(crewData, dailyData.get('CommonCrewId'), 'Rarity'))
def __get__(self, instance, cls):
    try:
        return dict_digger.dig(instance.response, *self.path, fail=True)
    except (KeyError, IndexError):
        return None
def test_nested_find():
    result = dict_digger.dig(TEST_HASH, 'a', 'b')
    eq_("tuna", result)
def test_no_exception():
    result = dict_digger.dig(TEST_LIST, 0, 'b', 'c')
    assert result is None
def test_toplevel_find():
    result = dict_digger.dig(TEST_HASH, 'a')
    eq_({'b': 'tuna', 'c': 'fish'}, result)
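# The fixtures these tests dig into are not shown in the excerpts; a
# minimal sketch inferred from the assertions (the original test module's
# exact definitions may differ):
from nose.tools import eq_
import dict_digger

TEST_HASH = {'a': {'b': 'tuna', 'c': 'fish'}}
TEST_LIST = [{'b': 'tuna'}]
TEST_COMPLEX = {'a': {'list': [{'x': 'peanut butter'}]}}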
def sms_start():
    # Get the parameters sent by Twilio
    from_number = request.values.get('From')
    message_body = request.values.get('Body')
    to_number = request.values.get('To')
    # Instantiate the Twilio service
    account_sid = '{your twilio service id}'
    auth_token = '{your twilio auth token}'
    twilio_client = Client(account_sid, auth_token)
    # print(request.values)
    try:
        assistant
    except UnboundLocalError:
        # instantiate the Watson Assistant service if it has not been created yet
        create_assistant()
    except NameError:
        create_assistant()
    # This part is responsible for managing multiple simultaneous conversations
    if not sessionList.checkKeyExistance(from_number):
        session = create_session(workspace_id, assistant_id, assistant)
        json_string = json.dumps(session)
        json_dict = json.loads(json_string)
        session_id = json_dict.get("session_id")
        sessionList.addNewSession(session_id, from_number)
        session_id = sessionList.getIidByKey(from_number)
    else:
        session_id = sessionList.getIidByKey(from_number)
    # Initialize the variables used by the chat
    user_input = ''
    context = {}
    current_action = ''
    # Main input/output loop:
    while current_action != 'end_conversation':
        while True:
            # Send the client's message to the chatbot and get its response
            response = assistant.message(
                workspace_id=workspace_id,
                assistant_id=assistant_id,
                session_id=session_id,
                input={'text': message_body},
                context=context).get_result()
            # BEGIN LOGGING
            print("####----------Round Start-----------####")
            print("INPUT")
            print(" Message from " + from_number + ": '" + message_body + "'")
            if len(dict_digger.dig(response, 'output', 'intents')) != 0:
                print(" Intents: " + str(dict_digger.dig(response, 'output', 'intents', 0, 'intent')))
                if 'General_Ending' == dict_digger.dig(response, 'output', 'intents', 0, 'intent'):
                    current_action = 'end_conversation'
                    print(current_action)
            if len(dict_digger.dig(response, 'output', 'entities')) != 0:
                print(" Entities: " + str(dict_digger.dig(response, 'output', 'entities', 0, 'entity')))
            print(" Context: " + str(dict_digger.dig(response, 'context')))
            # END LOGGING
            # BEGIN LOGGING
            print("OUTPUT")
            if len(dict_digger.dig(response, 'output', 'intents')) != 0:
                print(" Intents: " + str(dict_digger.dig(response, 'output', 'intents', 0, 'intent')))
            if len(dict_digger.dig(response, 'output', 'entities')) != 0:
                print(" Entities: " + str(dict_digger.dig(response, 'output', 'entities', 0, 'entity')))
            print(" Context: " + str(dict_digger.dig(response, 'context')))
            response_text = str(dict_digger.dig(response, 'output', 'generic', 0, 'text'))
            print(" Bot reply: '" + response_text + "'")
            # END LOGGING
            # Instantiate the Twilio response
            resp = MessagingResponse()
            # Set the Twilio reply message
            resp.message(response_text)
            # Update the stored context with the one most recently received from the dialog.
            context = dict_digger.dig(response, 'context')
            print("####----------Round End-----------####")
            # send the Twilio reply
            return str(resp)
    # End the conversation session when a farewell intent is detected
    if current_action == 'end_conversation':
        print("####----------Session End-----------####")
        response = service.delete_session(
            assistant_id=assistant_id,
            session_id=session_id
        ).get_result()
def test_dict_exception():
    # with fail=True, digging a missing dict key is expected to raise
    dict_digger.dig(TEST_COMPLEX, 'a', 'e', fail=True)
def test_exception():
    # with fail=True, digging past a leaf value is expected to raise
    dict_digger.dig(TEST_LIST, 0, 'b', 'c', fail=True)
def test_list_find():
    result = dict_digger.dig(TEST_LIST, 0, 'b')
    eq_("tuna", result)
def __get__(self, instance, owner):
    item = dict_digger.dig(instance.json_response, *self.path)
    return item
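# A sketch of how a dig-backed descriptor like the one above could be used
# (JsonField, Album, and the sample payload are hypothetical):
import dict_digger

class JsonField:
    def __init__(self, *path):
        self.path = path

    def __get__(self, instance, owner):
        return dict_digger.dig(instance.json_response, *self.path)

class Album:
    artist_name = JsonField('artists', 0, 'name')

    def __init__(self, json_response):
        self.json_response = json_response

Album({'artists': [{'name': 'Nina Simone'}]}).artist_name  # -> 'Nina Simone'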
# Calculates how many minutes past midnight it is in the day
mins_1 = (hour_24 * 60)
total_mins = int(mins_1 + minute_24)
# Fetches every temperature report for today since midnight local time,
# then finds the highest and lowest values and assigns them as the high
# and low temperature for today, respectively
g = urllib.request.urlopen(
    'http://api.synopticdata.com/v2/stations/timeseries?stid=KMSP&vars=air_temp&units=english&qc_remove_data=mark&token=8c96805fbf854373bc4b492bb3439a67&obtimezone=local&recent=%s&output=json' % (total_mins))
json_string_g = g.read()
parsed_json_g = json.loads(json_string_g)
for each in parsed_json_g['STATION']:
    observations_g = each['OBSERVATIONS']
    all_temps = dig(observations_g, 'air_temp_set_1')
    all_temps = list(filter(None, all_temps))
    max_temp_float_f = max(all_temps)
    min_temp_float_f = min(all_temps)
    max_temp_f = "{:.1f}".format(max_temp_float_f)
    min_temp_f = "{:.1f}".format(min_temp_float_f)
    max_temp_c = "{:.1f}".format((max_temp_float_f - 32) * (5 / 9))
    min_temp_c = "{:.1f}".format((min_temp_float_f - 32) * (5 / 9))
    # normalize negative zero; the formatted values are strings at this point
    if max_temp_f == "-0.0":
        max_temp_f = "0.0"
    if min_temp_f == "-0.0":
        min_temp_f = "0.0"
def index():
    # try:
    content = request.get_json(force=True)
    requestPart = dict_digger.dig(content, 'request')
    req_elements = None
    if requestPart is not None:
        requestCopy = requestPart.copy()
        if "date" in requestCopy:
            del requestCopy["date"]
        requestMsg = json_to_seldon_message(requestCopy)
        (req_features, _, req_datadef, req_datatype) = extract_request_parts(requestMsg)
        req_elements = createElelmentsArray(req_features, list(req_datadef.names))
    responsePart = dict_digger.dig(content, 'response')
    res_elements = None
    if responsePart is not None:
        responseCopy = responsePart.copy()
        if "date" in responseCopy:
            del responseCopy["date"]
        responseMsg = json_to_seldon_message(responseCopy)
        (res_features, _, res_datadef, res_datatype) = extract_request_parts(responseMsg)
        res_elements = createElelmentsArray(res_features, list(res_datadef.names))
    if req_elements is not None and res_elements is not None:
        for i, (a, b) in enumerate(zip(req_elements, res_elements)):
            # merged = {**a, **b}
            content["request_elements"] = a
            content["response_elements"] = b
            reqJson = extractRow(i, requestMsg, req_datatype, req_features, req_datadef)
            resJson = extractRow(i, responseMsg, res_datatype, res_features, res_datadef)
            content["request"] = reqJson
            content["response"] = resJson
            # log formatted json to stdout for fluentd collection
            print(str(json.dumps(content)))
    elif req_elements is not None:
        for i, e in enumerate(req_elements):
            content["request_elements"] = e
            reqJson = extractRow(i, requestMsg, req_datatype, req_features, req_datadef)
            content["request"] = reqJson
            print(str(json.dumps(content)))
    elif res_elements is not None:
        for i, e in enumerate(res_elements):
            content["response_elements"] = e
            resJson = extractRow(i, responseMsg, res_datatype, res_features, res_datadef)
            content["response"] = resJson
            print(str(json.dumps(content)))
    else:
        print(str(json.dumps(content)))
    sys.stdout.flush()
    return str(content)