def get_company(parameters):
    """Look up companies on The Muse public API.

    Args:
        parameters: dict that may contain 'company_industry' and
            'company_size'; missing keys default to ''.

    Returns:
        The parse_dict() rendering of up to the first three matching
        companies, or a plain message string when nothing matched.
    """
    company_industry = parameters.get('company_industry', '')
    company_size = parameters.get('company_size', '')
    url = "https://www.themuse.com/api/public/companies?"
    params = {
        'industry': company_industry,
        'size': company_size,
        'api_key': get_api_key(),
        'page': '1',
        'descending': 'false'
    }
    request_url = url + urlencode(params)
    r = requests.get(request_url)
    # .get() guards against a response with no 'results' key (e.g. an
    # API error payload); keep only the first three hits.
    results = r.json().get('results', [])[:3]
    if not results:
        return 'No companies for your query'
    # NOTE(review): the 'company' key carries the *description* here,
    # whereas get_jobs puts the company name under the same key — kept
    # as-is for backward compatibility with downstream consumers.
    data = [{
        'name': result['name'],
        'company': result['description'],
        'link': result['refs']['landing_page']
    } for result in results]
    saveToDatabase(data)
    return parse_dict(data)
def track(update, context):
    """Telegram /track command handler: report current weather.

    Joins context.args into a city query, asks OpenWeatherMap for the
    current conditions, and replies in Markdown. On a missing city or a
    non-200 API response it replies with a usage hint instead.
    """
    usage = textwrap.dedent('''
        No data found. Please enter the command in the following format:
        _/track <city name>, <country code (optional)>_
        *Example:* _/track Paris_ or _/track Paris, FR_
        ''')
    # Guard: no city supplied — skip the pointless API round-trip
    # (an empty 'q' would come back non-200 anyway) and show usage.
    if not context.args:
        context.bot.send_message(chat_id=update.effective_chat.id,
                                 parse_mode='Markdown', text=usage)
        return
    api_key = get_api_key()
    query = {'q': ' '.join(context.args), 'appid': api_key, 'units': 'metric'}
    response = requests.get('https://api.openweathermap.org/data/2.5/weather',
                            params=query)
    if response.status_code != 200:
        # Unknown city / bad request — same usage hint as the no-args case.
        context.bot.send_message(chat_id=update.effective_chat.id,
                                 parse_mode='Markdown', text=usage)
        return
    data = response.json()
    output = textwrap.dedent(f'''
        *Weather Report of {data['name']}, {data['sys']['country']}*
        - - - - -
        *Weather group:* {data['weather'][0]['main']}
        *Weather condition:* {data['weather'][0]['description'].capitalize()}
        *Current temperature:* {data['main']['temp']} \N{DEGREE SIGN}C
        *Feels like:* {data['main']['feels_like']} \N{DEGREE SIGN}C
        *Atmospheric pressure:* {data['main']['pressure']} hPa
        *Humidity:* {data['main']['humidity']}%
        ''')
    context.bot.send_message(chat_id=update.effective_chat.id,
                             parse_mode='Markdown', text=output)
def get_jobs(parameters):
    """Look up job listings on The Muse public API.

    Args:
        parameters: dict that may contain 'job_type' and 'job_level';
            missing keys default to ''.

    Returns:
        The parse_dict() rendering of up to the first three matching
        jobs, or a plain message string when nothing matched.
    """
    job_type = parameters.get('job_type', '')
    job_level = parameters.get('job_level', '')
    url = "https://www.themuse.com/api/public/jobs?"
    params = {
        'category': job_type,
        'level': job_level,
        'api_key': get_api_key(),
        'page': '1',
        'descending': 'false'
    }
    request_url = url + urlencode(params)
    r = requests.get(request_url)
    # .get() guards against a response with no 'results' key (e.g. an
    # API error payload); keep only the first three hits.
    results = r.json().get('results', [])[:3]
    if not results:
        return 'No jobs for your query'
    data = [{
        'name': result['name'],
        'company': result['company']['name'],
        'link': result['refs']['landing_page']
    } for result in results]
    saveToDatabase(data)
    return parse_dict(data)
def find_user_favorite_list(used_id):
    """Return the listings favorited by the given user, or None.

    Calls Etsy's findAllUserFavoriteListings endpoint for *used_id* and
    returns the decoded JSON response; returns None on HTTP 403 (e.g. a
    private account).
    """
    # Fixes vs. the original: the user id and api key are actually
    # interpolated now ('{{api_key}}' was an escaped literal brace and
    # ':user_id' was a never-substituted placeholder), and the 403 check
    # compares response.status_code to int 403 instead of comparing the
    # Response object itself to the string '403' (always False).
    r = requests.get(
        'https://openapi.etsy.com/v2/users/{user_id}/favorites/listings'
        '?api_key={key}'.format(user_id=used_id,
                                key=api_key.get_api_key()))
    if r.status_code == 403:
        return None
    return r.json()
def find_listing_favorites(listing_id):
    """Return the users who have favorited the passed listing, or None.

    Takes an Etsy listing_id and makes a findAllListingFavoredBy request;
    returns the decoded JSON response, or None on HTTP 403.
    """
    # Fixes vs. the original: listing_id and the api key are actually
    # interpolated now ('{{api_key}}' was an escaped literal brace and
    # ':listing_id' was a never-substituted placeholder), and the 403
    # check compares response.status_code to int 403 instead of comparing
    # the Response object itself to the string '403' (always False).
    r = requests.get(
        'https://openapi.etsy.com/v2/listings/{listing_id}/favored-by'
        '?api_key={key}'.format(listing_id=listing_id,
                                key=api_key.get_api_key()))
    if r.status_code == 403:
        return None
    return r.json()
def find_user_id(user_name):
    """Return Etsy's user record for *user_name*, or None.

    Only public accounts resolve; for private accounts the API answers
    403 and this function returns None. The numerical user_id is in the
    decoded response (presumably under 'results' — verify against the
    Etsy getUser response schema).
    """
    # Fixes vs. the original: the user_name parameter is actually used
    # (the URL previously hard-coded ':kpish'), '{{api_key}}' no longer
    # escapes the brace so the key is interpolated, and the 403 check
    # compares response.status_code to int 403 instead of comparing the
    # Response object itself to the string '403' (always False).
    r = requests.get(
        'https://openapi.etsy.com/v2/users/{name}'
        '?api_key={key}'.format(name=user_name,
                                key=api_key.get_api_key()))
    if r.status_code == 403:
        return None
    return r.json()
def getData():
    """Fetch intraday prices for the requested stock and set up training state.

    Reads the ticker from the Flask request body ({"stock": ...}), pulls
    15-minute intraday data from Alpha Vantage, normalizes it, slices it
    into lookback windows, splits train/test tensors, and constructs the
    GRU model, loss, and optimizer — all stored in module-level globals
    shared with the training endpoints.

    Returns a dict: {"error_exists": False} on success, or
    {"error_exists": True, "error": "api"|"ticker"} on failure.
    """
    # All training state lives in module globals so other request
    # handlers (training/prediction endpoints) can reach it.
    global df
    global test_size
    global train_size
    global x_train
    global x_test
    global y_train
    global y_test
    global stock
    global close_data_raw
    global input_dim
    global hidden_dim
    global num_layers
    global output_dim
    global num_epochs
    global sample_size
    global model
    global optimizer
    global loss_func
    global loss_vals
    global epoch
    epoch = 0
    stock = request.json["stock"]
    # get the api key
    api_key = get_api_key()
    # get the response
    response = requests.get(
        "https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY_EXTENDED&symbol="
        + stock + "&interval=15min&slice=year1month1&adjusted=true&apikey="
        + api_key)
    # parse the text (Alpha Vantage returns CSV with CRLF line endings)
    txt = response.text.split("\r\n")
    # error 1: too many api requests — detected by matching the exact
    # rate-limit JSON note the API sends instead of CSV
    if txt == [
        '{\n "Note": "Thank you for using Alpha Vantage! Our standard API call frequency is 5 calls per minute and 500 calls per day. Please visit https://www.alphavantage.co/premium/ if you would like to target a higher API call frequency."\n}'
    ]:
        return ({"error_exists": True, "error": "api"})
    data = [t.split(",") for t in txt]
    # reverse so rows run oldest -> newest (the CSV arrives newest-first)
    data.reverse()
    # error 2: ticker symbol doesn't exist (no meaningful rows returned)
    if len(data) < 5:
        return ({"error_exists": True, "error": "ticker"})
    # convert list of lists into pandas dataframe; after the reverse the
    # header row sits at the end (data[-1]) and data[1:-1] are the rows
    df = pd.DataFrame(data[1:-1])
    df.columns = data[-1]
    df_data = df[["open", "high", "low", "close", "volume"]].astype(float)
    # normalize data (min-max scaling to [0, 1] per column)
    norm_df = (df_data - df_data.min()) / (df_data.max() - df_data.min())
    # define lookback: each sample is a window of 20 consecutive closes
    lookback = 20
    # get the raw data and declare our input data
    close_data_raw = norm_df[["close"]].to_numpy()
    close_data = []
    # iterate through and add the appropriate points (sliding windows)
    for index in range(len(close_data_raw) - lookback):
        close_data.append(close_data_raw[index:index + lookback])
    # convert to a numpy array; shape is (num_windows, lookback, 1)
    close_data = np.array(close_data)
    # calculate appropriate sizes: 80/20 train/test split
    test_size = int(np.round(0.2 * close_data.shape[0]))
    train_size = close_data.shape[0] - (test_size)
    # x value arrays: all but the last step of each window is the input
    x_train_arr = close_data[:train_size, :-1, :]
    # NOTE(review): omits the trailing ', :' used for x_train_arr —
    # equivalent slicing for a 3-D array, kept as-is
    x_test_arr = close_data[train_size:, :-1]
    # y value arrays: the final step of each window is the target
    y_test_arr = close_data[train_size:, -1, :]
    y_train_arr = close_data[:train_size, -1, :]
    # convert x values into torches
    x_train = torch.from_numpy(x_train_arr).type(torch.Tensor)
    x_test = torch.from_numpy(x_test_arr).type(torch.Tensor)
    # convert y values into torches
    y_train = torch.from_numpy(y_train_arr).type(torch.Tensor)
    y_test = torch.from_numpy(y_test_arr).type(torch.Tensor)
    # declare needed variables (GRU hyperparameters)
    input_dim = 1
    hidden_dim = 32
    num_layers = 2
    output_dim = 1
    num_epochs = 100
    sample_size = train_size + test_size
    # create the model (GRU is a project-defined module — see its class)
    model = GRU(input_dim, hidden_dim, num_layers, output_dim)
    # choose the loss function
    loss_func = torch.nn.MSELoss(reduction='mean')
    # select an optimizer algorithm
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    # declare needed variables (per-epoch loss history, filled in training)
    loss_vals = np.zeros(num_epochs)
    return {"error_exists": False}
import api_key

# Bungie Destiny public API endpoints and account constants.
API_URL = 'https://www.bungie.net/platform/Destiny'
PS4_TYPE = '2'  # presumably the PlayStation membershipType code — verify against Bungie docs
ALL_TYPE = 'All'
USER_NAME = 'nedds9'
ID_KEY = 'membershipId'

# Every request must carry the registered API key in this header.
HEADERS = {"X-API-Key": api_key.get_api_key()}

# Endpoint that resolves USER_NAME to a membership id.
PLAYER_PATH = f'{API_URL}/SearchDestinyPlayer/{PS4_TYPE}/{USER_NAME}/'


def get_stats_path(membership_id):
    """Build the account-stats endpoint URL for *membership_id*."""
    return f'{API_URL}/Stats/Account/{PS4_TYPE}/{membership_id}/'
from buoy_system import get_buoy_info app = Flask(__name__) locations_try = { 'Seattle': '98117', 'Redmond': '98052', 'Bellevue': '98008', 'Renton': '98055', 'Tacoma': '98402' } locations_coastal = {} buoys = ['WPOW1', '46120'] api_token = "charlie265" api_key = get_api_key(1) def knots_conversion(speed): try: return round(speed * 1.944, 1) except: return speed @app.route('/', methods=['GET']) def home(): gathered_info = {} sorted_buoy_info = {} for city_name, zip_code in locations_try.items(): url = f"http://api.openweathermap.org/data/2.5/weather?zip={zip_code},us&APPID={api_key}"