import json
import logging
import os

import gspread
import requests
from flask import render_template
from google.appengine.ext import deferred
from lxml import etree
from oauth2client.service_account import ServiceAccountCredentials
from pyquery import PyQuery as pq
from requests.auth import HTTPBasicAuth


def index():
    # Load scraper settings: target URL, API credentials, and the key of
    # the Google Sheets document that lists the screens.
    config_file = os.path.join(os.path.dirname(__file__), 'config.json')
    with open(config_file, 'r') as config_fp:
        config_dict = json.loads(config_fp.read())

    # Fetch the status page; recover=True lets lxml tolerate broken markup.
    response = requests.get(
        config_dict['target_url'],
        auth=HTTPBasicAuth(config_dict['client_id'],
                           config_dict['client_secret']))
    parser = etree.XMLParser(recover=True)
    d = pq(etree.fromstring(response.text, parser))
    elements = [e.attrib['title'] for e in d('label.screen-name')]

    # Authorize against the Google Sheets API with a service account.
    credential_file = os.path.join(os.path.dirname(__file__),
                                   'credential.json')
    with open(credential_file, 'r') as credential_fp:
        credential_dict = json.loads(credential_fp.read())
    credentials = ServiceAccountCredentials.from_json_keyfile_dict(
        credential_dict, scopes=['https://spreadsheets.google.com/feeds'])
    gs_client = gspread.authorize(credentials)
    worksheet = gs_client.open_by_key(config_dict['doc_key']).sheet1
    records = worksheet.get_all_values()

    results = []
    for i, row in enumerate(records):
        if i == 0:
            continue  # skip the header row
        in_use = row[3] != ''
        working = any(row[0] in e for e in elements)
        result = {
            'id': row[0],
            'working': working,
            'in_use': in_use,
            'notifications': row[3],
            'location': row[2],
        }
        results.append(result)
        if not in_use:
            logging.info(u'{} is not in use, skipped'.format(row[0]))
            continue
        state = States.get_by_id(row[0])
        if not working:
            # Only notify on a transition from working to broken, so a
            # screen that stays down does not page repeatedly.
            if state is None or state.working is True:
                logging.info(
                    u'{}: looks bad, notification task will be sent to queue'
                    .format(row[0]))
                States(id=row[0], working=False).put()
                deferred.defer(send_notification, result)
            else:
                logging.info(
                    u'{}: looks bad, same status as before'.format(row[0]))
        else:
            logging.info(u'{}: looks good'.format(row[0]))
            States(id=row[0], working=True).put()
    return render_template('index.html', results=results)
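# A minimal sketch of the datastore model and notification hook that
# index() relies on; neither definition appears above, so the property set
# and the alert text here are assumptions.
from google.appengine.ext import ndb


class States(ndb.Model):
    # Keyed by the screen id taken from the first spreadsheet column.
    working = ndb.BooleanProperty(default=True)


def send_notification(result):
    # Hypothetical deferred-task handler: log (or forward) the details of
    # the screen that stopped working.
    logging.warning(u'{} at {} looks down'.format(result['id'],
                                                  result['location']))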
def __init__(self, Data, lstm_neurons, epochs, ax, gradient=0.9056,
             dense_neurons=3):
    # Build and train the LSTM, then report its fit statistics.
    self.lstm = LSTM(lstm_neurons, dense_neurons)
    self.lstm.create_model()
    self.lstm.fit_model(epochs, Data)
    self.lstm.print_stats()
    # Generate unperturbed and perturbed state trajectories and plot the
    # resulting Lyapunov-exponent estimate on the supplied axes.
    self.states = States(4000, 23000)
    self.states.create_unperturbed(self.lstm, Data)
    self.states.create_perturbed(self.lstm, Data)
    self.Lyapunov = Lyapunov(self.states)
    self.Lyapunov.plot_exponent(ax, gradient)
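# A hedged usage sketch for the constructor above: the owning class name is
# not shown, so 'LyapunovAnalysis' and the load_series() helper below are
# hypothetical stand-ins for whatever the project actually defines.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
series = load_series()  # hypothetical loader returning the training series
analysis = LyapunovAnalysis(series, lstm_neurons=32, epochs=50, ax=ax)
plt.show()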
import requests


def state_abb():
    """Seed state abbreviations into the database."""
    res = requests.get(STATES_API)
    resp = res.json()
    for entry in resp:
        # Each API record carries the two-letter abbreviation under 'state'
        # and the numeric FIPS code under 'fips'.
        state = States(state=entry['fips'], state_abb=entry['state'])
        db.session.add(state)
    # Commit once after all rows are staged.
    db.session.commit()
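# A minimal sketch of the SQLAlchemy model that state_abb() assumes; the
# column names match the keyword arguments above, but the types and
# lengths are guesses.
class States(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    state = db.Column(db.String(2))      # numeric FIPS code
    state_abb = db.Column(db.String(2))  # two-letter abbreviation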
from flask import abort, jsonify, make_response, request


def add_statistics():
    try:
        for data in request.json['country_statistics']:
            # Save each state document first so the parent statistics
            # record can reference the persisted objects.
            all_states = []
            for state in data['states']:
                state_doc = States(key=state['key'],
                                   name=state['name'],
                                   address=state['address'],
                                   latitude=state['latitude'],
                                   longitude=state['longitude'],
                                   confirmed=state['confirmed'],
                                   deaths=state['deaths'],
                                   recovered=state['recovered'])
                state_doc.save()
                all_states.append(state_doc)
            statistics = Statistics(country=data['country'],
                                    code=data['code'],
                                    flag=data['flag'],
                                    coordinates=data['coordinates'],
                                    confirmed=data['confirmed'],
                                    deaths=data['deaths'],
                                    recovered=data['recovered'],
                                    states=all_states)
            statistics.save()
        # Respond only after every country in the payload is stored.
        return make_response(jsonify({"success": request.json}), 201)
    except KeyError:
        # A missing field anywhere in the payload is a client error.
        abort(400)
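# An illustrative request body for add_statistics(); the keys mirror the
# fields the handler reads, and every value is made-up sample data.
sample_payload = {
    'country_statistics': [{
        'country': 'Examplia',
        'code': 'EX',
        'flag': 'https://example.com/ex.png',
        'coordinates': [0.0, 0.0],
        'confirmed': 120,
        'deaths': 3,
        'recovered': 80,
        'states': [{
            'key': 'EX-01',
            'name': 'Example Province',
            'address': 'Example Province, Examplia',
            'latitude': 0.0,
            'longitude': 0.0,
            'confirmed': 120,
            'deaths': 3,
            'recovered': 80,
        }],
    }],
}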
from flask import jsonify, request


def state_fc():
    chatid = request.json['chat_id']
    stage = request.json['stage']
    ratee_email = request.json['ratee_email']
    course_code = request.json['course_code']
    skills = request.json['skills']
    rater_email = request.json['rater_email']

    # Insert a new row only when this chat id has not been seen before.
    valid_chatid = States.query.filter_by(chatid=chatid).first()
    if valid_chatid is None:
        new_entry = States(chatid=chatid,
                           stage=stage,
                           ratee_email1=ratee_email,
                           course_code1=course_code,
                           rate_skill1=skills,
                           rater_email1=rater_email)
        db.session.add(new_entry)
        db.session.commit()
    # Respond in either case so the view never returns None.
    return jsonify('{} has been entered'.format(chatid)), 200
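# A hedged example call against state_fc(); 'app' and the route path
# '/state' are assumptions, since the Flask app and the decorator are not
# shown above.
with app.test_client() as client:
    resp = client.post('/state', json={
        'chat_id': '12345',
        'stage': 'start',
        'ratee_email': 'ratee@example.com',
        'course_code': 'CS101',
        'skills': 'teamwork',
        'rater_email': 'rater@example.com',
    })
    print(resp.status_code, resp.get_json())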
import pandas as pd

init_db()

states_abbr = [
    'al', 'ak', 'az', 'ar', 'ca', 'co', 'ct', 'dc', 'de', 'fl', 'ga',
    'hi', 'id', 'il', 'in', 'ia', 'ks', 'ky', 'la', 'me', 'md', 'ma',
    'mi', 'mn', 'ms', 'mo', 'mt', 'ne', 'nv', 'nh', 'nj', 'nm', 'ny',
    'nc', 'nd', 'oh', 'ok', 'or', 'pa', 'ri', 'sc', 'sd', 'tn', 'tx',
    'ut', 'vt', 'va', 'wa', 'wv', 'wi', 'wy'
]

# Load the parks spreadsheet and drop duplicate rows by park name.
df = pd.read_csv('national_parks.csv', encoding='latin-1')
df.drop_duplicates(subset='Name', inplace=True)
df = df.reset_index()

for state in states_abbr:
    new_state = States(Name=state.upper())
    for j in range(len(df)):
        if state.upper() in df['Challenge State'][j]:
            check_park = session.query(National_parks).filter_by(
                Name=df['Name'][j]).first()
            if check_park:
                # The park already exists: just attach this state to it.
                check_park.state.append(new_state)
            else:
                new_park = National_parks(
                    Name=df['Name'][j],
                    Type=df['Type'][j],
                    Location=df['Location'][j],
                    Description=df['Description'][j],
                    Challenge_states=df['Challenge State'][j])
                # Attach the state and stage the new park for insertion,
                # mirroring the existing-park branch above.
                new_park.state.append(new_state)
                session.add(new_park)
session.commit()
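# A minimal sketch of the ORM layer the seeding script assumes: parks and
# states joined many-to-many through an association table. Table names,
# column types, and the declarative Base are assumptions inferred from the
# attribute accesses above.
from sqlalchemy import Column, ForeignKey, Integer, String, Table
from sqlalchemy.orm import declarative_base, relationship

Base = declarative_base()

park_state = Table(
    'park_state', Base.metadata,
    Column('park_id', Integer, ForeignKey('national_parks.id'),
           primary_key=True),
    Column('state_id', Integer, ForeignKey('states.id'), primary_key=True))


class States(Base):
    __tablename__ = 'states'
    id = Column(Integer, primary_key=True)
    Name = Column(String(2))


class National_parks(Base):
    __tablename__ = 'national_parks'
    id = Column(Integer, primary_key=True)
    Name = Column(String)
    Type = Column(String)
    Location = Column(String)
    Description = Column(String)
    Challenge_states = Column(String)
    # 'state' matches the relationship name used by check_park.state.append.
    state = relationship('States', secondary=park_state)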