def __init__(self):
    self.num_dancers = 0  # number of connected dancers
    self.is_idle = True
    self.counter = 0
    self.init_counter = 24
    self.start_time = time.time()
    dance_model_path = "model_weights.json"
    dance_scaler_path = "dnn_std_scaler.bin"
    self.ml = ML(
        dance_scaler_path=dance_scaler_path,
        dance_model_path=dance_model_path,
    )
    self.skip_initial_readings_cnt = [[50, 50], [50, 50], [50, 50]]
def test_curated_playlist(self):
    os.mkdir("1")
    tracks_to_classify = [{
        "url": "https://p.scdn.co/mp3-preview/b326e03624cb098d8387e17aa46669edac0d025a?cid=774b29d4f13844c495f206cafdad9c86",
        "id": "2takcwOaAZWiXQijPHIx7B",
    }]
    # no model exists yet, so classification is refused
    track_ids = ModelHandler().curated_tracks(tracks_to_classify, "1")
    assert track_ids is None
    ML().train_model("test_music", "test_music_2", path_to_save="1/model")
    track_ids = ModelHandler().curated_tracks(tracks_to_classify, "1")
    assert isinstance(track_ids, list)
    shutil.rmtree("1")
def downloadAndProcessFiles(s3files, action):
    for s3filename in s3files:
        # Construct a reference to the S3 object so we can download it.
        k = boto.s3.key.Key(bucket)
        k.key = s3filename
        try:
            contents = k.get_contents_as_string()
        except:
            print "Error: S3 couldn't download " + s3filename
            continue  # skip files that failed to download
        if action == "train":
            ML.handleTrainFile(contents)
        elif action == "input":
            ML.handleInputFile(contents)
        elif action == "output":
            output = ML.handleOutputFile(contents)
            return output
        else:
            # Doesn't make sense; it's got to be one of the three APIs.
            return False
    return True
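# A minimal, hedged setup sketch for the module-level `bucket` that
# downloadAndProcessFiles relies on, using the boto 2.x API the function
# already uses. The bucket name and file key are placeholders, not from
# the original source.
import boto
import boto.s3.key

conn = boto.connect_s3()                      # credentials come from the environment
bucket = conn.get_bucket("my-training-data")  # hypothetical bucket name

downloadAndProcessFiles(["train/batch1.csv"], "train")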
def pred():
    '''get daily prediction data'''
    # connect to the database to access real-time data
    u = connectMysql()
    v = getForecast()
    # get data from the frontend
    start_station = request.args.get("start_station")
    destination_station = request.args.get("destination_station")
    day_of_travel = request.args.get("day_of_travel")
    hour_of_travel = request.args.get("hour_of_travel")
    # get backend data of the corresponding stations
    for i in range(len(u)):
        if start_station == u[i][2]:
            start_station_number = u[i][1]
            start_banking = u[i][6]
            start_bike_stands = u[i][8]
        if destination_station == u[i][2]:
            end_station_number = u[i][1]
            end_banking = u[i][6]
            end_bike_stands = u[i][8]
    # get weather forecast information according to the user's input
    day = pd.to_datetime(day_of_travel)
    day = day.weekday() + 1
    weather_info = []
    for i in range(len(v)):
        date = (v[i][0].split(" "))[0]
        hour = int(v[i][1])
        if int(hour_of_travel) == 23:
            # for the last hour of the day, match the first forecast of the next day
            if (str(pd.to_datetime(day_of_travel) + datetime.timedelta(days=1)).split(" ")[0] == str(date)) and hour == 0:
                weather_info = v[i]
                break
        elif str(date) == str(day_of_travel):
            if abs(int(hour_of_travel) - hour) <= 1.5:
                weather_info = v[i]
                break
    if len(weather_info) == 0:
        weather_info = v[-1]  # fall back to the latest forecast
    temp = weather_info[2]
    feels_like = weather_info[3]
    temp_min = weather_info[4]
    temp_max = weather_info[5]
    pressure = weather_info[6]
    humidity = weather_info[7]
    wind_speed = weather_info[8]
    main_describe = weather_info[9]
    # encode categorical features
    start_banking = 0 if start_banking == 'False' else 1
    end_banking = 0 if end_banking == 'False' else 1
    if main_describe == "Clouds":
        main_describe = 0
    elif main_describe == "Drizzle":
        main_describe = 1
    elif main_describe == "Rain":
        main_describe = 2
    else:
        main_describe = 3
    # get historical data to plot the first and second charts
    s, d = getHistoricalData(start_station_number, end_station_number, hour_of_travel)
    x_axis = []
    y_axis_bike = []
    y_axis_stands = []
    for i in range(7):
        x_axis.append(s[i][1].split(' ')[0])
        y_axis_bike.append(s[i][11])
        y_axis_stands.append(s[i][10])
    # invoke the prediction program and feed in all parameters
    start_arr = [float(day), float(hour_of_travel), float(start_station_number),
                 float(start_bike_stands), float(start_banking), float(main_describe),
                 float(temp), float(feels_like), float(temp_min), float(temp_max),
                 float(wind_speed), float(pressure), float(humidity)]
    end_arr = [float(day), float(hour_of_travel), float(end_station_number),
               float(end_bike_stands), float(end_banking), float(main_describe),
               float(temp), float(feels_like), float(temp_min), float(temp_max),
               float(wind_speed), float(pressure), float(humidity)]
    result_1 = ML.predict_available_bike(start_arr)
    result_2 = ML.predict_available_stands(end_arr)
    final = {
        "bike_available": result_1,
        "stands_available": result_2,
        "x_axis": x_axis,
        "y_axis_bike": y_axis_bike,
        "y_axis_stands": y_axis_stands,
        "description": main_describe,
    }
    return json.dumps(final)
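# A hedged client-side sketch of calling pred(), assuming it is registered
# at a hypothetical /pred route on a local Flask server (the @app.route
# decorator is not shown in the snippet above). The station names and date
# are placeholder values.
import requests

resp = requests.get(
    "http://localhost:5000/pred",
    params={
        "start_station": "Example Street",
        "destination_station": "Example Square",
        "day_of_travel": "2021-04-21",
        "hour_of_travel": "9",
    },
)
data = resp.json()
print(data["bike_available"], data["stands_available"])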
def test_classify_tracks(self):
    ml = ML()
    ml.classify_tracks(['test_music/514q3otlT6HczfChuLDUSa.mp3'],
                       "tester_model", "test_music")
#!/usr/bin/python

from ML import ML, parse_url
import argparse
import sys

parser = argparse.ArgumentParser()
parser.add_argument(
    'url', metavar='url', type=str,
    help='http[s]://username:password@server.')
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args['url'])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

ml = ML(server, {'username': username, 'password': password})
ml.flush_myreports()
parser.add_argument(
    'url', metavar='url', type=str,
    help='http[s]://username:password@server.')
parser.add_argument('-filename', help='Destination filename')
parser.add_argument('-dir', help='Destination directory')
parser.add_argument('-timestamp', help='YYMMDD_HHMM_ZZZ')
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args['url'])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

ml = ML(server, {'username': username, 'password': password})
plan = ml.get_plan(args['timestamp'])

if args['dir']:
    dir = args['dir'] + "/"
else:
    dir = "./"

if plan['timestamp']:  # we retrieved something
    if args['filename']:
        path = dir + args['filename']
    else:
        path = dir + plan['timestamp'] + ".pln"
    data = plan['file-content']
    with open(path, 'w') as f:
        f.write(data)
def edit():
    json_list = []
    tweets = ML()
    tweets2 = tweets[[
        "id", "Created at", "Screen Name", "Tweet Text", "sentiment", "Subject"
    ]]
    tweets2["Frequency"] = 1
    # normalize the date-time string
    time_string = tweets2["Created at"].to_list()
    time_edited = []
    for item in time_string:
        time_edited.append(item.replace(" +0000", ""))
    tweets2["Created at"] = time_edited
    tweets2['Created at'] = pd.to_datetime(tweets2['Created at'], infer_datetime_format=True)
    tweets2['date'] = tweets2['Created at'].dt.date
    # most tweets are from the same day; the time axis would skew if not all
    # were, so keep only the most frequent date
    date = pd.DataFrame(tweets2.groupby("date")["Frequency"].count())
    date = date.sort_values(by='Frequency', ascending=False)
    date = date.head(1)
    day = date.index.values[0]
    edited_time_tweets = tweets2.loc[tweets2["date"] == day, [
        "date", "Created at", "Screen Name", "Tweet Text", "sentiment",
        "Subject", "Frequency"
    ]]
    edited_time_tweets = edited_time_tweets.dropna()
    edited = edited_time_tweets.sort_values(by='Created at')
    edited = edited.rename(columns={"Created at": "datetime"})
    edited = edited.drop(columns=['date'])
    # json: positive sentiment
    only_positive = edited.loc[edited["sentiment"] == "positive", :]
    positive_frequency = only_positive[["datetime", "Frequency", "Subject"]]
    positive_frequency_trump = positive_frequency.loc[positive_frequency["Subject"] == "Trump", :]
    positive_frequency_andrews = positive_frequency.loc[positive_frequency["Subject"] == "Andrews", :]
    only_positive_trump = positive_frequency_trump.resample('0.05T', on='datetime').sum()
    only_positive_trump = only_positive_trump.reset_index()
    only_positive_trump["sentiment"] = "positive"
    only_positive_andrews = positive_frequency_andrews.resample('1H', on='datetime').sum()
    only_positive_andrews = only_positive_andrews.reset_index()
    only_positive_andrews["sentiment"] = "positive"
    # json: negative sentiment
    only_negative = edited.loc[edited["sentiment"] == "negative", :]
    negative_frequency = only_negative[["datetime", "Frequency", "Subject"]]
    negative_frequency_trump = negative_frequency.loc[negative_frequency["Subject"] == "Trump", :]
    negative_frequency_andrews = negative_frequency.loc[negative_frequency["Subject"] == "Andrews", :]
    only_negative_andrews = negative_frequency_andrews.resample('1H', on='datetime').sum()
    only_negative_andrews = only_negative_andrews.reset_index()
    only_negative_andrews["sentiment"] = "negative"
    only_negative_trump = negative_frequency_trump.resample('0.05T', on='datetime').sum()
    only_negative_trump = only_negative_trump.reset_index()
    only_negative_trump["sentiment"] = "negative"
    frames = [only_positive_trump, only_negative_trump]
    sentiment_concat = pd.concat(frames)
    sentiment_concat['datetime'] = sentiment_concat['datetime'].astype(str)
    sentiment_group = sentiment_concat.groupby(['sentiment', "datetime"])
    sentiment_group_2 = pd.DataFrame(sentiment_group["Frequency"].sum())
    results1 = defaultdict(lambda: defaultdict(dict))
    for index, value in sentiment_group_2.itertuples():
        # walk the (sentiment, datetime) index tuple to build a nested dict
        for i, key in enumerate(index):
            if i == 0:
                nested = results1[key]
            elif i == len(index) - 1:
                nested[key] = value
            else:
                nested = nested[key]
    frames = [only_positive_andrews, only_negative_andrews]
    sentiment_concat = pd.concat(frames)
    sentiment_concat['datetime'] = sentiment_concat['datetime'].astype(str)
    sentiment_group = sentiment_concat.groupby(['sentiment', "datetime"])
    sentiment_group_2 = pd.DataFrame(sentiment_group["Frequency"].sum())
    results2 = defaultdict(lambda: defaultdict(dict))
    for index, value in sentiment_group_2.itertuples():
        for i, key in enumerate(index):
            if i == 0:
                nested = results2[key]
            elif i == len(index) - 1:
                nested[key] = value
            else:
                nested = nested[key]
    tweets = {"Trump": results1, "Andrews": results2}
    json_list.append(tweets)
    trump2 = tweets2.loc[tweets2["Subject"] == "Trump", :]
    trump_sentiment = trump2.groupby(['sentiment'])["Frequency"].count()
    total_sentiment = trump2["sentiment"].count()
    percentage_sentiment_trump = ((trump_sentiment / total_sentiment) * 100).round()
    percentage_trump = percentage_sentiment_trump.to_dict()
    andrews2 = tweets2.loc[tweets2["Subject"] == "Andrews", :]
    andrews_sentiment = andrews2.groupby(['sentiment'])["Frequency"].count()
    total_sentiment = andrews2["sentiment"].count()
    percentage_sentiment_andrews = ((andrews_sentiment / total_sentiment) * 100).round()
    percentage_andrews = percentage_sentiment_andrews.to_dict()
    percentage_dict = {"Trump": percentage_trump, "Andrews": percentage_andrews}
    json_list.append(percentage_dict)
    example = trump2.head(5)
    screen_name = example["Screen Name"].to_list()
    tweet_text = example["Tweet Text"].to_list()
    res = {screen_name[i]: tweet_text[i] for i in range(len(screen_name))}
    example2 = andrews2.head(5)
    screen_name2 = example2["Screen Name"].to_list()
    tweet_text2 = example2["Tweet Text"].to_list()
    res2 = {screen_name2[i]: tweet_text2[i] for i in range(len(screen_name2))}
    tweet_dict = {"Trump": res, "Andrews": res2}
    json_list.append(tweet_dict)
    return json_list
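# A self-contained sketch of the nesting idiom edit() uses twice above:
# group by (sentiment, datetime), then fold the resulting MultiIndex rows
# into a {sentiment: {datetime: count}} structure. The sample rows here are
# invented for illustration.
import pandas as pd
from collections import defaultdict

df = pd.DataFrame({
    "sentiment": ["positive", "positive", "negative"],
    "datetime": ["2020-10-01 10:00", "2020-10-01 10:00", "2020-10-01 11:00"],
    "Frequency": [1, 1, 1],
})
grouped = pd.DataFrame(df.groupby(["sentiment", "datetime"])["Frequency"].sum())

results = defaultdict(lambda: defaultdict(dict))
for index, value in grouped.itertuples():
    # index is the (sentiment, datetime) tuple; walk it to build the nesting
    for i, key in enumerate(index):
        if i == 0:
            nested = results[key]
        elif i == len(index) - 1:
            nested[key] = value
        else:
            nested = nested[key]

print(results["positive"]["2020-10-01 10:00"])  # 2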
#!/usr/bin/python

from ML import ML, parse_url
import argparse
import sys
import time

parser = argparse.ArgumentParser()
parser.add_argument(
    'url', metavar='url', type=str,
    help='http[s]://username:password@server.')
parser.add_argument(
    'table', metavar='table', type=str,
    help='The custom table to import data into.')
parser.add_argument(
    'file', metavar='data_file', type=str,
    help='Custom data file to import.')
parser.add_argument(
    '-timestamp', metavar='timestamp', type=str,
    help='timestamp for import.')
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args['url'])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

ml = ML(server, {'username': username, 'password': password})
r = ml.import_data(args['table'], args['file'], args['timestamp'])
if r.status_code != 201:  # exit if error
    print "Failed to submit the job to import data with error ", r.status_code, r.text
    sys.exit(1)
else:
    job_id = r.text
    print "Data import job is scheduled with job id = ", job_id

# Poll every 2 seconds, up to 10 times, until the job completes.
i = 0
while (i < 10):
    print "Check every 2 seconds to see if the job is complete."
    status = ml.scheduler_job_status(job_id)
    if status == 'COMPLETED':
        break
    else:
        time.sleep(2)
        i += 1
#!/usr/bin/python

from ML import ML, parse_url
import argparse
import sys
import time

parser = argparse.ArgumentParser()
parser.add_argument(
    'url', metavar='url', type=str,
    help='http[s]://username:password@server.')
parser.add_argument('object', metavar='object',
                    help='Interfaces|LSPs|Nodes|Demands.')
parser.add_argument('property', help='Property.')
parser.add_argument('keys', help='"|" delimited list of keys.')
parser.add_argument('from', help='from date YYMMDD_HHMM_UTC.')
parser.add_argument('to', help='to date YYMMDD_HHMM_UTC.')
parser.add_argument('-keyColumns',
                    help='Comma-delimited list of key columns.')
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args['url'])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

myml = ML(server, {'username': username, 'password': password})

date_pattern = '%y%m%d_%H%M_%Z'
date_from = time.strptime(args['from'], date_pattern)
date_to = time.strptime(args['to'], date_pattern)

if args['keyColumns']:
    print myml.time_series(args['object'], args['property'],
                           args['keys'].split("|"), date_from, date_to,
                           args['keyColumns'].split(","))
else:
    print myml.time_series(args['object'], args['property'],
                           args['keys'].split("|"), date_from, date_to)
#print myml.time_series("Interfaces", "TraffIn", ["AM_LA_BB2", "TenGigE0/2/2"])
#!/usr/bin/python

from ML import ML, parse_url
import argparse
import sys

parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('url', metavar='url', type=str,
                    help='http[s]://username:password@server.')
parser.add_argument('jid', metavar='jid', type=str, help='Report JobId.')
parser.add_argument('-columns', default=None,
                    help='Comma-delimited list of columns to display.')
parser.add_argument('-csv', help='Display in csv mode.', action='store_true')
parser.add_argument('-count', help='Number of rows to display', default=100)
parser.add_argument('-filtered', help='Use report output filters.',
                    action='store_true')
args = parser.parse_args()

try:
    (username, password, server) = parse_url(args.url)
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

myml = ML(server, {'username': username, 'password': password})
columns = map(int, args.columns.split(",")) if args.columns else None

if not args.filtered:
    r = myml.get_report(args.jid, columns, args.count)
else:
    r = myml.get_filtered_report(args.jid, columns, args.count)

if args.csv:
    r.print_csv()
else:
    r.print_col()
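# A hedged sketch of the same report fetch done programmatically rather
# than via argparse, using only the calls visible in the script above.
# The URL and job id are placeholders.
from ML import ML, parse_url

username, password, server = parse_url("https://admin:secret@ml.example.com")
ml = ML(server, {'username': username, 'password': password})
report = ml.get_report("42", None, 100)  # jid, columns, count
report.print_csv()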
#!/usr/bin/python

from ML import ML, parse_url
import argparse
import sys
import json

parser = argparse.ArgumentParser()
parser.add_argument('url', metavar='url', type=str,
                    help='http[s]://username:password@server.')
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args['url'])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

ml = ML(server, {'username': username, 'password': password})
print json.dumps(ml.props(), indent=4)
from flask import Flask, jsonify, Response, request
from ML import ML
from DB import DB
from Social import Social
from SessionManager import SessionManager
from flask_cors import CORS

app = Flask(__name__)
cors = CORS(app, resources={r"/*": {"origins": "*"}})

ml = ML()
db = DB()
social = Social([db, ml])
sessMan = SessionManager(db, social)


@app.route('/sessMan/getTweets', methods=['GET'])
def runGetTweets():
    PID = request.args.get('key')[1:]
    count = db.getCountPerDay(PID)[0][0]
    print(count)
    return sessMan.getTweets(PID, count), 200


@app.route('/sessMan/isRunning', methods=['GET'])
def profileIsRunning():
    PID = request.args.get('key')[1:]
    return str(sessMan.isProfileRunning(PID))


@app.route('/sessMan/stopProfile', methods=['GET'])
class ModelHandler:
    def __init__(self):
        self.spotify_api = SpotifyAPI()
        self.ml = ML()

    def _write_mp3s(self, path, track_dict):
        # download the preview mp3 and write it under <path>/<track id>.mp3
        content = self.spotify_api.get_mp3(track_dict['url'])
        if content:
            write_file(f'{path}/{track_dict["id"]}.mp3', content)

    def write_mp3s(self, tracks_dict, path):
        func = partial(self._write_mp3s, path)
        with Pool(4) as p:
            p.map(func, tracks_dict)

    def create_model(self, uid, tracks_dict):
        if uid not in os.listdir():
            os.mkdir(uid)
            os.mkdir(f"{uid}/liked")
        self.write_mp3s(tracks_dict, f"{uid}/liked")
        self.ml.train_model(f"{uid}/liked", path_to_save=f"{uid}/model", uid=uid)

    def classify_tracks(self, training_tracks, tracks_to_classify, search_term, uid):
        if uid not in os.listdir():
            return None
        if search_term not in os.listdir():
            # first request for this search term: download the training
            # tracks and train a model for the term
            os.mkdir(search_term)
            self.write_mp3s(training_tracks, f"{search_term}")
            self.ml.train_model(f"{search_term}", path_to_save=f"{search_term}/model")
        file_paths = [f"{uid}/liked/{track['id']}.mp3" for track in tracks_to_classify]
        track_ids = self.ml.classify_tracks(file_paths, f"{search_term}/model", search_term)
        return track_ids

    def curated_tracks(self, tracks_to_classify, uid):
        if f"{uid}" not in os.listdir() or "model" not in os.listdir(f"{uid}"):
            return None
        os.mkdir(f'{uid}/tmp')
        self.write_mp3s(tracks_to_classify, f"{uid}/tmp")
        file_paths = [f"{uid}/tmp/{track['id']}.mp3" for track in tracks_to_classify]
        track_ids = self.ml.classify_tracks(file_paths, f"{uid}/model", "liked")
        shutil.rmtree(f'{uid}/tmp')
        return track_ids

    def check_personal_model(self, user_id):
        # 1: model exists, 0: user dir exists but no model, -1: unknown user
        files = os.listdir()
        if user_id in files and 'model' in os.listdir(user_id):
            return 1
        elif user_id in files and 'model' not in os.listdir(user_id):
            return 0
        else:
            return -1
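# A short usage sketch for ModelHandler.curated_tracks, mirroring the test
# fixture earlier in this section. The uid and track entries are
# placeholders; a trained model must already exist under "<uid>/model" or
# curated_tracks returns None.
handler = ModelHandler()
tracks = [{
    "url": "https://p.scdn.co/mp3-preview/...",  # preview URL placeholder
    "id": "2takcwOaAZWiXQijPHIx7B",
}]
if handler.check_personal_model("user123") == 1:
    curated_ids = handler.curated_tracks(tracks, "user123")
else:
    curated_ids = None  # no personal model trained yet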
#!/usr/bin/python

from ML import ML, parse_url
import argparse
import sys
import json

parser = argparse.ArgumentParser()
parser.add_argument(
    'url', metavar='url', type=str,
    help='http[s]://username:password@server.')
parser.add_argument(
    'file', metavar='file.txt', type=str,
    help='Property json file to load.')
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args['url'])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

ml = ML(server, {'username': username, 'password': password})
ml.load_props_from_file(args['file'])
from ML import ML

'''
data found here: http://files.pushshift.io/reddit/
API docs here: https://github.com/pushshift/api

Don't request more than ~1/sec. Each request can return <500 terms.
Getting 1000 posts each for 100 users each from 4 subreddits
(i.e., 800 requests) took 48 minutes, so ~4s/request. Hmmm.
'''

ml = ML('/home/declan/Documents/code/reddit_DS/savedat/nyc_losangeles_unitedkingdom_greece_indonesia_japan_newzealand_1000users_24bins_00-16-17')
#ml = ML('/home/declan/Documents/code/reddit_DS/savedat/rizki_dat1/')
ml.addExtraDataSets('/home/declan/Documents/code/reddit_DS/savedat/rizki_dat1/')
#ml.addExtraDataSets('/home/declan/Documents/code/reddit_DS/savedat/rizki_dat2/')

#ml.prettyPrintDB(ml.df)
#ml.postAvgTimesByRegion()
#ml.simpleLinReg()
#ml.cyclicMetricSGD(alpha=10**-4, timesteps=4000, show_plot=False)

ml.NN1()
exit(0)

# unreachable below the exit(0); kept for reference
ml.cyclicMetricSGD(alpha=10**-3, timesteps=4000, show_plot=False)
ml.cyclicMetricSGD(alpha=10**-3, timesteps=40000, show_plot=False)
ml.cyclicMetricSGD(alpha=10**-5, timesteps=40000, show_plot=False)
#!/usr/bin/python

from ML import ML, parse_url
import argparse
import sys
import json

parser = argparse.ArgumentParser()
parser.add_argument('url', metavar='url', type=str,
                    help='http[s]://username:password@server.')
parser.add_argument('file', metavar='file.txt', type=str,
                    help='Property json file to load.')
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args['url'])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

ml = ML(server, {'username': username, 'password': password})
ml.load_myreports_from_file(args['file'])
#!/usr/bin/python

from ML import ML, parse_url
import argparse
import sys

parser = argparse.ArgumentParser()
parser.add_argument('url', metavar='url', type=str,
                    help='http[s]://username:password@server.')
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args['url'])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

ml = ML(server, {'username': username, 'password': password})
ml.flush_props()
#!/usr/bin/python

from ML import ML, parse_url
import argparse
import sys
import json

parser = argparse.ArgumentParser()
parser.add_argument(
    'url', metavar='url', type=str,
    help='http[s]://username:password@server.')
parser.add_argument(
    'table', metavar='jy_table1', type=str,
    help='Table name to be dropped.')
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args['url'])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

ml = ML(server, {'username': username, 'password': password})
r = ml.drop_table(args['table'])
if r.status_code == 200:
    print "Table", r.text, "is deleted successfully."
else:
    print "Failed to delete the table with error", r.status_code, r.text
#!/usr/bin/python

from ML import ML, parse_url
import argparse
import sys
import json

parser = argparse.ArgumentParser()
parser.add_argument('url', metavar='http[s]://username:password@server',
                    type=str, help='url: http[s]://username:password@server.')
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args['url'])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

ml = ML(server, {'username': username, 'password': password})
print json.dumps(ml.my_reports_definitions(), indent=4)
from ML import ML, parse_url
import argparse
import sys
import json

parser = argparse.ArgumentParser()
parser.add_argument(
    'url', metavar='url', type=str,
    help='http[s]://username:password@server.')
parser.add_argument(
    'table', metavar='custom_table', type=str,
    help='Table name to be updated.')
parser.add_argument(
    'column', metavar='custom_table_column', type=str,
    help='Column name to be updated.')
parser.add_argument(
    'status', metavar='true_false', type=str,
    help='Activate or deactivate the given column.')
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args['url'])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

ml = ML(server, {'username': username, 'password': password})
r = ml.update_column(args['table'], args['column'], args['status'])
if r.status_code == 200:
    print "Updated", args['column'], "status to", args['status'], "in", args['table'], "successfully."
else:
    print "Failed to update column status in the custom table with error", r.status_code, r.text
#!/usr/bin/python

from ML import ML, parse_url
import argparse
import sys

parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
    'url', metavar='url', type=str,
    help='http[s]://username:password@server.')
parser.add_argument('object', metavar='object',
                    help='Interfaces|LSPs|Nodes|Demands')
parser.add_argument('-filter', help='MATE Live filter')
parser.add_argument('-properties', help='Comma-delimited list of properties.')
parser.add_argument('-csv', help='Display in csv mode.', action='store_true')
parser.add_argument('-count', default=10, help='Count of objects to display.')
parser.add_argument('-sort-property',
                    help='Name of the property used to sort the results.')
parser.add_argument('-sort-direction', help='Sort direction.')
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args['url'])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

myml = ML(server, {'username': username, 'password': password})

if args['properties']:
    properties = args['properties'].split(",")
else:
    properties = None

r = myml.explore(args['object'], args['filter'], args['count'], properties,
                 args['sort_property'], args['sort_direction'])
if args['csv']:
    r.print_csv()
else:
    r.print_col()
def predict_all():
    '''get hourly prediction data on the given date; basically repeat the
    same prediction process 24 times'''
    u = connectMysql()
    v = getForecast()
    start_station = request.args.get("start_station")
    destination_station = request.args.get("destination_station")
    day_of_travel = request.args.get("day_of_travel")
    hour_of_travel = request.args.get("hour_of_travel")
    print(start_station, destination_station, day_of_travel, hour_of_travel)
    for i in range(len(u)):
        if start_station == u[i][2]:
            start_station_number = u[i][1]
            start_banking = u[i][6]
            start_bike_stands = u[i][8]
        if destination_station == u[i][2]:
            end_station_number = u[i][1]
            end_banking = u[i][6]
            end_bike_stands = u[i][8]
    day = pd.to_datetime(day_of_travel)
    day = day.weekday() + 1
    # encode the banking flags once, before the loop, so the string
    # comparison is not repeated against an already-encoded 0/1 value
    start_banking = 0 if start_banking == 'False' else 1
    end_banking = 0 if end_banking == 'False' else 1
    weather_info = []
    result_1 = {}
    result_2 = {}
    # hour changes from 0 to 23; the other arguments stay the same
    for clock in range(24):
        for i in range(len(v)):
            date = (v[i][0].split(" "))[0]
            hour = int(v[i][1])
            if int(hour_of_travel) == 23:
                if (str(pd.to_datetime(day_of_travel) + datetime.timedelta(days=1)).split(" ")[0] == str(date)) and hour == 0:
                    weather_info = v[i]
                    break
            elif str(date) == str(day_of_travel):
                if abs(int(clock) - hour) <= 1.5:
                    weather_info = v[i]
                    break
        if len(weather_info) == 0:
            weather_info = v[-1]  # fall back to the latest forecast
        temp = weather_info[2]
        feels_like = weather_info[3]
        temp_min = weather_info[4]
        temp_max = weather_info[5]
        pressure = weather_info[6]
        humidity = weather_info[7]
        wind_speed = weather_info[8]
        main_describe = weather_info[9]
        if main_describe == "Clouds":
            main_describe = 0
        elif main_describe == "Drizzle":
            main_describe = 1
        elif main_describe == "Rain":
            main_describe = 2
        else:
            main_describe = 3
        start_arr = [float(day), float(hour), float(start_station_number),
                     float(start_bike_stands), float(start_banking), float(main_describe),
                     float(temp), float(feels_like), float(temp_min), float(temp_max),
                     float(wind_speed), float(pressure), float(humidity)]
        end_arr = [float(day), float(hour), float(end_station_number),
                   float(end_bike_stands), float(end_banking), float(main_describe),
                   float(temp), float(feels_like), float(temp_min), float(temp_max),
                   float(wind_speed), float(pressure), float(humidity)]
        result_1[clock] = ML.predict_available_bike(start_arr)
        result_2[clock] = ML.predict_available_stands(end_arr)
    final = {
        "24hour_bikes": result_1,
        "24hour_stands": result_2,
    }
    return json.dumps(final)
# History:
# - 02/11/13 Jonathan Garzon initial version
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

from ML import ML, parse_url
import argparse
import sys

parser = argparse.ArgumentParser()
parser.add_argument('url', metavar='url', type=str,
                    help='http[s]://username:password@server.')
parser.add_argument('did', metavar='did', type=str,
                    help='Report definitionId.')
parser.add_argument('-filtered', help='Use report output filters.',
                    action='store_true')
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args['url'])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

ml = ML(server, {'username': username, 'password': password})
filtered = args['filtered']
jid = ml.last_job(args['did'])
if filtered:
    ml.get_filtered_report(jid, filtered, count=1000000).print_csv()
else:
    print ml.get_csv_file(jid)
def test_train_model(self):
    ml = ML()
    ml.train_model("test_music", "test_music_2", path_to_save="tester_model")
    assert "tester_model" in os.listdir()
from intcomm import IntComm
from ML import ML

if __name__ == "__main__":
    dance_model_path = "model_weights.json"
    dance_scaler_path = "dnn_std_scaler.bin"
    ml = ML(
        dance_scaler_path=dance_scaler_path,
        dance_model_path=dance_model_path,
    )

    # change this according to your serial port
    # 0: "/dev/ttyACM0"
    # 1: "/dev/ttyACM1"
    # 2: "/dev/ttyACM2"
    intcomm = IntComm(0)

    while True:
        data = intcomm.get_line()
        if len(data) == 0 or data[0] != "#":
            print("Invalid data:", data)
            continue
        data = data[1:].split(",")
        if len(data) == 10:
            yaw, pitch, roll, gyrox, gyroy, gyroz, accx, accy, accz, emg = data
            yaw, pitch, roll, gyrox, gyroy, gyroz, accx, accy, accz = (
                float(yaw),
                float(pitch),
                float(roll),
                float(gyrox),
                float(gyroy),
                float(gyroz),
                float(accx),
                float(accy),
                float(accz),
            )
#!/usr/bin/python

from ML import ML, parse_url
import argparse
import sys
import json

parser = argparse.ArgumentParser()
parser.add_argument(
    'url', metavar='url', type=str,
    help='http[s]://username:password@server.')
parser.add_argument(
    'tablename', metavar='demo', type=str,
    help='Table Name to add series columns.')
parser.add_argument(
    'file', metavar='jy_table3.json', type=str,
    help='Table Definition json file to load new columns.')
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args['url'])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

ml = ML(server, {'username': username, 'password': password})
r = ml.add_columns(args['tablename'], args['file'])
if r.status_code == 200:
    print "Added new column(s) in ", r.text, " successfully."
else:
    print "Failed to add new series in existing custom table with error", r.status_code, r.text
def __init__(self):
    self.spotify_api = SpotifyAPI()
    self.ml = ML()
#!/usr/bin/python

from ML import ML, parse_url
import argparse
import sys
import json
from time import sleep
from datetime import datetime

parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("url", metavar="url", type=str,
                    help="http[s]://username:password@server.")
parser.add_argument("-non-interactive", help="Suitable for script output.",
                    action="store_true", default=False)
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args["url"])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

ml = ML(server, {"username": username, "password": password})
myreports = ml.my_reports_list()

print "+{:s}+".format("-" * 96)
sys.stdout.write("| {:50s} | {:4s} | {:4s} | {:10s} | {:14s} |\n".format(
    "name", "did", "jid", "status", "time"))
print "+{:s}+".format("-" * 96)

total_time = None
for report in myreports:
    did = str(report["definitionId"])
    name = report["definitionName"]
    # run
    start = datetime.now()
    jid = ml.run_report(did)
#!/usr/bin/python

from ML import ML, parse_url
import argparse
import sys
import json

parser = argparse.ArgumentParser()
parser.add_argument(
    'url', metavar='url', type=str,
    help='http[s]://username:password@server.')
parser.add_argument(
    'file', metavar='jy_table3.json', type=str,
    help='Table Definition json file to load.')
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args['url'])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

ml = ML(server, {'username': username, 'password': password})
r = ml.create_table(args['file'])
if r.status_code == 200:
    print "New table", r.text, "is created successfully."
else:
    print "Failed to create the new table with error", r.status_code, r.text