Example #1
 def __init__(self):
     self.num_dancers = 0  # number of connected dancers
     self.is_idle = True
     self.counter = 0
     self.init_counter = 24
     self.start_time = time.time()
     dance_model_path = "model_weights.json"
     dance_scaler_path = "dnn_std_scaler.bin"
     self.ml = ML(
         dance_scaler_path=dance_scaler_path, dance_model_path=dance_model_path,
     )
     self.skip_initial_readings_cnt = [[50, 50], [50, 50], [50, 50]]
Example #2
 def test_curated_playlist(self):
     os.mkdir("1")
     tracks_to_classify = [{
         'url':
         "https://p.scdn.co/mp3-preview/b326e03624cb098d8387e17aa46669edac0d025a?cid=774b29d4f13844c495f206cafdad9c86",
         "id": "2takcwOaAZWiXQijPHIx7B"
     }]
     tracks_ids = ModelHandler().curated_tracks(tracks_to_classify, "1")
     assert tracks_ids is None
     ML().train_model("test_music", "test_music_2", path_to_save="1/model")
     track_ids = ModelHandler().curated_tracks(tracks_to_classify, "1")
     assert isinstance(track_ids, list)
     shutil.rmtree("1")
Example #3
 def __init__(self):
     self.spotify_api = SpotifyAPI()
     self.ml = ML()
Example #4
from intcomm import IntComm
from ML import ML

if __name__ == "__main__":
    dance_model_path = "model_weights.json"
    dance_scaler_path = "dnn_std_scaler.bin"

    ml = ML(
        dance_scaler_path=dance_scaler_path,
        dance_model_path=dance_model_path,
    )

    # change this according to your serial port
    # 0: "/dev/ttyACM0"
    # 1: "/dev/ttyACM1"
    # 2: "/dev/ttyACM2"
    intcomm = IntComm(0)

    while True:
        data = intcomm.get_line()
        if len(data) == 0 or data[0] != "#":
            print("Invalid data:", data)
            continue

        data = data[1:].split(",")
        if len(data) == 10:
            yaw, pitch, roll, gyrox, gyroy, gyroz, accx, accy, accz, emg = data

            yaw, pitch, roll, gyrox, gyroy, gyroz, accx, accy, accz = (
                float(yaw),
                float(pitch),
                float(roll),
                float(gyrox),
                float(gyroy),
                float(gyroz),
                float(accx),
                float(accy),
                float(accz),
            )
Example #5
parser.add_argument('object',
                    help='Interfaces|LSPs|Nodes|Demands.')
parser.add_argument('property', help='Property.')
parser.add_argument('keys', help='"|" delimited list of keys.')
parser.add_argument('from', help='from date YYMMDD_HHMM_UTC.')
parser.add_argument('to', help='to date YYMMDD_HHMM_UTC.')
parser.add_argument('-keyColumns',
                    help='list of key columns using comma to separate.')
args = vars(parser.parse_args())

try:
    (username, password, server) = parse_url(args['url'])
except (ValueError, TypeError) as e:
    print "invalid url"
    sys.exit(1)

myml = ML(server, {'username': username, 'password': password})
#r = myml.explore(args['object'], args['filter'], 100, {'SetupBW', 'SourceNode'})

date_pattern = '%y%m%d_%H%M_%Z'
date_from = time.strptime(args['from'], date_pattern)
date_to = time.strptime(args['to'], date_pattern)

if args['keyColumns']:
    print(myml.time_series(args['object'], args['property'],
                           args['keys'].split("|"), date_from, date_to,
                           args['keyColumns'].split(",")))
else:
    print(myml.time_series(args['object'], args['property'],
                           args['keys'].split("|"), date_from, date_to))
#print myml.time_series("Interfaces", "TraffIn", ["AM_LA_BB2", "TenGigE0/2/2"])
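
For reference, a hypothetical invocation of this script (the file name time_series.py is made up, and <url> stands for whatever credential format parse_url expects; the key values mirror the commented call above):

python time_series.py <url> Interfaces TraffIn "AM_LA_BB2|TenGigE0/2/2" 200101_0000_UTC 200102_0000_UTC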
Example #6
 def test_train_model(self):
     ml = ML()
     ml.train_model("test_music",
                    "test_music_2",
                    path_to_save="tester_model")
     assert "tester_model" in os.listdir()
Example #7
 def test_classify_tracks(self):
     ml = ML()
     ml.classify_tracks(['test_music/514q3otlT6HczfChuLDUSa.mp3'],
                        "tester_model", "test_music")
Example #8
from flask import Flask, jsonify, Response, request
from ML import ML
from DB import DB
from Social import Social
from SessionManager import SessionManager
from flask_cors import CORS

app = Flask(__name__)
cors = CORS(app, resources={r"/*": {"origins": "*"}})
ml = ML()
db = DB()
social = Social([db, ml])
sessMan = SessionManager(db, social)


@app.route('/sessMan/getTweets', methods=['GET'])
def runGetTweets():
    PID = request.args.get('key')[1:]
    count = db.getCountPerDay(PID)[0][0]
    print(count)
    return sessMan.getTweets(PID, count), 200


@app.route('/sessMan/isRunning', methods=['GET'])
def profileIsRunning():
    PID = request.args.get('key')[1:]
    return str(sessMan.isProfileRunning(PID))


@app.route('/sessMan/stopProfile', methods=['GET'])
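
A minimal client sketch for exercising the routes above, assuming the Flask dev server runs at its default http://localhost:5000; the host, port, and sample key value are assumptions, not taken from the original code:

import requests

BASE = "http://localhost:5000"  # assumed dev-server address

# The handlers strip the first character of `key` before using it as a PID,
# so the sample value below would be looked up as "12345".
resp = requests.get(f"{BASE}/sessMan/isRunning", params={"key": "@12345"})
print(resp.text)  # "True" or "False"

resp = requests.get(f"{BASE}/sessMan/getTweets", params={"key": "@12345"})
print(resp.status_code)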
Example #9
from collections import defaultdict

from ML import ML
import pandas as pd


def edit():
    json_list = []
    ##trump=pd.read_csv("trump_tweets_classified.csv")

    tweets = ML()

    tweets2 = tweets[[
        "id", "Created at", "Screen Name", "Tweet Text", "sentiment", "Subject"
    ]]
    tweets2["Frequency"] = 1

    # strip the " +0000" timezone offset from the date-time strings
    time_string = tweets2["Created at"].to_list()
    time_edited = []
    for item in time_string:
        item2 = item.replace(" +0000", "")
        time_edited.append(item2)

    tweets2["Created at"] = time_edited
    tweets2['Created at'] = pd.to_datetime(tweets2['Created at'],
                                           infer_datetime_format=True)

    tweets2['date'] = tweets2['Created at'].dt.date
    # most tweets fall on the same day; the time axis will be skewed if they do not
    date = pd.DataFrame(tweets2.groupby("date")["Frequency"].count())
    date = date.sort_values(by='Frequency', ascending=False)
    # to keep the graph readable, only display one day of data
    date = date.head(1)
    day = date.index.values[0]

    edited_time_tweets = tweets2.loc[tweets2["date"] == day, [
        "date", "Created at", "Screen Name", "Tweet Text", "sentiment",
        "Subject", "Frequency"
    ]]
    edited_time_tweets = edited_time_tweets.dropna()

    edited = edited_time_tweets.sort_values(by='Created at')
    edited = edited.rename(columns={"Created at": "datetime"})
    edited = edited.drop(columns=['date'])

    #json positive sentiment
    only_positive = edited.loc[edited["sentiment"] == "positive", :]
    positive_frequency = only_positive[["datetime", "Frequency", "Subject"]]

    positive_frequency_trump = positive_frequency.loc[
        positive_frequency["Subject"] == "Trump", :]
    positive_frequency_andrews = positive_frequency.loc[
        positive_frequency["Subject"] == "Andrews", :]

    only_positive_trump = positive_frequency_trump.resample(
        '0.05T', on='datetime').sum()
    only_positive_trump = only_positive_trump.reset_index()
    only_positive_trump["sentiment"] = "positive"

    only_positive_andrews = positive_frequency_andrews.resample(
        '1H', on='datetime').sum()
    only_positive_andrews = only_positive_andrews.reset_index()
    only_positive_andrews["sentiment"] = "positive"

    #json negative sentiment
    only_negative = edited.loc[edited["sentiment"] == "negative", :]
    negative_frequency = only_negative[["datetime", "Frequency", "Subject"]]

    negative_frequency_trump = negative_frequency.loc[
        negative_frequency["Subject"] == "Trump", :]
    negative_frequency_andrews = negative_frequency.loc[
        negative_frequency["Subject"] == "Andrews", :]

    only_negative_andrews = negative_frequency_andrews.resample(
        '1H', on='datetime').sum()
    only_negative_andrews = only_negative_andrews.reset_index()
    only_negative_andrews["sentiment"] = "negative"

    only_negative_trump = negative_frequency_trump.resample(
        '0.05T', on='datetime').sum()
    only_negative_trump = only_negative_trump.reset_index()
    only_negative_trump["sentiment"] = "negative"

    frames = [only_positive_trump, only_negative_trump]
    sentiment_concat = pd.concat(frames)
    sentiment_concat['datetime'] = sentiment_concat['datetime'].astype(str)

    sentiment_group = sentiment_concat.groupby(['sentiment', "datetime"])
    sentiment_group_2 = pd.DataFrame(sentiment_group["Frequency"].sum())

    results1 = defaultdict(lambda: defaultdict(dict))

    for index, value in sentiment_group_2.itertuples():
        for i, key in enumerate(index):
            if i == 0:
                nested = results1[key]
            elif i == len(index) - 1:
                nested[key] = value
            else:
                nested = nested[key]

    frames = [only_positive_andrews, only_negative_andrews]
    sentiment_concat = pd.concat(frames)
    sentiment_concat['datetime'] = sentiment_concat['datetime'].astype(str)

    sentiment_group = sentiment_concat.groupby(['sentiment', "datetime"])
    sentiment_group_2 = pd.DataFrame(sentiment_group["Frequency"].sum())

    results2 = defaultdict(lambda: defaultdict(dict))

    for index, value in sentiment_group_2.itertuples():
        for i, key in enumerate(index):
            if i == 0:
                nested = results2[key]
            elif i == len(index) - 1:
                nested[key] = value
            else:
                nested = nested[key]

    tweets = {"Trump": results1, "Andrews": results2}

    json_list.append(tweets)

    trump2 = tweets2.loc[tweets2["Subject"] == "Trump", :]

    trump_sentiment = trump2.groupby(['sentiment'])["Frequency"].count()
    total_sentiment = trump2["sentiment"].count()
    percentage_sentiment_trump = ((trump_sentiment / total_sentiment) *
                                  100).round()

    percentage_trump = percentage_sentiment_trump.to_dict()

    andrews2 = tweets2.loc[tweets2["Subject"] == "Andrews", :]

    andrews_sentiment = andrews2.groupby(['sentiment'])["Frequency"].count()
    total_sentiment = andrews2["sentiment"].count()
    percentage_sentiment_andrews = ((andrews_sentiment / total_sentiment) *
                                    100).round()

    percentage_andrews = percentage_sentiment_andrews.to_dict()

    percentage_dict = {
        "Trump": percentage_trump,
        "Andrews": percentage_andrews
    }

    json_list.append(percentage_dict)

    example = trump2.head(5)

    screen_name = example["Screen Name"].to_list()
    tweet_text = example["Tweet Text"].to_list()

    res = {screen_name[i]: tweet_text[i] for i in range(len(screen_name))}

    example2 = andrews2.head(5)

    screen_name2 = example2["Screen Name"].to_list()
    tweet_text2 = example2["Tweet Text"].to_list()

    res2 = {screen_name2[i]: tweet_text2[i] for i in range(len(screen_name2))}

    tweet_dict = {"Trump": res, "Andrews": res2}

    json_list.append(tweet_dict)

    return json_list
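
The two itertuples() loops above turn a two-level groupby result into a nested {sentiment: {datetime: frequency}} mapping. A self-contained sketch of that pattern, using made-up sample data:

from collections import defaultdict

import pandas as pd

df = pd.DataFrame({
    "sentiment": ["positive", "positive", "negative"],
    "datetime": ["2020-08-01 10:00", "2020-08-01 11:00", "2020-08-01 10:00"],
    "Frequency": [1, 1, 1],
})
grouped = pd.DataFrame(df.groupby(["sentiment", "datetime"])["Frequency"].sum())

results = defaultdict(lambda: defaultdict(dict))
for index, value in grouped.itertuples():
    nested = results
    for i, key in enumerate(index):
        if i == len(index) - 1:
            nested[key] = value  # innermost level stores the summed count
        else:
            nested = nested[key]  # walk down one level per index component

# results == {"positive": {"2020-08-01 10:00": 1, "2020-08-01 11:00": 1},
#             "negative": {"2020-08-01 10:00": 1}}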
Example #10
from ML import ML

'''
Data found here: http://files.pushshift.io/reddit/

API docs here: https://github.com/pushshift/api

Don't request more than ~1 per second. Each request can return up to 500 items.

Getting 1000 posts each for 100 users from each of 4 subreddits (i.e., 800
requests) took 48 minutes, so ~4 s/request. Hmmm.
'''

ml = ML('/home/declan/Documents/code/reddit_DS/savedat/nyc_losangeles_unitedkingdom_greece_indonesia_japan_newzealand_1000users_24bins_00-16-17')
#ml = ML('/home/declan/Documents/code/reddit_DS/savedat/rizki_dat1/')
ml.addExtraDataSets('/home/declan/Documents/code/reddit_DS/savedat/rizki_dat1/')
#ml.addExtraDataSets('/home/declan/Documents/code/reddit_DS/savedat/rizki_dat2/')
#ml.prettyPrintDB(ml.df)
#ml.postAvgTimesByRegion()
#ml.simpleLinReg()
#ml.cyclicMetricSGD(alpha=10**-4, timesteps=4000, show_plot=False)

ml.NN1()

exit(0)

ml.cyclicMetricSGD(alpha=10**-3, timesteps=4000, show_plot=False)
ml.cyclicMetricSGD(alpha=10**-3, timesteps=40000, show_plot=False)
ml.cyclicMetricSGD(alpha=10**-5, timesteps=40000, show_plot=False)
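
Given the rate limit noted in the docstring, a throttled fetch loop against the pushshift API might look like the sketch below; the endpoint follows the linked API docs, and the author name and page count are placeholders:

import time

import requests

URL = "https://api.pushshift.io/reddit/search/submission/"

before = None
for _ in range(2):  # two pages of 500 ~= the 1000 posts per user noted above
    params = {"author": "some_user", "size": 500, "sort": "desc"}
    if before is not None:
        params["before"] = before  # paginate backwards in time
    data = requests.get(URL, params=params).json()["data"]
    if not data:
        break
    before = data[-1]["created_utc"]
    time.sleep(1)  # stay near ~1 request/sec, per the note in the docstring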