def revgeocode(self, latitude, longtitude):
    """Reverse-geocode coordinates via the HERE API and persist the result.

    On success the stored "Location" setting is updated with the full
    address label and city; on any failure the previously stored city is
    returned unchanged (best-effort behaviour).

    Args:
        latitude: Latitude of the position to resolve.
        longtitude: Longitude of the position to resolve (parameter name
            kept as-is for backward compatibility with keyword callers).

    Returns:
        The resolved — or previously cached — city name.
    """
    url = "https://reverse.geocoder.api.here.com/6.2/reversegeocode.json"
    parameters = {
        "app_id": self.appID,
        "app_code": self.appCode,
        "mode": "retrieveAddress",
        "maxresults": "1",
        "prox": "{},{}".format(latitude, longtitude),
    }
    r = requests.get(url, params=parameters).json()
    old = Settings().getsettings("Location")
    try:
        # Hoist the deeply nested address record once instead of
        # re-walking the full response path for every field.
        address = r["Response"]["View"][0]["Result"][0]["Location"]["Address"]
        old["full"] = address["Label"]
        city = address["City"]
        old["city"] = city
        Settings().setsettings({"Location": old})
        self.logger("UPDATED LOCATION TO: {}".format(old), "alert", "red")
    except Exception:
        # Malformed/empty response: keep the previously stored city.
        city = old["city"]
        traceback.print_exc()
    return city
def trainmodel(self, inputdata, inputlabel):
    """Flatten one context sample, add it to the training set, retrain.

    Persists the feature names, the extended training features and the
    extended labels back to the settings store.
    """
    print("training model with new info")
    # Collapse the nested context dict into one flat feature dict.
    flatinputdata = self.flattencontextdict(inputdata)
    humanfeaturenames = [name for name in flatinputdata]
    print(humanfeaturenames)
    Settings().setdata(
        {"MACHINELEARNING": {"humanfeaturenames": humanfeaturenames}})
    # Grow the in-memory training set with the new sample, then refit.
    self.tfeatures.append(flatinputdata)
    self.tlabels.append(inputlabel)
    self.train("training")
    # Persist the updated training data for the next run.
    Settings().setdata(
        {"MACHINELEARNING": {"trainingfeatures": self.tfeatures}})
    Settings().setdata(
        {"MACHINELEARNING": {"traininglabels": self.tlabels}})
    return "Finished training"
def __init__(self):
    """Initialise the location component: tag, colours, HERE credentials."""
    self.tag = "location"
    self.i = 0
    self.string = " "
    # ANSI escape sequences for coloured terminal output.
    self.GREEN, self.BLUE = '\033[92m', '\033[94m'
    self.YELLOW, self.RED = '\033[93m', '\033[91m'
    self.ENDC = '\033[0m'
    # Credentials for the HERE geocoding API, read from the settings store.
    self.appID = Settings().getsettings("Credentials", "hereAppID")
    self.appCode = Settings().getsettings("Credentials", "hereAppCode")
def checkprefstop(self, lat, lon):
    """Return True if the preferred bus stop is within 500 m of (lat, lon).

    Args:
        lat: Current latitude.
        lon: Current longitude.

    Returns:
        True when one of the nearby stops matches the stored preferred
        stop's coordinates exactly; False otherwise (including when no
        stop is nearby at all).
    """
    # checking if preferred stop is within 500 meters of current location
    maindict = self.busstop(datetime.now(), lat, lon, radius="500")
    if maindict:
        # Fetch the stored stop once instead of hitting Settings twice.
        coords = Settings().getsettings("Personalia",
                                        "busstop")["coordinates"]
        buslat = float(coords["x"])
        buslon = float(coords["y"])
        for stop_id in maindict:  # renamed from `id` (shadowed builtin)
            stop = maindict[stop_id]
            if buslat == stop["lat"] and buslon == stop["lon"]:
                return True
    return False
def weather(self):
    """Fetch current weather for the stored location from OpenWeatherMap.

    Reads the city from redis (``self.r``), queries the API in metric
    units, and packages the result for display.

    Returns:
        dict with "data" (temperature, windspeed, cloudpercentage,
        iconurl, rain) and "metadata" (display hints), or
        ``{"Failure": "0"}`` when retrieval or parsing fails.
    """
    self.logger("getting weather data")
    API_KEY = Settings().getsettings("Credentials", "weatherApiKey")
    baseurl = "http://api.openweathermap.org/data/2.5/weather?q={}&units=metric&appid={}"
    city = self.r.get("location").decode()
    finalurl = baseurl.format(city, API_KEY)
    response = json.loads(requests.get(finalurl).text)
    try:
        temperature = response["main"]["temp"]
        windspeed = response["wind"]["speed"]
        cloudpercentage = response["clouds"]["all"]
        # "rain" is only present while raining; "1h" is the last-hour
        # volume.  Fall back to the string "None" for display.
        rain = response.get("rain")
        if rain:
            rain = rain.get("1h")
        if rain is None:  # fixed: identity comparison, was `== None`
            rain = "None"
        icon = response["weather"][0]["icon"]
        iconurl = "http://openweathermap.org/img/wn/{}@2x.png".format(icon)
        weather = {"temperature": temperature, "windspeed": windspeed,
                   "cloudpercentage": cloudpercentage, "iconurl": iconurl,
                   "rain": rain}
        interpretation = {"title": "temperature", "subtext": "windspeed",
                          "main": {"image": "iconurl", "test": "test"}}
        return {"data": weather, "metadata": interpretation}
    except Exception:
        self.logger("Failed to retreive weather!\nRetrieval URL: " + finalurl,
                    "debug", "red")
        traceback.print_exc()
        return {"Failure": "0"}  # think of error codes
def convert(self, dataset, processtype="predicting"):
    """Encode each dict sample in *dataset* as a list of integer feature ids.

    Unknown feature values are assigned the next free id and recorded in
    both the forward (``humanfeatures``) and inverse
    (``inversehumanfeatures``) maps; the forward map is persisted.

    Args:
        dataset: list whose dict elements are replaced, in place, by
            their integer encodings (non-dict elements are left as-is).
        processtype: "predicting" logs unknown values; "training" adds
            them silently.

    Returns:
        The (mutated) dataset.
    """
    print("Converting...")
    print(self.humanfeatures)
    for index, data in enumerate(dataset):
        tmplist = []
        if isinstance(data, dict):
            for entry in data:
                value = data[entry]
                if value not in self.humanfeatures:
                    if processtype == "predicting":
                        print(f"don't know value {value}")
                        # set flag so I know to ask the user about it
                    newvalue = len(self.humanfeatures)
                    tmplist.append(newvalue)
                    self.humanfeatures[value] = newvalue
                    # BUGFIX: key the inverse map on the id actually
                    # assigned.  Previously len() was re-taken *after*
                    # the insertion above, leaving the inverse mapping
                    # off by one relative to the forward mapping.
                    self.inversehumanfeatures[newvalue] = value
                else:
                    tmplist.append(self.humanfeatures[value])
            # Replace the dict with its encoding at the same position
            # (was remove()+insert(); direct assignment avoids the
            # first-equal-occurrence pitfall and keeps enumerate valid).
            dataset[index] = tmplist
    Settings().setdata(
        {"MACHINELEARNING": {
            "humanfeatures": self.humanfeatures
        }})
    print("Done converting..")
    return dataset
def predict(self, contextinput):
    """Encode one context sample and classify it with the pickled model.

    Loads the decision tree from ``data/datamodel.pickle``, predicts the
    label for *contextinput*, renders the tree visualisation, and
    returns the predicted label.
    """
    # Encode and pad the sample exactly like the training data was.
    dataset = self.normalize(self.convert([contextinput], "predicting"))
    with open("data/datamodel.pickle", "rb") as f:
        clf = pickle.load(f)
    featurenames = Settings().getdata("MACHINELEARNING",
                                      "humanfeaturenames")
    result = clf.predict(dataset)[0]
    self.visualise(clf, featurenames)
    return result
def __init__(self):
    """Initialise the context learner; load persisted training state.

    Falls back to empty containers when nothing has been persisted yet.
    """
    self.tag = "context"
    self.datasize = 0        # widest sample seen so far
    self.trainingsize = 0    # sample width the model was trained with
    self.features = []
    self.humanfeatures = {}
    self.inversehumanfeatures = {}
    self.labels = []
    self.getconfirmation = False
    # Previously persisted training state (None on first run).
    self.tfeatures = Settings().getdata("MACHINELEARNING",
                                        "trainingfeatures")
    self.tlabels = Settings().getdata("MACHINELEARNING", "traininglabels")
    self.humanfeatures = Settings().getdata("MACHINELEARNING",
                                            "humanfeatures")
    if self.tfeatures is None:  # fixed: was `== None`
        self.tfeatures = []
        self.tlabels = []
    if self.humanfeatures is None:
        # Guarded independently: humanfeatures could be unset even when
        # training features exist (previously it was only reset together
        # with tfeatures, leaving it None and crashing convert()).
        self.humanfeatures = {}
    self.trainingfeaturesaslist = []
def normalize(self, dataset, datatype="normal"):
    """Pad every sample in *dataset* to a common length with 9999 fillers.

    In "normal" mode a new sample wider than the stored training size
    triggers padding of the training set plus a retrain.  In "training"
    mode the resulting width is persisted as the new baseline and the
    normalized dataset is stored.

    Args:
        dataset: list of feature lists (or their string representations).
        datatype: "normal" for prediction-time input, "training" when
            normalizing the training set itself.

    Returns:
        The padded dataset.
    """
    print("Normalizing..")
    if datatype == "normal":
        if len(
                dataset[0]
        ) > self.trainingsize:  # check if new data is larger than training sample(it will be just one set, as a list)
            self.addtotraining(len(dataset[0]))
            self.train("retrain")
    newdataset = []
    for data in dataset:
        #if type(data) == str:
        # NOTE(review): eval(str(...)) round-trips list reprs back into
        # lists; assumes every entry is a list or its string form, and
        # eval on untrusted data is unsafe — confirm the inputs.
        data = eval(str(data))
        newdataset.append(data)
        if len(data) > self.datasize:
            self.datasize = len(data)  # track the widest sample seen
    dataset = newdataset
    trainingdata = Settings().getdata("MACHINELEARNING", "trainingfeatures")
    for data in dataset:
        print("comparing datalength!")
        while len(data) != self.datasize:
            if len(data) < self.datasize:
                print("new data is smaller!")
                data.append(9999)  # 9999 acts as the "missing" filler value
            else:
                print("new data is larger!")
                # NOTE(review): this branch pads the stored training rows
                # but never shrinks `data`, so the enclosing while
                # condition does not change here — verify it terminates.
                for tdata in trainingdata:
                    while len(data) != len(tdata):
                        tdata.append(9999)
    if datatype == "training":
        self.trainingsize = self.datasize  # set training sample size
        Settings().setdata(
            {"MACHINELEARNING": {
                "trainingsize": self.trainingsize
            }})
        self.trainingfeaturesaslist = dataset
        Settings().setdata(
            {"MACHINELEARNING": {
                "normalizeddataset": dataset
            }})
    return dataset
def get_route(self, address="pensylvania avenue 14", time=None,
              arrival_choice=0):
    """Plan a transit route starting from the preferred bus stop.

    Args:
        address: Placeholder default; immediately overwritten with the
            stored home address (parameter kept for compatibility).
        time: Departure/arrival datetime; defaults to "now".  (The old
            ``time=datetime.now()`` default was evaluated once at import
            time, freezing the timestamp for every later call.)
        arrival_choice: 0 to treat *time* as departure, 1 as arrival.

    Returns:
        The list of route sections produced by ``routing``.
    """
    if time is None:
        time = datetime.now()
    # get home address
    address = Settings().getsettings("Personalia", "Address")["full"]
    # Preferred bus-stop coordinates, fetched once.
    busstop = Settings().getsettings("Personalia", "busstop")
    lat = busstop["coordinates"]["x"]
    lon = busstop["coordinates"]["y"]
    dep_coords = "{}, {}".format(lat, lon)
    # BUGFIX: `realdeparture` was only present as a commented-out
    # assignment, so the routing call below raised NameError.
    realdeparture = time
    # get actual route
    routelist = self.routing(dep_coords=dep_coords,
                             time=realdeparture,
                             arrival_choice=arrival_choice)
    for section in routelist:
        self.logger(section)
    return routelist
def train(self, processtype="normal"):
    """Fit a decision tree on the training data and pickle it to disk.

    ``processtype="retrain"`` reuses the stored, already-resized
    normalized dataset; any other value re-encodes ``self.tfeatures``
    from scratch.
    """
    preclf = tree.DecisionTreeClassifier()
    if processtype == "retrain":
        print("retraining with new data..")
        # use special dataset that had it's samples updated to the
        # correct size
        raw = Settings().getdata("MACHINELEARNING", "normalizeddataset")
    else:
        raw = self.convert(self.tfeatures, "training")
    dataset = self.normalize(raw, "training")
    clf = preclf.fit(dataset, self.tlabels)
    # Persist the fitted model for later predict() calls.
    with open("data/datamodel.pickle", "wb") as f:
        pickle.dump(clf, f)
def search(self, check=True, numtocheck=0):
    """Scan the nyaa.si RSS feed for new 1080p episodes of followed shows.

    Args:
        check: When False, save and return the first match without
            comparing it against the previously stored show.
        numtocheck: How many of the most recent feed entries to walk
            (0 means only the newest; "all" means the whole feed).

    Returns:
        A result dict for a newly aired episode, "empty" when nothing
        new was found, or ``(endvalue, fulllist)`` in loop mode.
    """
    base = f"https://nyaa.si/?page=rss&q={self.publishchoice}+%2B+[1080p]&c=1_2&f=2"
    #numtocheck += 1
    #feed = feedparser.parse("https://nyaa.si/?page=rss&c=1_2&f=2")
    feed = feedparser.parse(base)
    if numtocheck != 0 and type(numtocheck) == int:
        #feed.entries.reverse()
        numtocheck -= 1
        # Loop mode: collect results instead of returning on first hit.
        self.loop = True
        self.logger(f"GOING TO LOOP {numtocheck} TIMES!", "alert", "red")
    # Build the list of followed shows from the comma-separated setting,
    # trimming the single leading space left by the ", " separator.
    followlist = []
    fakelist = Settings().getsettings("Anime", "shows").split(", ")
    for show in fakelist:
        if show[0] == " ":
            show = show[1:]
        followlist.append(show)
    if numtocheck == "all":
        numtocheck = len(feed.entries)
    endvalue = ""
    index = len(feed.entries)
    mincutoff = index - numtocheck  # entries older than this are skipped
    for entry in feed.entries:
        if index < mincutoff:
            # Walked past the requested window: persist what was
            # collected and return it.
            msg = {"siteshows": self.fulllist}
            self.r.set("siteshows", json.dumps(self.fulllist))
            return endvalue, self.fulllist
        if mincutoff <= index >= 0:  # i.e. mincutoff <= index AND index >= 0
            thing = dict(entry)
            keylist = list(thing.keys())
            title = thing["title"]
            #self.logger(f"working on {title}", "debug", "red")
            #self.title = title
            # ([publisher]) (episode name) - (episode number) ([quality])(extension)
            search = r"(\[.*\]) (.*) - (.*) (\[.*\])(.*)"
            try:
                sstring = re.search(search, title)
                quality = sstring.group(4)
                epname = sstring.group(2)
                publisher = sstring.group(1)
                if quality == "[1080p]" and epname in followlist:
                    epname = sstring.group(2)
                    epnum = sstring.group(3)
                    # Hard-coded season split: episodes past 63 are S4.
                    if epname == "Boku no Hero Academia" and int(
                            epnum) > 63:
                        epname = "Boku no Hero Academia S4"
                        epnum = int(epnum) - 63
                    airingshow = epname
                    extension = sstring.group(5)
                    fullname = "{} - {}{}".format(epname, epnum, extension)
                    folder = epname
                    self.recode(folder, fullname)
                    link = thing["link"]
                    size = thing["nyaa_size"]
                    imagelink = self.getimage(airingshow)
                    msg = {
                        "title": airingshow,
                        "episode": str(epnum),
                        "imagelink": imagelink
                    }
                    interpretation = {
                        "title": "title",
                        "subtext": "episode",
                        "main": {
                            "image": "imagelink"
                        }
                    }
                    resultdict = {"data": msg, "metadata": interpretation}
                    # Seed the list so [-1] below is safe, then append
                    # only when this result differs from the last one.
                    if len(self.fulllist) < 1:
                        self.fulllist.append(resultdict)
                    if self.fulllist[-1] != resultdict:
                        self.fulllist.append(resultdict)
                    if self.loop:
                        #self.fulllist.append(resultdict)
                        self.logger(f"Working on index {index}")
                        index -= 1
                        continue
                    if check == False:
                        self.logger(f"saved anime: {airingshow}")
                        self.r.set("lastshow", json.dumps(msg))
                        return resultdict
                    # Compare against the last show saved in redis.
                    check = json.loads(
                        self.r.get("lastshow").decode()).get("title")
                    if check != airingshow:
                        self.logger("New show aired!", "info")
                        self.r.set("lastshow", json.dumps(msg))
                        # download the show
                        self.download(folder, fullname, link)
                        # save current last show
                        self.logger(
                            "Wrote {} to database.".format(airingshow),
                            "info")
                        endvalue = resultdict
                    else:
                        self.logger(
                            "Already downloaded {}.".format(airingshow),
                            "info")
                        endvalue = "empty"
                    index -= 1
                else:
                    if not self.loop:
                        self.logger("Nothing new.")
                    endvalue = "empty"
            except Exception as e:
                # NOTE(review): `e != AttributeError` compares an
                # exception *instance* to a class — always unequal, so
                # every exception is logged.  Probably meant isinstance().
                if e != AttributeError:
                    self.logger(e, "debug", "red")
                endvalue = "empty"
            if not self.loop:
                index -= 1
    return endvalue
def routing(self,
            dep_coords="52.103029, 5.241949",
            arr_coords="52.359176, 4.909479",
            time=datetime.now(),
            arrival_choice=0):
    """Query the HERE public-transit API and build a list of route sections.

    When the preferred bus stop is near the departure point, the route is
    computed from that stop instead and a synthetic "walk from home"
    section is prepended.

    Args:
        dep_coords: "lat, lon" departure string.
        arr_coords: "lat, lon" arrival string.
        time: Departure/arrival datetime.  NOTE(review): this default is
            evaluated once at import time, freezing the timestamp —
            callers should pass `time` explicitly.
        arrival_choice: 0 treats *time* as departure, 1 as arrival.

    Returns:
        List of per-section lists: [departure location, departure time,
        transport category, transport name, direction, duration,
        distance, arrival location, arrival time].
    """
    routelist = []
    # first check if preferred stop is near
    lat, lon = [float(i) for i in dep_coords.split(", ")]
    prefnear = self.checkprefstop(lat, lon)
    self.logger(prefnear, "debug", "yellow")
    if prefnear:
        # get bus coords
        lat = Settings().getsettings("Personalia",
                                     "busstop")["coordinates"]["x"]
        lon = Settings().getsettings("Personalia",
                                     "busstop")["coordinates"]["y"]
        dep_coords = "{}, {}".format(lat, lon)
        # get preferred busstop name
        name = Settings().getsettings("Personalia", "busstop")["name"]
        # get busstop walking time location
        wtstring = Settings().getsettings("Personalia",
                                          "busstop")["walking time"]
        # Stored as "MM:SS"; only whole minutes are used below.
        pwt = datetime.strptime(wtstring, "%M:%S").time()
        walking_time = timedelta(minutes=pwt.minute)
    url = "https://transit.api.here.com/v3/route.json"  # this is the public transit API
    # NOTE(review): api_id / api_key are module-level globals defined
    # outside this block — confirm they are in scope.
    parameters = {
        "app_id": api_id,
        "app_code": api_key,
        "dep": dep_coords,
        "arr": arr_coords,
        "arrival": arrival_choice,
        "time": time.isoformat(),
        "routingMode": "realtime",
        "max": 6
    }  # max number of results
    result = requests.get(url, params=parameters)
    if arrival_choice == 0:
        arrival_string = "departure"
    else:
        arrival_string = "arrival"
    self.logger("Going for {} at {}".format(arrival_string, time))
    result = json.loads(result.text)["Res"]
    #self.logger(result)
    connections = result["Connections"]["Connection"]
    #self.logger(connections)
    # Prefer the connection departing from the favourite bus stop; if
    # none matches, `thing` is simply the last connection examined.
    for thing in connections:
        try:
            busid = thing["Dep"].get("Stn")["id"]
        except:
            busid = None
            continue
        if busid == "215150824":  # fav bus
            break
    connection = thing
    transfers = connection["transfers"]
    duration = connection["duration"]
    start_time = connection["Dep"]["time"]
    arrival_time = connection["Arr"]["time"]
    self.logger(
        "Your journey will start at {}, end at {}, and take {}.".format(
            self.parsetime(start_time), self.parsetime(arrival_time),
            self.parseduration(duration)))
    self.logger("___________________________________________")
    for index, section in enumerate(connection["Sections"]["Sec"]):
        secmode = section["mode"]  # 20 appears to mean "walk" — verify
        secdeptime = self.parsetime(
            section["Dep"]["time"])[1]  # departure time
        if secmode == 20 and index == 0:
            secdeploc = "Current location"
        else:
            secdeploc = section["Dep"]["Stn"].get(
                "name")  # departure station name
        sectransportname = section["Dep"]["Transport"].get(
            "name", "\b")  # transport name
        if "Stn" in section["Arr"].keys():
            secarrsloc = section["Arr"]["Stn"].get(
                "name")  # arrival station name
        else:
            try:
                secarrsloc = section["Dep"].get("AP")["name"]
            except:
                secarrsloc = section["Dep"]["Stn"]["name"]
        if secmode != 20:
            self.logger(self.transporttypes[str(secmode)])
            sectranscat = " using " + self.transporttypes[str(
                secmode)]  # transport category
            sectransportdir = section["Dep"]["Transport"][
                "dir"]  # transport direction
        else:
            sectranscat = ". Walk"  # transport category
            # look into the future a bit
            if index + 1 < len(connection["Sections"]["Sec"]):
                #futsecmode = connection["Sections"]["Sec"][index + 1]["mode"]
                futsecdeploc = connection["Sections"]["Sec"][
                    index + 1]["Dep"]["Stn"].get("name")
                sectransportdir = futsecdeploc
            else:
                sectransportdir = secdeploc
        secduration = self.parseduration(
            section["Journey"]["duration"])  # journey duration
        secdistance = section["Journey"]["distance"]  # journey distance
        secarrtime = self.parsetime(
            section["Arr"]["time"])[1]  # time of arrival
        if str(secmode) in ["0", "1", "2", "3"]:
            sectransportname = "{} on platform {}".format(
                sectransportname, section["Arr"]
                ["platform"])  # arrival platform, if applicable
        secstring = "You depart from {} at {}{} {} in the direction of {}. your journey will take {} and cross {} meters. You will arrive at {} at around {}.".format(
            secdeploc, secdeptime, sectranscat, sectransportname,
            sectransportdir, secduration, secdistance, secarrsloc,
            secarrtime)
        self.logger(secstring)
        sectionlist = [
            secdeploc, secdeptime, sectranscat, sectransportname,
            sectransportdir, secduration, secdistance, secarrsloc,
            secarrtime
        ]
        routelist.append(sectionlist)
        self.logger("-----------")
    if prefnear:
        # Shift the departure earlier by the walking time to the stop
        # and prepend a synthetic walking section from "Home".
        realdeparture = datetime.strptime(routelist[0][1],
                                          "%H:%M:%S") - walking_time
        self.logger("adding {} to base time of {} to get {}.".format(
            walking_time, time, realdeparture))
        time = realdeparture
        realdeparture = str(realdeparture).split(" ")[1].split(".")[0]
        wtlist = str(walking_time).split(":")
        walking_time = self.parseduration("PT{}M".format(wtlist[1]))
        self.logger(walking_time)
        routelist.insert(0, [
            "Home", realdeparture, self.transporttypes["20"], "\b", name,
            walking_time, "\b", name
        ])
    self.logger("_________________________________")
    return routelist
def getbusstops(self, time=None):
    """Return bus stops within 500 m of the stored current address.

    Args:
        time: Datetime used by the stop lookup; defaults to "now".
            (The old ``time=datetime.now()`` default was evaluated once
            at import, freezing the timestamp for every call.)

    Returns:
        Dict of nearby stops as produced by ``busstop``.
    """
    if time is None:
        time = datetime.now()
    address = Settings().getsettings("Location")["full"]
    self.logger("Using address: {}".format(address))
    lat, lon = Location().geocode(address)
    # 500 m search radius around the geocoded position.
    maindict = self.busstop(time, lat, lon, radius="500")
    return maindict
class Context:
    """Learns and predicts a user "context state" with a decision tree.

    Human-readable feature values are mapped to integer ids
    (``humanfeatures`` / ``inversehumanfeatures``), samples are padded to
    a common width, and the fitted model is pickled to
    ``data/datamodel.pickle``.  Training state is persisted via Settings.
    """

    def __init__(self):
        """Load persisted training state; fall back to empty structures."""
        self.tag = "context"
        self.datasize = 0        # widest sample seen so far
        self.trainingsize = 0    # sample width the model was trained with
        self.features = []
        self.humanfeatures = {}
        self.inversehumanfeatures = {}
        self.labels = []
        self.getconfirmation = False
        # Previously persisted training state (None on first run).
        self.tfeatures = Settings().getdata("MACHINELEARNING",
                                            "trainingfeatures")
        self.tlabels = Settings().getdata("MACHINELEARNING",
                                          "traininglabels")
        self.humanfeatures = Settings().getdata("MACHINELEARNING",
                                                "humanfeatures")
        # NOTE(review): "== None" should be "is None"; also humanfeatures
        # is only reset together with tfeatures, so it can stay None when
        # only the feature map is missing — verify.
        if self.tfeatures == None:
            self.tfeatures = []
            self.tlabels = []
            self.humanfeatures = {}
        self.trainingfeaturesaslist = []

    def logger(self, msg, type="info", colour="none"):
        """Delegate to the shared application logger under this tag."""
        mainlogger().logger(self.tag, msg, type, colour)

    def convert(self, dataset, processtype="predicting"):
        """Encode each dict sample in *dataset* as a list of integer ids.

        Unknown values get the next free id; the forward map is
        persisted.  The dataset is mutated in place and returned.
        """
        print("Converting...")
        print(self.humanfeatures)
        for index, data in enumerate(dataset):
            tmplist = []
            if type(data) == dict:
                for entry in data:
                    value = data[entry]
                    if value not in self.humanfeatures:
                        if processtype == "predicting":
                            print(f"don't know value {value}")
                            # set flag so I know to ask the user about it
                        newvalue = len(self.humanfeatures)
                        tmplist.append(newvalue)
                        self.humanfeatures[value] = len(self.humanfeatures)
                        # NOTE(review): len() is re-taken *after* the
                        # insertion above, so this inverse key is one
                        # higher than the id just assigned — verify.
                        self.inversehumanfeatures[len(
                            self.humanfeatures)] = value
                        #Settings().setdata({"MACHINELEARNING":{"humanfeatures":self.humanfeatures}})
                    else:
                        newvalue = self.humanfeatures[value]
                        tmplist.append(newvalue)
                # Swap the dict for its encoding at the same position
                # (length unchanged, so enumerate stays consistent).
                dataset.remove(data)
                dataset.insert(index, tmplist)
        Settings().setdata(
            {"MACHINELEARNING": {
                "humanfeatures": self.humanfeatures
            }})
        print("Done converting..")
        return dataset

    def addtotraining(self, numbertoadd):
        """Pad every cached training sample up to *numbertoadd* entries."""
        for dataset in self.trainingfeaturesaslist:
            while len(dataset) < numbertoadd:
                dataset.append(9999)  # 9999 is the "missing" filler value

    def normalize(self, dataset, datatype="normal"):
        """Pad samples to a common width; persist it when training."""
        print("Normalizing..")
        if datatype == "normal":
            if len(
                    dataset[0]
            ) > self.trainingsize:  # check if new data is larger than training sample(it will be just one set, as a list)
                self.addtotraining(len(dataset[0]))
                self.train("retrain")
        newdataset = []
        for data in dataset:
            #if type(data) == str:
            # NOTE(review): eval(str(...)) round-trips list reprs back
            # into lists; unsafe on untrusted data — confirm inputs.
            data = eval(str(data))
            newdataset.append(data)
            if len(data) > self.datasize:
                self.datasize = len(data)  # track the widest sample
        dataset = newdataset
        trainingdata = Settings().getdata("MACHINELEARNING",
                                          "trainingfeatures")
        for data in dataset:
            print("comparing datalength!")
            while len(data) != self.datasize:
                if len(data) < self.datasize:
                    print("new data is smaller!")
                    data.append(9999)
                else:
                    print("new data is larger!")
                    # NOTE(review): pads stored training rows but never
                    # shrinks `data`, so the enclosing while condition
                    # is unchanged here — verify it terminates.
                    for tdata in trainingdata:
                        while len(data) != len(tdata):
                            tdata.append(9999)
        if datatype == "training":
            self.trainingsize = self.datasize  # set training sample size
            Settings().setdata(
                {"MACHINELEARNING": {
                    "trainingsize": self.trainingsize
                }})
            self.trainingfeaturesaslist = dataset
            Settings().setdata(
                {"MACHINELEARNING": {
                    "normalizeddataset": dataset
                }})
        return dataset

    def train(self, processtype="normal"):
        """Fit a decision tree on the training data and pickle it."""
        preclf = tree.DecisionTreeClassifier()
        if processtype == "retrain":
            print("retraining with new data..")
            dataset = Settings().getdata(
                "MACHINELEARNING", "normalizeddataset"
            )  # use special dataset that had it's samples updated to the correct size
            dataset = self.normalize(dataset, "training")
        else:
            dataset = self.convert(self.tfeatures, "training")
            dataset = self.normalize(dataset, "training")
        clf = preclf.fit(dataset, self.tlabels)
        # Persist the fitted model for predict().
        with open("data/datamodel.pickle", "wb") as f:
            pickle.dump(clf, f)

    def predict(self, contextinput):
        """Encode one sample, classify it with the pickled model."""
        dataset = self.convert([contextinput], "predicting")
        dataset = self.normalize(dataset)
        with open("data/datamodel.pickle", "rb") as f:
            clf = pickle.load(f)
        result = clf.predict(dataset)[0]
        featurenames = Settings().getdata("MACHINELEARNING",
                                          "humanfeaturenames")
        self.visualise(clf, featurenames)
        return result

    def flattencontextdict(self, inputdict):
        """Flatten a nested context dict into one level of string values.

        "true"/"false" strings become booleans before being re-stringed,
        and lat/lon values are truncated to a few decimals.
        """
        tmpdictlist = []
        for minidict in inputdict:
            entry = minidict
            value = inputdict[minidict]
            if type(value) == dict:
                for val in value:
                    val2 = value[val]
                    # make true and false statements into proper booleans
                    if val2 == "true":
                        value[val] = True
                    if val2 == "false":
                        value[val] = False
                    if val == "lat" or val == "lon":
                        # make coordinates 3 decimals
                        # NOTE(review): [:4] actually keeps 4 decimals.
                        coordinate = str(val2).split(".")
                        decimal = coordinate[1][:4]
                        newcoordinate = float(coordinate[0] + "." + decimal)
                        value[val] = newcoordinate
                    # Everything is stored as a string in the end.
                    value[val] = str(value[val])
                tmpdictlist.append(value)
        newdict = {}
        for flatdict in tmpdictlist:
            newdict.update(flatdict)
        return newdict

    def trainmodel(self, inputdata, inputlabel):
        """Add one flattened sample to the training set, retrain, persist."""
        print("training model with new info")
        # flatten data
        flatinputdata = self.flattencontextdict(inputdata)
        humanfeaturenames = []
        for name in flatinputdata:
            humanfeaturenames.append(name)
        print(humanfeaturenames)
        Settings().setdata(
            {"MACHINELEARNING": {
                "humanfeaturenames": humanfeaturenames
            }})
        # get trainingfeatures and labels
        #self.tfeatures = Settings().getdata("MACHINELEARNING", "trainingfeatures")
        #self.tlabels = Settings().getdata("MACHINELEARNING", "traininglabels")
        self.tfeatures.append(flatinputdata)
        self.tlabels.append(inputlabel)
        self.train("training")
        Settings().setdata(
            {"MACHINELEARNING": {
                "trainingfeatures": self.tfeatures
            }})
        Settings().setdata(
            {"MACHINELEARNING": {
                "traininglabels": self.tlabels
            }})
        return "Finished training"

    def getprediction(self, inputdata):
        """Flatten *inputdata* and return the predicted context state."""
        print(f"Using: {inputdata} to predict contextstate")
        flatinputdata = self.flattencontextdict(inputdata)
        result = self.predict(flatinputdata)
        return result

    def visualise(self, clf, featurenames):
        """Render the fitted tree to ``ml.pdf`` via graphviz/pydotplus."""
        dot_data = StringIO()
        tree.export_graphviz(clf,
                             out_file=dot_data,
                             feature_names=featurenames)
        graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
        graph.write_pdf("ml.pdf")

    def handler(self, data):
        """Entry point for incoming context messages; currently just logs."""
        self.logger(f"Context handler is running!")
        self.logger(data)
import datetime, pytz
import os.path
import traceback
import requests
import json
import sys
import configobj

from .google.service import Service
from sortedcontainers import SortedDict
from time import sleep
from components.settings import Settings
from components.logger import logger as mainlogger

# Google OAuth app credentials, loaded once at import time.
client_id = Settings().getsettings("Credentials", "googleAppId")
client_secret = Settings().getsettings("Credentials", "googleAppSecret")


class google:
    """Google Calendar integration component.

    Holds OAuth scope/credential state and the calendars/events caches
    used by the rest of the (not shown) calendar methods.
    """

    def __init__(self):
        """Initialise empty calendar state and the logging tag."""
        # OAuth scope requested for calendar access.
        self.SCOPES = ['https://www.googleapis.com/auth/calendar']
        self.calendars = []       # cached calendar resources
        self.calendar_names = {}  # presumably id -> display name; verify
        self.realevents = SortedDict({})  # events kept in sorted order
        self.isfree = False
        self.creds = {}
        self.tag = "google"

    def logger(self, msg, type="info", colour="none"):
        """Delegate to the shared application logger under this tag."""
        mainlogger().logger(self.tag, msg, type, colour)