def execute(self, printer):
    r = requests.get(Ticker.TICKER_URL)
    util.print_header(printer, 'Postillon')
    if r.status_code != 200:
        print("Error: Couldn't load postillon tickers (request failed)")
        util.print_error(printer)
        return
    try:
        json = r.json()
    except ValueError as e:
        print(e)
        print("Error: Couldn't fetch tickers (invalid or missing response content)")
        util.print_error(printer)
        return
    try:
        # extract ticker texts from response, trim to at most self.count tickers
        tickers = [ticker['text'] for ticker in json['tickers']][:self.count]
    except KeyError as e:
        print(e)
        print("Error: Couldn't fetch tickers (unexpected response format)")
        util.print_error(printer)
        return
    for ticker in tickers:
        printer.write(ticker + '\n')
        printer.feed(1)
    printer.feed(1)
def extract_users(self, calendar_df_iterator):
    """
    given a callable that yields calendar dataframes, this constructs
    and returns a dataframe containing all users
    """
    print_header("EXTRACTING USERS")

    #==========[ ITERATE OVER ALL DFS ]==========
    for cdf in calendar_df_iterator():
        print_status("Extract users", "next df")

        #=====[ Step 1: sort by user ]=====
        print_inner_status("extract_users", "sorting by user id")
        cdf = cdf.sort_values('user')

        #=====[ Step 2: init user representations ]=====
        print_inner_status("extract_users", "initializing user representations")
        for uid in cdf['user'].unique():
            if uid not in self.user_representations:
                self.user_representations[uid] = self.init_user_representation(uid)

        #=====[ Step 3: update the user representations ]=====
        print_inner_status("extract_users", "updating user representations")
        cdf.apply(self.update_user_representation, axis=1)

    #=====[ Step 4: convert to df, delete irrelevant stuff ]=====
    print_inner_status("extract_users", "converting to dataframe")
    self.users_df = pd.DataFrame(list(self.user_representations.values()))
    del self.user_representations
    return self.users_df
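# Usage sketch for extract_users. 'UserExtractor' is a hypothetical name for the
# owning class, and the toy dataframes stand in for the real calendar data; the
# actual init_user_representation/update_user_representation hooks live
# elsewhere in the repo.
import pandas as pd

def toy_calendar_df_iterator():
    # the real callable yields calendar dataframes loaded from storage
    yield pd.DataFrame({'user': [2, 1, 1], 'event': ['a', 'b', 'c']})
    yield pd.DataFrame({'user': [3, 2], 'event': ['d', 'e']})

extractor = UserExtractor()  # hypothetical class name
users_df = extractor.extract_users(toy_calendar_df_iterator)
print(users_df.head())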
def execute(self, printer):
    # load all available calendars of the authenticated user
    calendars = []
    # we loop until no nextPageToken is returned, as this indicates that no more pages exist
    currentResult = {'nextPageToken': None}
    while 'nextPageToken' in currentResult:
        currentResult = self.cal_service.calendarList().list(
            pageToken=currentResult['nextPageToken']).execute()
        # we only need the calendar IDs
        calendars.extend([item.get('id') for item in currentResult.get('items', [])])

    if len(calendars) == 0:
        # no calendars found, therefore no events can exist
        return

    # we want to load events for the current (or next) day
    startTime = util.calculate_target_date()
    endTime = startTime + datetime.timedelta(days=1)
    # correctly format times for the request
    startTime, endTime = startTime.isoformat(), endTime.isoformat()

    # load events using the Google API
    events = []  # we will accumulate the events of all calendars in this list
    for calendar in calendars:
        # again looping so that we do not miss events when events are returned on more than one page
        eventsResult = {'nextPageToken': None}
        while 'nextPageToken' in eventsResult:
            eventsResult = self.cal_service.events().list(
                calendarId=calendar,
                timeMin=startTime,
                timeMax=endTime,
                singleEvents=True,
                orderBy='startTime',
                pageToken=eventsResult['nextPageToken']).execute()
            events.extend(eventsResult.get('items', []))

    # sort events (necessary because we join events of multiple calendars)
    events = sorted(events, key=util.sortkey_event_datetime)

    # print events
    util.print_header(printer, 'Termine')
    if len(events) == 0:
        # no events for the target day
        printer.justify('C')
        printer.write('es stehen keine Termine\n')
        printer.write('im Kalender.\n')
        printer.justify('L')
    else:
        for event in events:
            # if this is not an all-day event, print the start time
            if 'dateTime' in event['start']:
                printer.underlineOn()
                printer.write(event['start']['dateTime'][11:16])  # extract time from ISO8601 timestamp
                printer.underlineOff()
                printer.write(' ')
            printer.write(event['summary'] + '\n')
    printer.feed(2)
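# The nextPageToken loop above appears twice; a generic helper along these lines
# could express the pattern once. A sketch only: it assumes any Google API list
# endpoint whose executed response dict carries 'items' and an optional
# 'nextPageToken'.
def iter_pages(list_request):
    """Yield items from every page of a Google API list call.

    list_request is a callable that takes a page token (None for the first
    page) and returns the already-executed response dict.
    """
    page_token = None
    while True:
        response = list_request(page_token)
        for item in response.get('items', []):
            yield item
        page_token = response.get('nextPageToken')
        if page_token is None:
            break

# e.g. collecting the calendar IDs:
#   calendars = [item.get('id') for item in iter_pages(
#       lambda token: self.cal_service.calendarList().list(pageToken=token).execute())]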
def execute(self, printer):
    # perform API request
    r = requests.get(self.api_url, params=self.params)
    print_header(printer, 'Wetter')
    if r.status_code != 200:
        print("Error: Couldn't reach darksky API for weather information")
        print_error(printer)
        return
    json = r.json()

    # calculate UNIX timestamp of the data that should be extracted from the response
    # TODO: make sure this works with all time zones
    target_date = calculate_target_date()
    timestamp = target_date.timestamp()

    forecast_data = None
    for current_day in json['daily']['data']:
        if current_day['time'] == timestamp:
            # found correct day
            forecast_data = current_day
            break
    else:
        # couldn't find appropriate data for the desired day
        print("Error: Couldn't acquire weather data for", target_date.isoformat())
        print_error(printer)
        return

    forecast_text = (
        "{summary} Die Temperatur beträgt zwischen {temperatureLow}°C "
        "und {temperatureHigh}°C. Die Niederschlagswahrscheinlichkeit "
        "beträgt {precipProbability}%."
    )
    try:
        # convert the probability to a percentage
        forecast_data['precipProbability'] *= 100
        # insert weather data into the format string
        forecast_text = forecast_text.format(**forecast_data)
    except KeyError:
        print("Error: acquired weather data is missing some required attributes")
        print_error(printer)
        return

    printer.write(forecast_text)
    printer.feed(2)
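# The lookup above relies on Python's for/else: the else branch runs only when
# the loop finishes without hitting break. A standalone illustration (toy data):
for day in [{'time': 1}, {'time': 2}]:
    if day['time'] == 99:
        break
else:
    print('no matching day')  # printed, since the loop never broke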
def run():
    validated = False
    user_info = None
    print_header()
    while not validated:
        user_info = get_user_info()
        validated = validate_input(user_info)
    create_user(user_info)
    created_user = get_user_from_api(user_info['username']).json()
    pp = pprint.PrettyPrinter(indent=2)
    pp.pprint(created_user)
    add_department_and_vanity_name(user_info)
    set_user_as_employee(user_info)
    set_join_before_host_and_topic(user_info, created_user)
    set_pincode(user_info, created_user)
    add_assistants(user_info)
def execute(self, printer):
    # currently we fetch top buzzfeed articles (don't judge me, they are a good laugh)
    params = {'country': 'de', 'pageSize': self.count}
    r = requests.get(
        News.API_ENDPOINT,
        params=params,
        headers={'Authorization': 'Bearer ' + config.NEWSAPI_KEY})
    util.print_header(printer, 'Nachrichten')
    if r.status_code != 200:
        print("Error: Couldn't fetch news articles (request failed)")
        util.print_error(printer)
        return
    try:
        json = r.json()
    except ValueError as e:
        print(e)
        print("Error: Couldn't fetch headlines (invalid or missing response content)")
        util.print_error(printer)
        return
    try:
        articles = [_Article(**current) for current in json['articles']][:self.count]
    except KeyError as e:
        print(e)
        print("Error: Couldn't fetch headlines (unexpected response format)")
        util.print_error(printer)
        return
    for a in articles:
        # print the source
        printer.underlineOn()
        printer.write(a.source_name + '\n')
        printer.underlineOff()
        # print the headline
        printer.write(a.title + '\n')
        printer.feed(1)
        # TODO: implement QR-Code generation
    printer.feed(1)
def dispatch_link(self, gpu, link):
    link = self.strip_referrals(link)
    domain = urlparse(link).hostname
    print("found", gpu, "url:", link)
    self.counter.incr('gpus', gpu)
    self.counter.incr('domains', domain)

    # take action
    if 'newegg' in link:
        item_id = link.split('?Item=')[1].split('&ignorebbr=')[0]
        newegg.Newegg(item_id)
        if platform in ("linux", "linux2", "darwin"):  # Mac & Linux
            pygame.mixer.init()
            pygame.mixer.music.load(self.settings['sound_file'])
            pygame.mixer.music.play()
        elif platform == "win32":
            winsound.PlaySound(self.settings['sound_file'], winsound.SND_ASYNC)
        return
    elif 'amazon' in link:
        pass  # TODO
    elif 'bhphotovideo' in link:
        pass  # TODO
    elif 'nvidia' in link:
        pass  # TODO
    else:
        # TODO
        print("haven't implemented", domain)

    util.print_header("PARSED LINK:", link)
    webbrowser.open(link)
    if platform in ("linux", "linux2", "darwin"):  # Mac & Linux
        pygame.mixer.init()
        pygame.mixer.music.load(self.settings['sound_file'])
        pygame.mixer.music.play()
    elif platform == "win32":
        winsound.PlaySound(self.settings['sound_file'], winsound.SND_ASYNC)
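# The platform-dependent sound snippet above is duplicated; one option is to
# factor it into a helper on the same class. A sketch only: '_play_sound' is a
# suggested name, not existing code.
def _play_sound(self):
    # play the configured notification sound, chosen per platform
    if platform in ("linux", "linux2", "darwin"):  # Mac & Linux
        pygame.mixer.init()
        pygame.mixer.music.load(self.settings['sound_file'])
        pygame.mixer.music.play()
    elif platform == "win32":
        winsound.PlaySound(self.settings['sound_file'], winsound.SND_ASYNC)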
def console(self, cmd='', expect=None, show_cmdbuf=True, rtn_cmdbuf=False):
    """Attach the main program's console to the subprocess's console for its
    input and output. This allows the user to manually interact with the
    subprocess. The subprocess console is detached either by Ctrl-], or when
    the expect regex is found.

    - cmd: an optional command to be executed; the default is an Enter key to
      display the console prompt.
    - expect: a regex to expect; if found, detach from the subprocess and
      return (o, e).
    - show_cmdbuf: if True, and if exited with Ctrl-], print the manual steps
      done within the console session. Note, special keys (arrow, tab, delete,
      etc.) are printed raw.
    - rtn_cmdbuf: if True, return the intercepted user input (one entry per
      line), together with (o, e).
    - return: (o, e), the outputs from stdout and stderr. When exiting with
      Ctrl-], return (o, e, cmdbuf) if rtn_cmdbuf is True, else None.
    """
    if not self.is_alive():
        self.print_warn('Connection %s dropped, reconnecting' % self.name)
        self._connect()
    util.print_header("Console attached to '%s'. Escape character is '^]'" % self.name)
    if cmd is not None:
        cmd = cmd.strip() + '\n'
    results = self.cmd_interact(cmd=cmd, expect=expect, timeout=self.FOREVER)
    if len(results) == 3:
        o, e, steps = results
        steps = steps.splitlines()
        util.print_header('Console detached from "%s", by Ctrl-]' % self.name)
        if show_cmdbuf:
            util.print_green("Here are the manual steps done in interactive console of %s:" % self.name)
            for x in steps:
                util.print_green(' %s' % repr(x))
            print('')
        if rtn_cmdbuf:
            return o, e, steps
        return  # return None to keep the interactive console clean
    else:
        o, e = results
        util.print_header('Console detached from "%s"' % self.name)
        return o, e
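# Typical use of console(), assuming a connected session object 's'
# (illustrative values only):
#
#   # interact manually until Ctrl-], then inspect the typed commands
#   o, e, steps = s.console(rtn_cmdbuf=True)
#
#   # or run one command and detach as soon as the prompt reappears
#   o, e = s.console(cmd='show version', expect=r'\$ $')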
def get_save_name(endpoint, pull_end):
    """
    Function: get_save_name
    -----------------------
    given the finish index, returns a string that can serve
    as a filename for it
    """
    return 'up_to_' + str(pull_end) + '.df'


if __name__ == "__main__":

    #==========[ Step 1: parse arguments ]==========
    endpoint, pull_data, start, upper_limit = parse_arguments(sys.argv)
    print_header("Pulling data from " + endpoint + ": (start, upper_limit) = " +
                 str(start) + ", " + str(upper_limit))

    #==========[ Step 2: set up storage delegate ]==========
    storage_delegate = StorageDelegate()

    #==========[ PULL DATA ]==========
    pull_size = 500
    data_dfs = []
    num_pulled = 0
    while num_pulled < upper_limit:

        #=====[ Step 1: set pull parameters ]=====
        pull_start = start + num_pulled
def __init__(self, filename):
    with open(filename) as f:
        self.settings = json.load(f)
    self.counter = counter.Counter(self.settings['counter'])
    self.threads, self.link_map = [], {}
    util.print_header("Settings:", json.dumps(self.settings, indent=2))
def add_to_cart(self):
    url = ('https://secure.newegg.com/Shopping/AddToCart.aspx'
           '?Submit=ADD&ItemList=' + str(self.item_id))
    util.print_header("Visiting:", url)
    webbrowser.open(url)
"version int DEFAULT 10 NOT NULL," "last_sync datetime DEFAULT '2016-01-01 00:00:00' NOT NULL" ")" ) base.cur.execute(sql) base.cur.execute("INSERT INTO config () VALUES ()") base.db.commit() if version < 10004: result += "Add option to remove and add shifts\n" new_version = 10004 # 1.0.4 if new_version is not None: if not failure: base.update("config", {"version": new_version}, True, ("WHERE constant = 'X'", ())) base.db.commit() result += ("Updated from " + jinja_filters.readable_version(version) + " to " + jinja_filters.readable_version(new_version)+"\n") else: base.db.rollback() result += "FAILURE!\n" else: result += "No update available("+jinja_filters.readable_version(version)+")\n" return result if __name__ == "__main__": util.print_header() #print db_update() print git_update()
if __name__ == "__main__": so = SpotOn () #=====[ Step 1: train semantic analysis ]===== # print_header ("Demo Script - Training semantic analysis models") # so.train_semantic_analysis () #=====[ Step 2: save semantic analysis models ]===== # print_header ("Demo Script - Saving semantic analysis models") # so.semantic_analysis.save () #=====[ Step 3: load semantic analysis models ]===== print_header ("Demo Script - Loading semantic analysis models") so.semantic_analysis.load () #=====[ Step 4: load users ]===== print_header ("Demo Script - Getting users") # so.get_users () so.load_users () #=====[ Step 3: apply to activity dfs ]===== print_header ("Demo Script - Performing semantic analysis on activities") for adf in so.storage_delegate.iter_activity_dfs (): #=====[ Semantic analysis on adf ]===== adf = so.semantic_analysis.add_semantic_summary (adf, 'name')