def face_recognize(image):
    """Detect faces in an image, classify each one, and save an annotated copy.

    :param image: File name of the image inside the 'data_test' directory.
        The annotated result is written to 'result_test' under the same name.
    :return: None
    """
    image_path = get_file_path('data_test', image)
    # BUG FIX: was `image_name` (undefined); the result file mirrors the input name.
    image_result = get_file_path('result_test', image)
    objects_path = get_file_path('other', 'object_names')
    model_path = get_file_path('result_train', 'model_name')
    face_detector = get_face_detector_cnn()
    face_embedding = get_vgg_face_embedding(
        get_file_path('Pre_trained_CNN', 'vgg_model'))
    image_org = cv2.imread(image_path)
    # BUG FIX: cv2.imread returns BGR, so the grayscale conversion code is
    # COLOR_BGR2GRAY (COLOR_RGB2GRAY swaps the R/B channel weights).
    image_gray = cv2.cvtColor(image_org, cv2.COLOR_BGR2GRAY)
    coodrs = face_detector(image_gray, 1)
    model = tf.keras.models.load_model(model_path)
    objects = get_object_names(objects_path)
    images, face_box = crop_rect_box_coodrs(coodrs, image_org, True)
    image_org = Image.fromarray(cv2.cvtColor(image_org, cv2.COLOR_BGR2RGB))
    # Load the label font once; it is identical for every detected face
    # (the original reloaded it inside the loop).
    fort = ImageFont.truetype(fontPath + "/" + "FreeMonoBold.ttf", size=60)
    for i in range(images.shape[0]):
        box = face_box[i]
        face_encode = preprocess_image(images[i], face_embedding)
        face_embedded = K.eval(face_encode)
        y = model.predict(face_embedded)
        # Accept the prediction only above a 0.7 confidence threshold;
        # otherwise label the face as unknown ("*").
        if np.max(y) > 0.7:
            person = objects[np.argmax(y)]
        else:
            person = "*"
        print(np.max(y), person)
        pos_name = (box[0] + box[2], box[1] - 5)
        pos_box = (box[0] + box[2] - 1, box[1] - 5)
        pos_rect = (box[0], box[1]), (box[0] + box[2], box[1] + box[3])
        if person != "*":
            # Orange name-tag background pasted behind the label text.
            tw, th = fort.getsize(person)
            canvas = Image.new('RGB', (int(tw / 5) - 10, int(th / 5) + 1), "orange")
            image_org.paste(canvas, pos_box)
        draw = ImageDraw.Draw(image_org)
        if person != "*":
            # BUG FIX: ImageDraw.text's keyword is `font`, not `fort`; the
            # typo raised TypeError (unexpected keyword argument).
            draw.text(pos_name, person, 'blue', font=fort)
        draw.rectangle(pos_rect, outline='green')
    image_org.save(image_result)
def draw_pixel_art(canvas: Canvas, top_left: tuple, file_name: str,
                   pixel: int = 10,
                   palette=(None, '#75B9BE', 'black', 'white')):
    """Draw pixel art on *canvas* from a CSV file of palette indices.

    Each line of the file is one row of the art; each comma-separated number
    on a line is an index into *palette*. A palette entry of None means
    "leave this cell blank".

    :param canvas: tkinter Canvas to draw on.
    :param top_left: (x, y) of the art's top-left corner.
    :param file_name: name of the CSV data file (resolved via get_file_path).
    :param pixel: side length of each square cell, in pixels.
        (BUG FIX: the annotation was ``str``; the default 10 and the
        coordinate arithmetic below show it is an int.)
    :param palette: colors indexed by the numbers in the file.
    """
    x_original = top_left[0]
    x = top_left[0]
    y = top_left[1]
    # BUG FIX: use `with` so the data file is closed (the original leaked
    # the handle).
    with open(get_file_path(file_name), 'r') as f:
        # for outputting each row
        for line in f.readlines():
            pixel_strings = line.split(',')
            # for outputting each cell (for the row):
            for num_string in pixel_strings:
                num = int(num_string)
                color = palette[num]
                if color is not None:
                    make_square(canvas, (x, y), pixel,
                                fill_color=color, outline_color='#000')
                x += pixel
            # increment the y and reset the x when moving to a new row
            y += pixel
            x = x_original
def write(self, css_text=''):
    """Write *css_text* to ``<file_name>.css`` in human-readable form.

    **Notes:**

    - The output is auto-generated: readable, but not intended to be
      hand-edited.
    - A pre-existing file with the same name is overwritten.

    :type css_text: str
    :param css_text: Text containing the CSS to be written to the file.
    :return: None

    **Example:**

    >>> css_text = '.margin-top-50px { margin-top: 3.125em }'
    >>> css_file = CSSFile()
    >>> css_file.write(css_text=css_text)
    """
    sheet = parseString(css_text)
    ser.prefs.useDefaults()  # default / verbose serializer settings
    destination = get_file_path(
        file_directory=self.file_directory,
        file_name=self.file_name,
        extension='.css'
    )
    with open(destination, 'w') as handle:
        handle.write(sheet.cssText.decode('utf-8'))
def test_get_file_path(self):
    # get_file_path must join directory, name, and extension for a
    # representative spread of extensions.
    base_directory = getcwd()
    base_name = 'blowdry'
    for suffix in ('.css', '.min.css', '.txt', '.mp3', '.anything', '.md', '.html', '.rst'):
        expected = path.join(getcwd(), base_name + suffix)
        actual = get_file_path(file_directory=base_directory,
                               file_name=base_name,
                               extension=suffix)
        self.assertEqual(actual, expected)
def __init__(self, file_directory=None, file_name='', extension=''):
    """Compute the file path and ensure the target directory exists.

    :param file_directory: Directory the file lives in. Defaults to the
        current working directory *at call time*. (BUG FIX: the original
        default ``file_directory=getcwd()`` was evaluated once, when the
        class was defined, so every later instance silently reused the cwd
        from import time even if the process had since changed directory.)
    :param file_name: File name, without extension.
    :param extension: File extension, including the leading dot.
    :raises OSError: If *file_directory* does not exist and cannot be created.
    """
    if file_directory is None:
        file_directory = getcwd()
    self.file_directory = str(file_directory)
    self.file_name = str(file_name)
    self.file_path = get_file_path(
        file_directory=self.file_directory,
        file_name=self.file_name,
        extension=str(extension)
    )
    try:  # Python 2.7 Compliant (no makedirs(..., exist_ok=True))
        makedirs(file_directory)  # Make 'html' directory
    except OSError:
        if not path.isdir(file_directory):  # Verify directory existences
            raise OSError(file_directory + ' is not a directory, and could not be created.')
def minify(self, css_text=''):
    """Write a minified version of *css_text* to ``<file_name>.min.css``.

    **Definition:** to minify CSS "means removing all unnecessary
    characters, such as spaces, new lines, comments without affecting the
    functionality of the source code."
    *Source:* https://www.jetbrains.com/phpstorm/help/minifying-css.html

    **Purpose:** a smaller CSS file downloads faster, so the page loads
    sooner.

    **Notes:**

    - The output is minified and not human readable.
    - Pre-existing files with the same name are overwritten.
    - Uses the cssutils minification tool.

    **Important:**

    - ``ser.prefs.useMinified()`` is a *global* cssutils setting, so it is
      switched back to ``ser.prefs.useDefaults()`` before returning.
      Otherwise later serializations (including unit tests and any code run
      after this method) would keep producing minified output.

    :type css_text: str
    :param css_text: Text containing the CSS to be written to the file.
    :return: None

    **Example:**

    >>> css_text = '.margin-top-50px { margin-top: 3.125em }'
    >>> css_file = CSSFile()
    >>> css_file.minify(css_text=css_text)
    """
    sheet = parseString(css_text)
    ser.prefs.useMinified()  # switch the global serializer to minified mode
    destination = get_file_path(
        file_directory=self.file_directory,
        file_name=self.file_name,
        extension='.min.css'
    )
    with open(destination, 'w') as handle:
        handle.write(sheet.cssText.decode('utf-8'))
    ser.prefs.useDefaults()  # restore the global serializer settings
def draw_pixel_art(
        canvas: Canvas,
        top_left: tuple,
        file_name='heart.csv',
        pixel: int = 10,
        palette=(None, '#75B9BE', 'black', 'white')
):
    """Draw pixel art on *canvas*, one row of squares per CSV line.

    Each line of the file is a comma-separated list of palette indices;
    the whole row is handed to make_row for rendering.

    :param canvas: tkinter Canvas to draw on.
    :param top_left: (x, y) of the art's top-left corner.
    :param file_name: name of the CSV data file (resolved via get_file_path).
    :param pixel: side length of each square cell, in pixels.
    :param palette: colors indexed by the numbers in the file.
    """
    x = top_left[0]
    y = top_left[1]
    # BUG FIX: `with` guarantees the data file is closed (the original
    # opened it and never closed it).
    with open(get_file_path(file_name)) as f:
        for line in f.readlines():
            # parse one CSV line into a row of palette indices
            row = [int(cell) for cell in line.split(',')]
            make_row(canvas, (x, y), row, palette=palette, pixel=pixel)
            y += pixel  # next row starts one cell further down
import utilities
import ssl  # for https requests
from urllib.request import urlopen

# 1. get it from the internet
context = ssl._create_unverified_context()
response = urlopen('https://www.google.com', context=context)
page_text = response.read().decode('utf-8', 'ignore')

# 2. print it to the screen
print(page_text)

# 3. write it to a file
destination = utilities.get_file_path('google_homepage.html', subdirectory='results')
with open(destination, 'w') as out_file:
    out_file.write(page_text)

print('Web page written to results/google_homepage.html. Go take a look!')
import utilities

# Ask which word to search for; matching is case-insensitive.
word = input('what word do you want to look for?').lower()

# Write every matching line (numbered) to "<word>.txt".
match_number = 1
source = open(utilities.get_file_path('moby_dick.txt'), 'r', encoding='utf8')
matches = open(utilities.get_file_path(word + '.txt'), 'a', encoding='utf8')
for line in source.readlines():
    # does the word appear in any slot of the lowercased, split line?
    if word in line.lower().split(' '):
        matches.write(str(match_number) + '. ' + line)
        match_number += 1
source.close()
matches.close()
import cfg
import utilities
from time import sleep
import subprocess
import threading
from random import choice
import re

# Paths are resolved relative to this module via the shared utilities helper.
animal_sounds_script_path = utilities.get_file_path(
    __file__, cfg.animal_sounds_script_path)
animal_soundfiles_path = utilities.get_file_path(__file__,
                                                 cfg.animal_soundfiles_path)


def create_dict_of_filenames() -> dict:
    """
    Create a dict whose keys are names of available sound files without any
    suffixes. Values are lists containing all variations on that filename.
    This means that if there are 2 files named horse, horse1.mp3 and
    horse2.mp3, the equivalent dict entry would be horse:[horse1, horse2]
    """
    # get list of animals files
    # NOTE(review): shell=True with an interpolated path is shell-injection
    # prone; os.listdir / pathlib would be safer — confirm before changing.
    sound_files = subprocess.check_output([f'ls {animal_soundfiles_path}'],
                                          shell=True)
    sound_files = sound_files.decode()
    # list of file names without the .mp3 suffix
    sound_files_list = [x.split('.')[0] for x in sound_files.split('\n') if x]
    # turn into a dictionary of lists, where file names of the same name but
    # with different numbers appended, e.g. 'horse1', 'horse2', 'horse3', etc,
    # will be put into a single list in the dict with a key of 'horse'
    sound_files_dict = {}
    # NOTE(review): this function appears truncated here in this chunk; the
    # grouping logic and return statement are not visible.
import json
import utilities

# 1. read lookup table from a file
#    (BUG FIX: `with` closes the handle — the original left the file open;
#     json.load(f) replaces the json.loads(f.read()) detour):
with open(utilities.get_file_path('state_capitals.json'), 'r') as f:
    # 2. convenience function that converts JSON file text into a dictionary:
    capital_lookup = json.load(f)

# 3. Then access the dictionary:
print('You can also load a dictionary from a file!')
print('The capital of Florida is:', capital_lookup.get('Florida'))
print('The capital of Illinois is:', capital_lookup.get('Illinois'))
print('The capital of California is:', capital_lookup.get('California'))
print('The capital of Massachusetts is:', capital_lookup.get('Massachusetts'))
'''
What was the average time of someone completing
the boston marathon in 2015?
'''
import utilities

# Open the 2015 results with a context manager so the handle is
# released as soon as the block ends.
results_path = utilities.get_file_path('marathon_results_2015.csv')
with open(results_path, 'r', encoding='utf8') as f:
    print('analyze file here...')
import cfg
import utilities
import re
from time import sleep
import threading
from datetime import datetime

# Log file for relaying chat messages; path is relative to this module.
path_to_text4michael = utilities.get_file_path(__file__,
                                               "other/text4michael.txt")


def process_msg(message, sender_username) -> None:
    """
    Processes every message received. Formats and sends calls for them to be
    output to the terminal.
    Messages with the hide command !hide will not be output to the terminal.
    """
    # Hidden messages are dropped; everything else is written on a
    # background thread so the caller is not blocked on file I/O.
    if not re.search("!hide", message):
        write_to_text4michael_thread = threading.Thread(
            target=write_to_text4michael, args=(message, sender_username))
        write_to_text4michael_thread.start()


def write_to_text4michael(message, sender_username) -> None:
    """
    format text, then wait for other/text4michael.txt to be available to
    append the text to it
    """
    # format text: "HH:MM:SS - user: message"
    time = datetime.now().strftime("%H:%M:%S")
    logstring = f"{time} - {sender_username}: {message}" + "\n"
    # NOTE(review): this function appears truncated here in this chunk; the
    # wait-and-append logic described in the docstring is not visible.
import talk_to_michael
import extra_message_parser
# import extra_commands
import importlib
import socket
import re
import os.path
from datetime import datetime
from time import sleep

# remember last PONG message time (presumably for a keep-alive check — the
# consuming code is not visible in this chunk)
last_pong = datetime.now()

# chat log relative path
# NOTE(review): `utilities` and `cfg` are not imported in this visible chunk;
# they are presumably imported elsewhere in the file — confirm.
chat_log_path = utilities.get_file_path(__file__, cfg.chat_log_path)
# twitch_commands relative path
path_to_twitch_commands = utilities.get_file_path(__file__,
                                                  "other/twitch_commands.txt")

# compile regex to match twitch's message formatting
CHAT_MSG = re.compile(r"^:\w+!\w+@\w+\.tmi\.twitch\.tv PRIVMSG #\w+ :")
CHAT_MSG_SENDER = re.compile(r"^:\w+")


def main() -> None:
    # Reset shared state flags before starting the bot.
    global last_pong
    utilities.set_state("terminate_flag", 0)
    utilities.set_state("mpv_mutex", 0)
    # NOTE(review): main() appears truncated here in this chunk.
a) Prompt the user for a state
b) Iterate through each data file in the "files" directory, using the
utilities.get_files_in_directory function, in order to calculate the daily
change in covid cases.
c) print the data and the daily change in cases to the screen.
d) create an output CSV file to store your results
3. Finally, use tkinter to make a bar chart of cases by state by modifying
the utilities.make_bar_chart function.
'''
# NOTE(review): the opening of the module docstring above lies outside this
# chunk; the text is preserved unchanged.
import utilities

# Part 1: download the first covid-19 data file and
# save it to the "files" directory:
covid_report_links = utilities.get_covid_file_links()
print(covid_report_links)

# downloading one file...
local_path = utilities.get_file_path('test_file.csv', subdirectory='files')
utilities.download_remote_file(covid_report_links[0], local_path)

# Part 2: Open all files in the 'files' directory and analyze them:
files_directory = utilities.get_file_path('files')
local_file_paths = utilities.get_files_in_directory(files_directory)
print(local_file_paths)

# Part 3: after you've analyzed the data, make a bar chart:
utilities.make_bar_chart()
'''
# NOTE(review): the ''' above closes a (presumably HTML) `template` string
# whose opening lies outside this chunk — template.format(...) below relies
# on it; confirm.

# Each entry: [artist name, image URL, Spotify link].
artists = [
    [
        'Beyonce',
        'https://i.scdn.co/image/9fef2047e4e3f05031807d5fa9e121b7862ba259',
        'https://open.spotify.com/artist/6vWDO969PvNqNYHIOW5v0m'
    ],
    [
        'The Rolling Stones',
        'https://i.scdn.co/image/85d9cb252ab4d8410d31820be40214c59f2597a1',
        'https://open.spotify.com/artist/22bE4uQ6baNwSHPVcDxLCe'
    ],
    [
        'Madonna',
        'https://i.scdn.co/image/96b4818a65820e91e0e17fcf55a4d2213b019ad4',
        'https://open.spotify.com/artist/6tbjWDEIzxoDsBA1FuhfPW'
    ],
]

# Render one HTML page per artist into the results directory.
for artist in artists:
    artist_name = artist[0]
    artist_image = artist[1]
    artist_url = artist[2]
    # e.g. "The Rolling Stones" -> "the_rolling_stones.html"
    file_name = artist_name.replace(' ', '_').lower() + '.html'
    html_text = template.format(name=artist_name, image=artist_image,
                                link=artist_url)
    file_path = utilities.get_file_path(file_name, subdirectory='results')
    f = open(file_path, 'w')
    f.write(html_text)
    f.close()
#example: print all keys and values one by one: import json import utilities playlist = [] try: # open playlist file if it exists and update playlist f = open(utilities.get_file_path('playlist.json'), 'r') playlist = json.loads(f.read()) f.close() except Exception: # if there's an error, just keep the existing playlist pass # Now, prompt the user for songs to add to her playlist: while True: artist_name = input('Artist: ') song_name = input('Song Name: ') genre = input('Genre: ') tags = input('Tags: ') year = input('Year Released: ') # YOUR CODE HERE playlist.append({ 'artist_name': artist_name, 'song_name': song_name, 'genre': genre, 'tags': tags, 'year': year
import urllib.request
import utilities
import ssl

# How do you download some sample audio from the tracks data?

# location of audio file:
url_address_peace_of_mind = 'https://p.scdn.co/mp3-preview/7ea00cefb82b042c644cf5447a0d78f2d7546fd7?cid=9697a3a271d24deea38f8b7fbfa0e13c'

# retrieve the raw bytes of the preview clip:
context = ssl._create_unverified_context()
response = urllib.request.urlopen(url_address_peace_of_mind, context=context)
audio_bytes = response.read()

# write the bytes out with the 'wb' (write binary) flag
local_path = utilities.get_file_path('peace_of_mind.mp3')
with open(local_path, 'wb') as track_file:
    track_file.write(audio_bytes)
'''
You're working for the city and want to identify all of the senior citizens
in your district to send out a mailer -- as they may qualify for a special
tax waiver. You are given a (messy and incomplete) data set. Your boss wants
you to create 2 files:
* One for people with a complete data profile (no obvious mistakes)
* One for people with an incomplete / suspect data profile
Write a program to do this.
'''
import utilities

# source data plus one output file per validity bucket
f_source = open(utilities.get_file_path('people.csv'), 'r')
f_valid = open(utilities.get_file_path('people_valid.csv'), 'w')
f_invalid = open(utilities.get_file_path('people_invalid.csv'), 'w')

# [1:] skips the header row
for line in f_source.readlines()[1:]:
    cells = line.split(',')
    name = cells[0]
    age = cells[1]
    # NOTE(review): BUG — cells[2] is read *before* the len(cells) != 3
    # check below, so a short row raises IndexError instead of being routed
    # to the invalid file. The check should come first.
    address = cells[2]
    if len(cells) != 3:
        f_invalid.write(line)
        continue
    # a non-numeric age marks the row invalid
    try:
        age = int(age)
    except:
        f_invalid.write(line)
        # NOTE(review): this chunk appears truncated here — the `continue`,
        # the valid-row write, and the file closes are not visible.
import utilities

# append a test line to the lecture-notes file
notes_path = utilities.get_file_path('thursday_lecture.txt')
with open(notes_path, 'a', encoding='utf8') as notes_file:
    notes_file.write('\nTest')
import utilities
import ssl  # for https requests
from urllib.request import urlopen

# 1. get it from the internet
context = ssl._create_unverified_context()
tweets_response = urlopen(
    'https://www.apitutor.org/twitter/1.1/search/tweets.json?q=cats',
    context=context)
tweets_text = tweets_response.read().decode('utf-8', 'ignore')

# 2. print it to the screen
# print(tweets_text)

# 3. write it to a file
destination = utilities.get_file_path('data.json', subdirectory='results')
with open(destination, 'w') as out_file:
    out_file.write(tweets_text)

print('Data file written to results/data.json. Go take a look!')
# example: print all keys and values one by one:
import json
import utilities

# open and read the English-to-Spanish dictionary file:
with open(utilities.get_file_path('eng2sp.json'), 'r') as f:
    eng2sp = json.load(f)

# Your job:
# Modify the translation program below so that if the translation is not in
# the dictionary, your program will:
# 1. Ask the user to type in the translation so that it can "learn" how to
#    translate the word.
# 2. Store the new translation in its dictionary,
# 3. When the user asks to quit the program, the program will overwrite
#    the old dictionary with the more comprehensive version of the dictionary
#    (already done for you).
while True:
    word = input(
        'Enter a word in English and I will tell you the Spanish translation: '
    )
    if word.upper() == 'Q':
        print('quitting...\n')
        break
    # YOUR CODE HERE

    # END YOUR CODE HERE
    print('The translation for', word, 'is', eng2sp.get(word), '\n')
import utilities

# stream the novel to the screen, one line at a time
with open(utilities.get_file_path('moby_dick.txt'), 'r', encoding='utf8') as book:
    for line in book:
        print(line)
import obs_interface
import utilities
from time import sleep

# twitch_commands relative path (resolved relative to this module)
path_to_twitch_commands = utilities.get_file_path(__file__,
                                                  "other/twitch_commands.txt")

print('Launching term_utils')


def set_blur(state):
    # Forward the blur toggle to the OBS interface layer.
    obs_interface.set_blur(state)


def blur(state):
    # Terminal-friendly alias for set_blur().
    set_blur(state)


def chat(message, prefix='chat'):
    """
    send command to twitch_commands.txt to be processed within the
    twitch_bot.py script
    """
    # wait for mutex to unlock
    # NOTE(review): this busy-waits on a plain module attribute, not a real
    # lock — the check and the set below can race between threads.
    while utilities.twitch_commands_mutex:
        sleep(0.5)
    # lock mutex
    utilities.twitch_commands_mutex = 1
    # start the mpv script
    # NOTE(review): chat() appears truncated here in this chunk.
import utilities

# append a handful of color names to the file, one per line
with open(utilities.get_file_path('my_new_file.txt'), 'a', encoding='utf8') as out_file:
    for shade in ('red', 'pink', 'purple', 'orange', 'teal', 'blue'):
        out_file.write(shade)
        out_file.write('\n')
# NOTE(review): the next two lines are the tail of a function whose header
# (presumably search_for_tracks or a fetch helper) lies outside this chunk.
    response = urllib.request.urlopen(url, context=context)
    return json.loads(response.read().decode())


def save_track_to_disk(url: str, file_path: str):
    # Download *url* and write the raw bytes to *file_path*.
    context = ssl._create_unverified_context()
    response = urllib.request.urlopen(url, context=context)
    file_data = response.read()
    # create a local file with the 'wb' flag
    f = open(file_path, 'wb')
    f.write(file_data)
    f.close()


# Prompt for a search term and save every result's album cover locally.
term = input('Enter a search term to look for some album covers: ')
tracks = search_for_tracks(term)
for track in tracks:
    album_url = track.get('album').get('image_url')
    album_name = track.get('album').get('name')
    # skip tracks that have no cover image
    if album_url is None:
        continue
    # build a filesystem-safe local file name from the album name
    local_file_name = album_name.lower().replace(' ', '') + '.jpg'
    local_file_name = local_file_name.replace('/', '')
    local_file_name = local_file_name.replace('\'', '')
    # hack for VS code (not necessary in IDLE I don't think):
    local_file_path = utilities.get_file_path(local_file_name)
    print('Saving to:', local_file_name)
    save_track_to_disk(album_url, local_file_path)
    # pause between downloads
    time.sleep(1)
'''
You're working for the city and want to identify all of the senior citizens
in your district to send out a mailer -- as they may qualify for a special
tax waiver. You are given a (messy and incomplete) data set. Your boss wants
you to create 2 files:
* One for people with a complete data profile (no obvious mistakes)
* One for people with an incomplete / suspect data profile
Write a program to do this.
'''
import utilities

# Read the source data and echo each row.
# BUG FIX: `with` closes the file (the original opened it and never closed it).
with open(utilities.get_file_path('people.csv'), 'r') as f:
    for line in f.readlines():
        print(line)
import utilities

# Copy moby_dick.txt to a new file, prefixing every line with its 1-based
# line number ("1. ", "2. ", ...).
# IDIOM FIX: enumerate(start=1) replaces the hand-rolled counter, and `with`
# guarantees both files are closed even if an error occurs mid-copy.
with open(utilities.get_file_path('moby_dick.txt'), 'r',
          encoding='utf8') as source_file, \
        open(utilities.get_file_path('moby_dick_line_numbers.txt'), 'w',
             encoding='utf8') as destination_file:
    for linenum, line in enumerate(source_file, start=1):
        destination_file.write(str(linenum) + '. ' + line)
import utilities

canvas = utilities.create_canvas_window()

##################################################################################
# Your code below this line...

# Each CSV row is "x,y,radius"; draw one circle per row.
# BUG FIX: `with` closes the data file (the original never closed it).
with open(utilities.get_file_path('bubbles.csv')) as f:
    for line in f.readlines():
        line = line.replace('\n', '')
        items = line.split(',')
        x = int(items[0])
        y = int(items[1])
        radius = int(items[2])
        # print([x, y, radius])
        utilities.make_circle(canvas, (x, y), radius)

# Your code above this line
##################################################################################
canvas.mainloop()
from csv import reader # Step 1: Download all data files from Johns Hopkins GitHub # Note: this function only downloads a day's COVID-19 file # if it hasn't been downloaded already. utilities.download_all_data_files() # Step 2: Analyze all data pertaining to state over time... state = 'Illinois' county = 'Cook' headers = ['Day', 'State', 'Total', 'Daily Total', 'Direction'] if county: file_name = state.lower() + '_' + county.lower() + '.csv' else: file_name = state.lower() + '.csv' out_file_path = utilities.get_file_path(file_name, subdirectory='files') out_file = open(out_file_path, 'w') out_file.write(','.join(headers) + '\n') # Step 3: initialize grouping and summing variables data = {} yesterdays_count = 0 todays_change = 0 yesterdays_change = 0 # Step 4: open each file in the directory and process it: file_names = utilities.get_files_in_directory('files') print('Analyzing', len(file_names), 'files (Jan - Nov, 2020) and extracting all data pertaining to', state,
canvas = utilities.create_canvas_window()

##################################################################################
# Your code below this line...

# Your job: Figure out how to use the data in the heart
# file to draw a heart

# start at top_left position (100, 100)
x = 100
y = 100
pixel = 20
palette = ['white', '#E0607E', 'black', 'white']

# BUG FIX: `with` guarantees the data file is closed (the original opened it
# and never closed it).
with open(utilities.get_file_path('heart.csv')) as f:
    for line in f.readlines():
        cells = line.split(',')
        x = 100  # reset x-position to 100 (start of every row)
        for cell in cells:
            cell = int(cell)
            fill_color = palette[cell]
            utilities.make_square(canvas, (x, y), pixel,
                                  color=fill_color,
                                  stroke_width=1)
            x += pixel
        y += pixel  # go down to next row

# Your code above this line
##################################################################################
import cfg
import utilities
import os.path
import subprocess
from random import randint
from time import sleep
import threading
import re

# get relative path of the text-to-speech shell script
t2s_script_path = utilities.get_file_path(__file__, cfg.t2s_script_path)


def text2speech(_command_info, message, sender_username) -> None:
    # Strip the command token, sanitize the text, and speak it on a
    # background thread so the caller is not blocked.
    message = re.sub(r"!say", "", message)  # remove !say command in the message
    message = filter_text(message)
    text = f"{sender_username} says: {message}"
    speech_thread = threading.Thread(target=speak, args=(text, ))
    speech_thread.start()


def speak(text) -> None:
    """
    calls the text2speech shell script
    """
    # wait for mutex to unlock
    # NOTE(review): busy-wait on a plain flag, not a real lock — racy; the
    # function also appears truncated after this loop in this chunk.
    while utilities.mpv_mutex:
        sleep(0.5)
import utilities
from tkinter import Canvas, Tk
import time

# full-screen white canvas titled "Florida"
gui = Tk()
gui.title('Florida')
window_width = gui.winfo_screenwidth()
window_height = gui.winfo_screenheight()
canvas = Canvas(gui, width=window_width, height=window_height,
                background='white')
canvas.pack()

##################################################################################
# read the state outline as (x, y) float pairs, one vertex per CSV line
point_list = []
f = open(utilities.get_file_path('florida.csv'))
for line in f.readlines():
    line = line.replace('\n', '')
    items = line.split(',')
    x = float(items[0])
    y = float(items[1])
    point_list.append((x, y))

# draw the outline and tag it so it can be looked up below
canvas.create_polygon(point_list, fill='white', outline='black',
                      tag='florida')
gui.update()

while True:
    center = utilities.get_center(canvas, 'florida')
    width = utilities.get_width(canvas, 'florida')
    print(center, width)
    shape_ids = canvas.find_withtag('florida')
    # NOTE(review): the loop body appears truncated here in this chunk.
# NOTE(review): the indented lines below are the tail of a training function
# whose header lies outside this chunk; it saves the trained model and plots
# the training history.
    tf.keras.models.save_model(model, model_path)
    # accuracy curves: training vs validation
    plt.plot(history.history['accuracy'])
    plt.plot(history.history['val_accuracy'])
    plt.title('model accuracy')
    plt.ylabel('acc')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc= 'upper left')
    plt.show()
    # loss curves: training vs validation
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.show()
    print("\n---Training done !")
    return


if __name__ == "__main__":
    train_data = get_file_path('other', 'train_data')
    train_model = get_file_path('result_train','model_name')
    # command-line switch: re-process the raw training images first
    isProcessData = get_argument()
    if isProcessData:
        # NOTE(review): "train_iamge" looks like a typo for "train_image";
        # left unchanged since this edit adds comments only.
        train_iamge = get_folder_path('data_train')
        process_train_data(train_iamge)
    face_classification_train(train_data, train_model)
    pass