def launch(self):
    # Run only when both files are set and each is a single path, not a list
    if (self.source is not None and self.target is not None
            and not isinstance(self.source, list) and not isinstance(self.target, list)):
        seedget = self.seed.get()
        percentget = self.percent.get()
        # Fall back to defaults when the fields do not hold valid integers
        try:
            fseed = int(seedget)
        except ValueError:
            fseed = 100
        try:
            fpercent = int(percentget)
            if fpercent <= 0 or fpercent > 100:
                fpercent = 100
        except ValueError:
            fpercent = 100
        source, target = lecture(self.source, self.target, fseed, fpercent)
        result = relation(source, target)
        savetext = self.resum([[fseed], [fpercent]] + result)
        self.savef = Button(self.fen1, text="Save",
                            command=lambda: self.savefile(savetext))
        self.savef.grid(row=7, column=3, sticky="W")
    else:
        self.txtfile3.configure(text="Missing files")
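# The save callback itself is not shown here. A minimal sketch of what
# `savefile` could look like, assuming `savetext` is a plain string
# (hypothetical implementation, not the original):
def savefile_sketch(self, savetext):
    from tkinter.filedialog import asksaveasfilename
    filename = asksaveasfilename(defaultextension=".txt")
    if filename:  # the dialog returns '' when cancelled
        with open(filename, "w", encoding="utf-8") as f:
            f.write(savetext)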
def charger(self):
    # Open a file dialog, narrowing the search to files whose names start with Sauv-
    fichier = QtWidgets.QFileDialog.getOpenFileName(
        window, 'Open file', 'F:\\MyPython\\Groupes\\', "Sauvegarde (Sauv*.*)")
    # Fill the list of existing subjects (left pane);
    # first check that the dialog was not cancelled (empty file name)
    if fichier[0] != '':
        # Reset both dictionaries
        self.listEnfants.clear()
        self.listSurGroupes.clear()
        # Extract the contents of the save file
        self.sauvSession = lecture.lecture(fichier[0])
        # Temporary copy of the session dictionary
        self.sauvSessionMod = self.sauvSession.copy()
        # Fill the left subject list from the freshly loaded session
        self.listeSuj()
        # Build a dictionary of children
        for suj in self.sauvSession.keys():
            for par in self.sauvSession[suj]['Parent']:
                if par in self.listEnfants.keys():
                    self.listEnfants[par].append(suj)
                else:
                    self.listEnfants[par] = [suj]
        # Build a dictionary of super-groups
        for suj in self.sauvSession.keys():
            for sousG in self.sauvSession[suj]['SousGroupe']:
                if sousG in self.listSurGroupes.keys():
                    self.listSurGroupes[sousG].append(suj)
                else:
                    self.listSurGroupes[sousG] = [suj]
        # Temporary copies of the session dictionaries
        self.listEnfantsMod = self.listEnfants.copy()
        self.listSurGroupesMod = self.listSurGroupes.copy()
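# For context: the loops above assume `lecture.lecture` returns a dict keyed
# by subject name, where each value holds at least 'Parent' and 'SousGroupe'
# lists. A hypothetical example of that shape (illustration only, not real data):
sauvSession_example = {
    "Alice": {"Parent": ["GroupeA"], "SousGroupe": ["SousG1"]},
    "Bob":   {"Parent": ["GroupeA", "GroupeB"], "SousGroupe": []},
}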
import time

import keyboard


def main():
    data = []
    # Poll prices until the user presses "q"
    while True:
        refresh()
        prix = lecture()
        print([prix[0], prix[1], prix[2], time.time()])
        data.append([prix[0], prix[1], prix[2], time.time()])
        if keyboard.is_pressed("q"):
            break
    return data
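# Usage sketch (assumes refresh() and lecture() are defined elsewhere in this
# module): collect price rows until "q" is pressed, then inspect them.
if __name__ == "__main__":
    rows = main()
    print(len(rows), "price samples collected")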
def lancement():
    # Initialise memory, registers, timers, etc.
    chip8.memoire = [0] * 4096
    chip8.V = [0] * 16
    chip8.I = 0
    chip8.DT = 0
    chip8.ST = 0
    chip8.PC = 512  # CHIP-8 programs start at address 0x200
    chip8.SP = 0
    chip8.saut = [0] * 16
    chip8.nbrSaut = 0
    chip8.tabEcran = [[0 for _ in range(64)] for _ in range(32)]  # 64x32 display
    effacerEcran()
    chip8.chargerCaractere()
    ecran.focus_set()  # give keyboard focus to the screen
    # Store the program in memory
    lecture.lecture(programme.get())
    # Start the fetch-decode-execute loop
    fonctionnement()
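# The ROM loader lives in another module. A minimal sketch of a CHIP-8 loader
# consistent with PC starting at 512 (0x200) — hypothetical, assuming
# `lecture.lecture` copies the file's bytes into chip8.memoire:
def charger_rom_sketch(chemin):
    with open(chemin, "rb") as f:
        rom = f.read()
    for i, octet in enumerate(rom):
        chip8.memoire[512 + i] = octet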
def full():
    data = []
    position = POSITION_ITEM_FIRST
    pyautogui.moveTo(position)
    pyautogui.click()
    time.sleep(1)
    # Walk down the item list, reading prices until no further item is found
    item = is_item(position[0], position[1])
    while item[0]:
        position = item[1]
        pyautogui.moveTo(position)
        pyautogui.click()
        prix = lecture()
        print([prix[0], prix[1], prix[2], time.time()])
        data.append([prix[0], prix[1], prix[2], time.time()])
        item = is_item(position[0], position[1])
    return len(data), data
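# `is_item` is defined elsewhere; from its use above it returns a pair
# (found, next_position). A hypothetical sketch using a pixel-colour test
# (the offset and colour are assumptions, not the original logic):
def is_item_sketch(x, y):
    next_pos = (x, y + 40)          # assumed row height
    pixel = pyautogui.pixel(next_pos[0], next_pos[1])
    found = pixel != (0, 0, 0)      # assumed: empty slots render black
    return found, next_pos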
def ajouter(nomFichier):
    # Add a Ligne object to listeLignes from another .txt file
    if nomFichier in listeFichiers:
        print("Warning: the requested file is already taken into account in the route computation")
    elif lecture.lecture(nomFichier) != ['vide']:
        listeFichiers.append(nomFichier)
        listeLignes.append(Ligne(nomFichier, mid(nomFichier, 5, len(nomFichier) - 9)))
        for fichier in listeFichiers:
            print(" ♦", fichier)
        print("\nList of available stops (do not use accents):")
        affArrets = []
        for liste in listeLignes:
            for arret in liste.path:
                affArrets.append(arret)
        print(list(set(affArrets)))
    else:
        print("\nWarning: the requested file was not found")
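# `mid` is a helper from another module; judging by the call above it mimics
# BASIC's Mid$(string, start, length) with a 1-based start index. A
# hypothetical equivalent:
def mid_sketch(chaine, debut, longueur):
    return chaine[debut - 1:debut - 1 + longueur]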
def __init__(self, nomFichier, nomLigne):
    self.nom = nomLigne
    self.slited_content = lecture.lecture(nomFichier)
    # Sections 0-2 describe weekdays ("semaine"), sections 3-5 the weekend
    self.path = self.slited_content[0 if jour == "semaine" else 3].replace("+", "n").split(" n ")
    self.date_go = lecture.dates2dic(self.slited_content[1 if jour == "semaine" else 4])
    self.date_back = lecture.dates2dic(self.slited_content[2 if jour == "semaine" else 5])
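# Illustration of the six-section layout the constructor expects from
# lecture.lecture. Stops are separated by " + "; the time-string format is an
# assumption (hypothetical values, not real data):
exemple_contenu = [
    "A + B + C",    # [0] weekday stop list
    "08:00 08:30",  # [1] weekday outbound times
    "17:00 17:30",  # [2] weekday return times
    "A + C",        # [3] weekend stop list
    "09:00",        # [4] weekend outbound times
    "18:00",        # [5] weekend return times
]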
import time

import cv2
import numpy as np


def yolov3_video(path):
    """
    Start of:
    Reading input video
    """

    # Defining 'VideoCapture' object and reading video from a file
    video = cv2.VideoCapture(path)

    # Preparing variable for the writer that will store processed frames
    writer = None

    # Preparing variables for spatial dimensions of the frames
    h, w = None, None

    """
    End of:
    Reading input video
    """

    """
    Start of:
    Loading YOLO v3 network
    """

    # Loading class labels from file,
    # reading every line and putting the labels into a list
    with open('../utils/names/ts_data.names') as f:
        labels = [line.strip() for line in f]

    # Loading trained YOLO v3 Objects Detector with the 'dnn' library from OpenCV
    path_to_weights = '../utils/weights/yolov3_ts_train_11000.weights'
    path_to_cfg = '../utils/cfg/yolov3_ts_test.cfg'
    network = cv2.dnn.readNetFromDarknet(path_to_cfg, path_to_weights)

    # Getting list with names of all layers from YOLO v3 network
    layers_names_all = network.getLayerNames()

    # Getting only the output layers' names that we need from YOLO v3,
    # via the function that returns indexes of layers with unconnected outputs
    layers_names_output = \
        [layers_names_all[i[0] - 1] for i in network.getUnconnectedOutLayers()]

    # Setting minimum probability to eliminate weak predictions
    probability_minimum = 0.5

    # Setting threshold for filtering weak bounding boxes
    # with non-maximum suppression
    threshold = 0.3

    # Generating colours for representing every detected object
    # with function randint(low, high=None, size=None, dtype='l')
    colours = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')

    """
    End of:
    Loading YOLO v3 network
    """

    """
    Start of:
    Reading frames in the loop
    """

    # Counters for processed frames and total processing time,
    # reported once all frames are done
    f = 0
    t = 0

    # Defining loop for catching frames
    while True:
        # Capturing frame-by-frame
        ret, frame = video.read()

        # If the frame was not retrieved, e.g. at the end of the video,
        # then we break the loop
        if not ret:
            break

        # Getting spatial dimensions of the frame; done only once,
        # since all other frames have the same dimensions
        if w is None or h is None:
            # Slicing only the first two elements from the tuple
            h, w = frame.shape[:2]

        # Getting blob from the current frame.
        # 'cv2.dnn.blobFromImage' returns a 4-dimensional blob after mean
        # subtraction, normalizing, and RB channels swapping; the resulting
        # shape is (number of frames, number of channels, width, height).
        blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
                                     swapRB=True, crop=False)

        # Implementing the forward pass with our blob through the output
        # layers only, timing the pass at the same time
        network.setInput(blob)
        start = time.time()
        output_from_network = network.forward(layers_names_output)
        end = time.time()

        # Increasing counters for frames and total time
        f += 1
        t += end - start

        # Showing time spent on the single current frame
        print('Frame number {0} took {1:.5f} seconds'.format(f, end - start))

        # Preparing lists for detected bounding boxes,
        # obtained confidences and class numbers
        bounding_boxes = []
        confidences = []
        class_numbers = []

        # Going through all output layers after the feed forward pass
        for result in output_from_network:
            # Going through all detections from the current output layer
            for detected_objects in result:
                # Getting class probabilities for the current detected object
                scores = detected_objects[5:]
                # Index of the class with the maximum probability
                class_current = np.argmax(scores)
                # Probability value for that class
                confidence_current = scores[class_current]

                # Eliminating weak predictions with minimum probability
                if confidence_current > probability_minimum:
                    # YOLO keeps the bounding box centre plus width and height,
                    # all normalised, so multiplying elementwise by the original
                    # frame size scales them back to frame coordinates
                    box_current = detected_objects[0:4] * np.array([w, h, w, h])

                    # From the centre, derive the top-left corner (x_min, y_min)
                    x_center, y_center, box_width, box_height = box_current
                    x_min = int(x_center - (box_width / 2))
                    y_min = int(y_center - (box_height / 2))

                    # Adding results into the prepared lists
                    bounding_boxes.append([x_min, y_min,
                                           int(box_width), int(box_height)])
                    confidences.append(float(confidence_current))
                    class_numbers.append(class_current)

        # Implementing non-maximum suppression of the bounding boxes:
        # a box is excluded when its confidence is low or another box covers
        # the same region with higher confidence.
        # Box coordinates must be 'int' and confidences 'float':
        # https://github.com/opencv/opencv/issues/12789
        results = cv2.dnn.NMSBoxes(bounding_boxes, confidences,
                                   probability_minimum, threshold)

        # Checking if at least one object survived non-maximum suppression
        if len(results) > 0:
            # Going through indexes of results
            for i in results.flatten():
                # Current bounding box coordinates, width and height
                x_min, y_min = bounding_boxes[i][0], bounding_boxes[i][1]
                box_width, box_height = bounding_boxes[i][2], bounding_boxes[i][3]

                # Colour for the current bounding box,
                # converted from numpy array to list
                colour_box_current = colours[class_numbers[i]].tolist()

                # Drawing the bounding box on the original current frame
                cv2.rectangle(frame, (x_min, y_min),
                              (x_min + box_width, y_min + box_height),
                              colour_box_current, 2)

                # Preparing text with label and confidence
                text_box_current = '{}: {:.4f}'.format(
                    labels[int(class_numbers[i])], confidences[i])

                # Putting text with label and confidence on the frame
                cv2.putText(frame, text_box_current, (x_min, y_min - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, colour_box_current, 2)

        # Initializing the writer only once, when the frame dimensions are known
        if writer is None:
            # Constructing the codec code to be used by VideoWriter
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            new_path = path[:-4] + '-result' + path[-4:]
            writer = cv2.VideoWriter(new_path, fourcc, 30,
                                     (frame.shape[1], frame.shape[0]), True)

        # Writing the processed frame into the output file
        writer.write(frame)

    """
    End of:
    Reading frames in the loop
    """

    # Printing final results
    print()
    print('Total number of frames', f)
    print('Total amount of time {:.5f} seconds'.format(t))
    print('FPS:', round((f / t), 1))

    # Releasing the video reader and writer
    video.release()
    writer.release()

    lecture(str(new_path))
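# Usage sketch: process a video and write '<name>-result.<ext>' next to it
# (the sample path is an assumption):
if __name__ == '__main__':
    yolov3_video('../videos/traffic_sign.mp4')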
from os import chdir

import pydub
import pygame

from speechrecognition import parler
from interagir import robotResponse
from response_audio import textToSpeech
from lecture import lecture
import audio

path = "g:/python/smart_robot_test"
language = "fr"
chdir(path)

# Listen, build a reply, then synthesise it
text = parler()
response = robotResponse(text)
print(response)
textToSpeech(response)

# Convert the synthesised mp3 to wav and play it
pygame.init()
sound = pydub.AudioSegment.from_mp3("g:/python/smart_robot_test/response.mp3")
sound.export(path + "/response.wav", format="wav")
pygame.mixer.music.set_volume(0.99)
lecture("g:/python/smart_robot_test/response.wav")
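# `lecture` comes from a local module. Given the pygame.mixer setup above, a
# minimal sketch of a compatible implementation (an assumption, not the original):
def lecture_sketch(chemin_wav):
    pygame.mixer.music.load(chemin_wav)
    pygame.mixer.music.play()
    while pygame.mixer.music.get_busy():  # block until playback finishes
        pygame.time.wait(100)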
# Create an object of the class MFRC522
# MIFAREReader = MFRC522.MFRC522()

# Welcome message
print("Welcome to the MFRC522 data read example")
print("Press Ctrl-C to stop.")

# Mark everyone absent for the day
gestion.ClearInEsilv()

# This loop keeps checking for chips. If one is near it will get the UID and authenticate
while continue_reading:
    # ecran.afficher("Presenter Carte")
    print("Presenter la Carte")
    uid = lecture.lecture()
    # print("id carte est " + str(id_carte))
    if uid != "None":
        id_carte = str(uid)
        gestion.PresentWithId(id_carte)
        # print("l eleve est present")
        horaire = gestion.NextCours(id_carte)
        if horaire == []:
            ecran.afficher("Pas cours")
        else:
            show = (str(horaire[0][3][0]) + "H" + str(horaire[0][3][1])
                    + " en " + str(horaire[1] + " "))
            ecran.afficher(show)
            print(show)
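# The "Press Ctrl-C to stop" message implies a SIGINT handler ends the loop.
# A hypothetical sketch consistent with that (handler name and message assumed):
import signal

def end_read(signum, frame):
    global continue_reading
    continue_reading = False
    print("Ctrl+C captured, ending read.")

signal.signal(signal.SIGINT, end_read)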