def __init__(self) -> None:
    """Initialize the app: wire up the controller objects, clear Kivy's
    image caches, and reset the generated map/score images to defaults.
    """
    App.__init__(self)
    # Allows access to controller class to send signals.
    self.Controller = Controller()
    # For GPS/Mapping.
    self.TargetInfo = TargetInfo()
    # Worker threads are created lazily; None means "not started yet".
    self.tp = None
    self.th_drone_control = None
    self.th_drone_adv_control = None
    self.th_doc = None
    # Drop cached textures so the regenerated files below are re-read.
    Cache.remove('kv.image')
    Cache.remove('kv.texture')
    # Reset the drone-path map to the blank default.  Context managers
    # release the file handles (the original leaked them).
    with Image.open("Gmap/default.png") as image:
        image.copy().save("Gmap/drone_path.png")
    # Reset both score images to the default placeholder.
    with Image.open("images/default_score.png") as image:
        default_score = image.copy()
    default_score.save("images/scored.png")
    default_score.save("images/prescored.png")
def generateGlyph(self):
    """Render ``currentGlyph.png`` for the current prediction/confidence.

    Falls back to a "no data" glyph when ``curr_prd`` is -1; otherwise the
    confidence picks one of seven backgrounds (A best .. G worst) and the
    predicted number is drawn centred on top.
    """
    global curr_prd
    global curr_conf
    if curr_prd == -1:
        shutil.copy("glyphNoData_crp.png", "currentGlyph.png")
        return
    stemName = ['G', 'F', 'E', 'D', 'C', 'B', 'A']
    # Offset confidence so 0.3 is lowest, then in steps of 0.1 upwards.
    if curr_conf < 0.3:
        nameIndx = 0
    else:
        nameIndx = int(10.0 * (curr_conf - 0.3))
    nameIndx = np.clip(nameIndx, 0, 6)
    filename = '.\\VisEntGlyphs\\' + stemName[nameIndx] + '_crp_over.png'
    source_img = Image.open(filename).convert("RGBA")
    draw = ImageDraw.Draw(source_img)
    font = ImageFont.truetype("arial.ttf", 240)
    num = str(curr_prd)
    # Pillow >= 10 removed ImageDraw.textsize(); use textbbox() when
    # available and fall back to textsize() on old Pillow.
    if hasattr(draw, "textbbox"):
        left, top, right, bottom = draw.textbbox((0, 0), num, font=font)
        xD, yD = right - left, bottom - top
    else:
        xD, yD = draw.textsize(num, font=font)
    # Centre the number at (320, 240) on the glyph.
    draw.text((320 - (xD / 2), 240 - (yD / 2)), num,
              fill=(255, 255, 0, 255), font=font)
    font = ImageFont.truetype("arial.ttf", 32)
    txt = "High ------------- probability ------------- Low"
    draw.text((20, 525), txt, fill=(51, 51, 51, 255), font=font)
    source_img.save("currentGlyph.png")
def view_selected_file(self, path, filename):
    """Update the file-chooser labels with the selected BMP file's name/size.

    ``filename`` is the Kivy FileChooser selection list: an empty list
    resets the labels; a non-BMP selection only updates the hint text.
    """
    try:
        if not filename:
            # Nothing selected: restore the placeholder labels.
            self.root.ids._My_file_name.text = "MyFile"
            self.root.ids._Usb_file_name.text = "USB"
            self.root.ids._My_file_hint.text = "Select BMP File"
            self.root.ids._Usb_file_hint.text = "Select BMP File"
        elif not filename[0].lower().endswith('.bmp'):
            # Wrong file type.  Case-insensitive check so '.BMP' is accepted
            # (the original compared the last 3 chars against 'bmp' only).
            self.root.ids._My_file_hint.text = "Select BMP File"
            self.root.ids._Usb_file_hint.text = "Select BMP File"
        else:
            current = self.root.ids._screen_manager.current
            if current == 'myfile_screen':
                self.root.ids._My_file_name.text = filename[0]
            if current == 'usb_screen':
                self.root.ids._Usb_file_name.text = filename[0]
            with Image.open(os.path.join(path, filename[0])) as stream:
                width, height = stream.size
            if current == 'myfile_screen':
                self.root.ids._My_file_size.text = '{} x {}'.format(width, height)
            if current == 'usb_screen':
                self.root.ids._Usb_file_size.text = '{} x {}'.format(width, height)
    except Exception as exc:
        # Log the actual error (the original printed a misspelled constant
        # message and discarded the exception entirely).
        print('unknown error in view_selected_file:', exc)
def img_to_text(self):
    """OCR the image at ``self.img_to_text_path`` into the home screen's text field."""
    from PIL import Image
    img = Image.open(self.img_to_text_path)
    text_extracted = image_to_string(img)
    # Tesseract terminates its output with a newline/form-feed; strip exactly
    # that instead of blindly chopping the last two characters (which could
    # eat real text when the output is very short).
    text_extracted = text_extracted.rstrip('\n\x0c')
    home = self.screen.get_screen('home')
    home.ids.text_extracted.text = text_extracted
    home.ids.text_extracted.hint_text = ''
    Snackbar(text='Image to text conversion successfully done').open()
def scan():
    """Set up the Pi camera and scan for a QR code.

    Returns the decoded QR-code string, or ``False`` if no code was read
    within the 10-second timeout.
    """
    print('scan method called')
    # starts the process of using the camera and scanning;
    # in the following procedure the pi camera is called camera
    with PiCamera() as camera:
        # because it's unclear whether the camera works as expected it's in a
        # try/except block; if any problem occurs the program doesn't stop
        try:
            # starts a preview of the camera, not visible because of the kivy
            # graphics framework
            camera.start_preview()
            # give the camera a moment to warm up
            time.sleep(1)
            print('start scanning')
        # NOTE(review): bare except swallows *all* errors here, and the scan
        # loop below still runs even if the camera failed to start.
        except:
            print('problem with pi camera start')
        # file-like object for reading and writing the captured bytes
        stream = BytesIO()
        # will hold the scan result
        qr_codes = None
        # set timeout to 10 seconds
        timeout = time.time() + 10
        # keep capturing frames until a QR code is found or the timeout expires
        while qr_codes is None and (time.time() < timeout):
            stream.seek(0)
            # capture an image into the stream (make sure the RaspberryPi
            # camera is focused correctly; manually adjust it if not)
            camera.capture(stream, "jpeg")
            stream.seek(0)
            # zbarlight scans the image stream for a qr-code specifically
            qr_codes = zbarlight.scan_codes("qrcode", Image.open(stream))
            # small pause between frames
            time.sleep(0.05)
        # after the loop the preview stops
        camera.stop_preview()
    # bail out if we didn't get a qr code scan within the timeout
    if not qr_codes:
        print('no qr code within 10 seconds')
        return False
    # decode the first qr_code to get the data
    qr_code = qr_codes[0].decode()
    return qr_code
def build(self):
    # Kivy App.build: show the image named in ``content[1]`` on a scatter plane.
    ikon = content[3]  # NOTE(review): unused in this method -- confirm it is needed
    s = ScatterPlane(scale=.5)
    filename = join(imdir, 'background.png')
    print filename[:-1]
    # content[1] comes from a text file, so strip its trailing newline
    filename2 = join(imdir, content[1][:-1])
    print filename2
    # widget that actually gets displayed
    im = Image(source=filename2, size=(1920, 1090))
    # NOTE(review): ``Image`` is used both as a Kivy widget (above, keyword
    # ``source=``) and as PIL's Image module (below, ``.open``); only one
    # binding can exist at file scope, so one of these calls is almost
    # certainly broken -- verify the imports at the top of the file.
    img = Image.open(filename2)
    exif_data = img._getexif();  # NOTE(review): unused; also a private PIL API
    s.add_widget(im)
    return s
def data_img(self):
    """Draw the timetable data in ``self.res`` onto the blank form image.

    ``self.res`` is a sequence of rows, each a sequence of cell strings;
    cells are placed on a fixed grid over ``form.png`` and the result is
    saved as ``course.png``.
    """
    # Grid geometry of form.png: first-cell origin and cell pitch.
    # (The original reset x to 265.6 per row but started at 265.62 -- a
    # typo; one constant is used consistently here.)
    start_x, start_y = 265.62, 270.9
    step_x, step_y = 200, 220
    img = Image.open("form.png")
    draw = ImageDraw.Draw(img)
    font = ImageFont.truetype('simhei.ttf', 20)
    pos_y = start_y
    for row in self.res:
        pos_x = start_x
        for cell in row:
            draw.text((pos_x, pos_y), cell, fill=(255, 0, 255), font=font)
            pos_x += step_x
        pos_y += step_y
    img.save("course.png")
def circularize_img(img_fn, width, im=None, do_crop=True, bw=False, resize=True, circularize=True):
    """Return a PNG byte stream of *img_fn* (or *im*) cropped, resized,
    recolored, and optionally masked to a circle.

    Parameters:
        img_fn: path to the source image (ignored when *im* is given).
        width: target square size in pixels.
        im: optional already-open PIL image to process instead of *img_fn*.
        do_crop / resize / bw / circularize: processing toggles.

    Returns an ``io.BytesIO`` positioned at 0 containing the PNG data.
    """
    from PIL import Image, ImageOps, ImageDraw
    from kivy.app import App
    import numpy as np
    log = App.get_running_app().log
    # Honor a caller-supplied image; only open the file when none was given
    # (the original unconditionally reopened the file, ignoring ``im``).
    if im is None:
        im = Image.open(img_fn).convert('RGB')
    log('??', im)
    if do_crop:
        im = crop_square(im, width, width)
    if resize:
        im = im.resize((width, width))
    if bw:
        # Threshold to pure black/white, then back to RGB for recoloring.
        thresh = 175
        fn = lambda x: 255 if x > thresh else 0
        im = im.convert('L').point(fn, mode='1').convert('RGB')
    # Replace pure white pixels with the icon color.
    orig_color = (255, 255, 255)
    replacement_color = COLOR_ICON
    data = np.array(im)
    data[(data == orig_color).all(axis=-1)] = replacement_color
    im = Image.fromarray(data, mode='RGB').convert('RGBA')
    # BUG FIX: the original tested ``if circularize_img:`` -- the function
    # object itself, which is always truthy -- so the flag was ignored.
    if circularize:
        # Draw the mask at 3x and downscale for smooth anti-aliased edges.
        bigsize = (im.size[0] * 3, im.size[1] * 3)
        mask = Image.new('L', bigsize, 0)
        draw = ImageDraw.Draw(mask)
        draw.ellipse((0, 0) + bigsize, fill=255)
        # LANCZOS is the modern name for ANTIALIAS (removed in Pillow 10).
        mask = mask.resize(im.size, Image.LANCZOS)
        im.putalpha(mask)
    log('!!', im)
    # ``mask.size`` equalled ``im.size`` in the original; use im.size so this
    # also works when circularize is False.
    output = ImageOps.fit(im, im.size, centering=(0.5, 0.5))
    imgByteArr = io.BytesIO()
    output.save(imgByteArr, format='PNG')
    imgByteArr.seek(0)
    return imgByteArr
def select_to(self, *args):
    # File-chooser callback: convert the selected image to 1-bit black/white,
    # save it over 'Yellow_Rose.jpg', and refresh the preview widgets.
    try:
        print args[1][0]
        iw = Image.open(args[1][0])
        iw.save('Yellow_Rose.jpg')
        # mode '1' = 1-bit bilevel (dithered black & white)
        gray = iw.convert('1')
        gray.save('Yellow_Rose.jpg')
        self.img3.source = 'Yellow_Rose.jpg'
        self.img4.source = 'Yellow_Rose.jpg'
        self.img.source = 'Yellow_Rose.jpg'
        # reload() forces Kivy to re-read the file it may have cached
        self.img.reload()
        self.img3.reload()
        self.img4.reload()
    # NOTE(review): bare except hides every failure (bad path, unreadable
    # image, missing widgets) -- consider at least logging it.
    except:
        pass
def text(self, selection):
    """Run OCR on the selected image file and show/speak the result.

    ``selection`` is the file-chooser selection list; when empty an error
    popup is shown (and spoken, unless muted via ``self.soundcount``).
    """
    # Windows paths are written as raw strings below: '\c', '\S' and '\T'
    # are invalid escape sequences (SyntaxWarning on modern Python); the
    # raw form keeps the byte values identical.
    if not selection:
        layout = BoxLayout(orientation="vertical", spacing=10, padding=10)
        popupLabel = Label(text="No file is selected .")
        print("No file is selected .")
        closeButton = Button(background_normal=r'img\close.png',
                             background_down=r'img\close.png',
                             size_hint=(.38, .6),
                             pos_hint={"center_x": .5})
        layout.add_widget(popupLabel)
        layout.add_widget(closeButton)
        popup = Popup(title='Error', content=layout, size_hint=(.8, .6))
        popup.open()
        closeButton.bind(on_press=popup.dismiss)
        engine = pyttsx3.init()
        # soundcount == 0 means speech output is enabled.
        if self.soundcount == 0:
            engine.say("No file is selected .")
            print("No file is selected .")
            engine.runAndWait()
    else:
        print("Input Path : {}".format(selection[0]))
        try:
            from PIL import Image
        except ImportError:
            import Image
        import pytesseract
        pytesseract.pytesseract.tesseract_cmd = r'C:\Softwares\Tesseract-OCR\Tesseract.exe'
        text = pytesseract.image_to_string(Image.open(selection[0]))
        if text == "":
            text = "There is no text in the image ."
        layout = BoxLayout(orientation="vertical", spacing=10, padding=10)
        popupLabel = Label(text=text)
        closeButton = Button(background_normal=r'img\close.png',
                             background_down=r'img\close.png',
                             size_hint=(.38, .31),
                             pos_hint={"center_x": .5})
        layout.add_widget(popupLabel)
        layout.add_widget(closeButton)
        popup = Popup(title='Text Recognition', content=layout, size_hint=(.8, .9))
        popup.open()
        closeButton.bind(on_press=popup.dismiss)
        print(text)
        engine = pyttsx3.init()
        if self.soundcount == 0:
            engine.say(text)
            engine.runAndWait()
def uploadImage(self, instance):
    """Let the user pick an image and print the location the model predicts."""
    # We don't want a full GUI, so keep the root Tk window from appearing.
    Tk().withdraw()
    # Show an "Open" dialog box and return the path to the selected file.
    filename = askopenfilename()
    print(filename)
    # The dialog returns '' when the user cancels; opening that would crash.
    if not filename:
        return
    transform = transforms.Compose(
        [transforms.Resize(128), transforms.ToTensor()])
    image = Image.open(filename)
    # Add the batch dimension expected by the model: (1, C, H, W).
    image_tensor = transform(image).float()
    image_tensor = image_tensor.unsqueeze_(0)
    # Forward pass; argmax over class scores picks the predicted label.
    output = model(image_tensor)
    index = output.data.numpy().argmax()
    print(location[index])
def on_touch_down(self, touch):
    """Handle a click on the calibration image.

    Converts the touch position to a pixel in ``calibImage.jpg`` and, when
    a min/max selection is armed (``self.selection`` is 0 or 1), stores that
    pixel's color as the current calibration bound.
    """
    super(CalibrationScreen, self).on_touch_down(touch)
    # Position relative to the image's bottom-left corner...
    normalized_pos = [touch.x - self.image_offset[0],
                      touch.y - self.image_offset[1]]
    # ...expressed as a fraction (0..1) of the image's size.
    normalized_pos[0] = normalized_pos[0] / self.image_dimensions[0]
    normalized_pos[1] = normalized_pos[1] / self.image_dimensions[1]
    # Ignore the touch when no selection is armed (-1) or the click fell
    # outside the image (fraction outside the 0..1 range).
    if (self.selection == -1
            or normalized_pos[0] > 1 or normalized_pos[0] < 0
            or normalized_pos[1] > 1 or normalized_pos[1] < 0):
        return
    # Read the clicked pixel from the calibration snapshot.  The context
    # manager releases the file handle (the original leaked it).
    with Image.open('calibImage.jpg') as img:
        # Fraction -> integer pixel coordinates, clamped so a click exactly
        # on the far edge (fraction == 1.0) stays inside the bitmap instead
        # of raising an out-of-range error in getpixel().
        img_x = min(int(normalized_pos[0] * img.width), img.width - 1)
        img_y = min(int(normalized_pos[1] * img.height), img.height - 1)
        # Invert y: touch coordinates grow upward, image rows grow downward.
        img_y = img.height - 1 - img_y
        r, g, b = img.getpixel((img_x, img_y))
    # Depending on the armed action, set the min or max bound (stored BGR).
    if self.selection == 0:
        CURRENT_COLOR.set_min([b, g, r])
        self.r_min = r / 255
        self.g_min = g / 255
        self.b_min = b / 255
    if self.selection == 1:
        CURRENT_COLOR.set_max([b, g, r])
        self.r_max = r / 255
        self.g_max = g / 255
        self.b_max = b / 255
    # Disarm the selection.
    self.selection = -1
    self.current_action_string = 'No action selected'
    self.update_color_string()
def getImagesAndLabels(path):
    """Collect face crops and their numeric IDs from the images under *path*.

    File names are expected to look like ``user.<id>.<n>.jpg``; the second
    dot-separated field is the numeric label.  Returns ``(faceSamples, ids)``
    where ``faceSamples`` are grayscale numpy crops and ``ids`` the matching
    labels (one entry per detected face).
    """
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faceSamples = []
    ids = []
    for imagePath in imagePaths:
        # Grayscale: the detector and recognizer work on single-channel data.
        PIL_img = Image.open(imagePath).convert('L')
        img_numpy = np.array(PIL_img, 'uint8')
        # Renamed from ``id`` so the builtin isn't shadowed.
        face_id = int(os.path.split(imagePath)[-1].split(".")[1])
        faces = detector.detectMultiScale(img_numpy)
        for (x, y, w, h) in faces:
            faceSamples.append(img_numpy[y:y + h, x:x + w])
            ids.append(face_id)
    return faceSamples, ids
def update_icon(self, online=True):
    # Switch the status icon between online/offline art and, when there are
    # queued offline uploads, badge it with the pending count.
    offline = '_offline'
    if online:
        offline = ''
    self.icon_wid.source = 'data/icon' + offline + '.png'
    try:
        # One queued upload per JSON file in offline/.
        upload_count = len(glob.glob('offline/*.json'))
        if upload_count > 0:
            img = Image.open('data/icon' + offline + '.png')
            draw = ImageDraw.Draw(img)
            # NOTE(review): the fill alpha is 0 here, so on an RGBA icon this
            # ellipse is fully transparent -- confirm whether 255 was intended.
            draw.ellipse((50, 65, 95, 95), fill=(165, 208, 101, 0))
            font = ImageFont.truetype("data/verdanab.ttf", 24)
            # Nudge two-digit counts left so they stay centred in the badge.
            posx = 65
            if upload_count > 9:
                posx = 55
            draw.text((posx, 65), str(upload_count), (255, 255, 255), font=font)
            # Write to a separate file so the pristine icon stays untouched.
            img.save('data/icon2' + offline + '.png')
            self.icon_wid.source = 'data/icon2' + offline + '.png'
            self.icon_wid.reload()
    except:
        traceback.print_exc(file=sys.stdout)
def capture(self):
    """Capture a camera frame, classify it with the CNN, and show the result.

    The frame is exported to ``IMG_<timestamp>.png``, trimmed of its uniform
    border, run through ``cnn_model``, and the popup matching the one-hot
    prediction is opened.
    """
    camera = self.ids['camera']
    # Timestamped filename for the captured image.
    timestr = time.strftime("%Y%m%d_%H%M%S")
    camera.export_to_png("IMG_{}.png".format(timestr))

    def trim(im):
        """Crop the uniform border (color of the top-left pixel) from *im*."""
        bg = Image.new(im.mode, im.size, im.getpixel((0, 0)))
        diff = ImageChops.difference(im, bg)
        diff = ImageChops.add(diff, diff, 2.0, -100)
        bbox = diff.getbbox()
        if bbox:
            return im.crop(bbox)
        # Entirely uniform image: nothing to trim.  (The original returned
        # None here, which crashed the save() below.)
        return im

    # Replace the capture with its trimmed version.
    im = Image.open("IMG_{}.png".format(timestr))
    im = trim(im)
    im.save("IMG_{}.png".format(timestr))

    def prepare(filepath):
        """Load *filepath* as grayscale and reshape to the model's 28x28x1 input."""
        img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
        new_array = cv2.resize(img_array, (28, 28))
        return new_array.reshape(-1, 28, 28, 1)

    # Class index -> (label, popup content factory), in model output order.
    # Replaces the original nine near-identical show_popup_* functions and
    # the nine-way elif chain.
    classes = [
        ("akiec", akiec), ("bcc", bcc), ("bkl", bkl), ("df", df),
        ("mel", mel), ("nv", nv), ("vasc", vasc), ("norm", norm), ("obj", obj),
    ]

    prediction = cnn_model.predict(prepare("IMG_{}.png".format(timestr)))
    print(prediction)

    # The model emits a one-hot vector; open the popup for the hot class.
    for idx, (label, content_cls) in enumerate(classes):
        if prediction[0][idx] == 1:
            popupWindow = Popup(title="The Results", content=content_cls(),
                                size_hint=(None, None), size=(400, 400))
            popupWindow.open()
            print("The ml says " + label)
            time.sleep(1)
            break
def guardar_informe(self):
    # Build the four-sheet Excel report (DATOS / CALCULE / ESCUCHE / PREVENGA)
    # with xlwt, embed the pre-rendered plots as bitmaps, save it as
    # Informe_CEP.xls, and open it in Excel.
    # ---- input validation (error handling) --------------------------------
    global datos_introducidos
    try:
        self.verificar_parametros(datos_introducidos)
    except ErrorEntrada as e:
        self.show_popup(e.message)
        return
    try:
        self.revisar_audio_cargado()
    except ErrorEntrada as e:
        self.show_popup(e.message)
        return
    # Band centre frequencies (Hz) reported in the tables.
    frecuencias = [63, 125, 250, 500, 1000, 1500, 2000, 3000, 4000, 6000, 8000]
    # Hearing-loss estimates H/H' and protector attenuations, computed elsewhere.
    h, h_prima, atenuacion_h, atenuacion_h_prima = self.calcular_prt()
    wbk = xlwt.Workbook(encoding='utf-8')
    hoja_datos = wbk.add_sheet('1. DATOS', cell_overwrite_ok=True)
    hoja_datos.protect = True
    sheet = wbk.add_sheet(u'2. CALCÚLE', cell_overwrite_ok=True)
    sheet.protect = True
    sheet1 = wbk.add_sheet(u'3. ESCÚCHE', cell_overwrite_ok=True)
    sheet1.protect = True
    sheet2 = wbk.add_sheet(u'4. PREVÉNGA', cell_overwrite_ok=True)
    sheet2.protect = True
    alignment = xlwt.Alignment()  # create Alignment
    alignment.horz = xlwt.Alignment.HORZ_CENTER
    borders = xlwt.Borders()  # create Borders
    borders.left = xlwt.Borders.MEDIUM
    borders.right = xlwt.Borders.MEDIUM
    borders.top = xlwt.Borders.MEDIUM
    borders.bottom = xlwt.Borders.MEDIUM
    borders.left_colour = 0x40
    borders.right_colour = 0x40
    borders.top_colour = 0x40
    borders.bottom_colour = 0x40
    estilobg = xlwt.XFStyle()
    pattern = xlwt.Pattern()  # create the Pattern
    pattern.pattern = xlwt.Pattern.NO_PATTERN
    pattern.pattern_fore_colour = xlwt.Style.colour_map['white']
    # HEADERS 1 -ARIAL 13- -BOLD- -LEFT- -WITH BORDERS-
    estilo11 = xlwt.XFStyle()
    font = xlwt.Font()
    font.name = 'Arial'
    font.height = 260
    font.bold = True
    estilo11.font = font
    estilo11.borders = borders
    # TITLE -ARIAL 15- -BOLD- -CENTERED-
    estilo = xlwt.XFStyle()
    font = xlwt.Font()
    font.height = 300
    font.bold = True
    estilo.font = font
    estilo.alignment = alignment
    # HEADERS 2 -ARIAL 13- -BOLD- -CENTERED- -NO BORDERS-
    # NOTE(review): estilo12 and estilo1 reuse the *same* Font object as
    # estilo, so these height/bold tweaks mutate all three styles.
    estilo12 = xlwt.XFStyle()
    font.height = 260
    font.bold = True
    estilo12.font = font
    estilo12.alignment = alignment
    # FREQUENCIES -ARIAL 13- -BOLD- -CENTERED- -WITH BORDERS-
    estilo1 = xlwt.XFStyle()
    font.height = 260
    font.bold = True
    estilo1.font = font
    estilo1.alignment = alignment
    estilo1.borders = borders
    # VALUES -ARIAL 13- -CENTERED- -WITH BORDERS-
    estilo2 = xlwt.XFStyle()
    font = xlwt.Font()
    font.height = 260
    estilo2.font = font
    estilo2.alignment = alignment
    estilo2.borders = borders
    # VALUES -ARIAL 13- -CENTERED- -NO BORDERS-
    estilo21 = xlwt.XFStyle()
    font = xlwt.Font()
    font.height = 260
    estilo21.font = font
    estilo21.alignment = alignment
    fecha_hora = datetime.now().strftime('%d/%m/%y %H:%M')
    # USER-SUPPLIED DATA ___________________________________________________
    edad = datos_introducidos.edad
    sexo = datos_introducidos.sexo
    tiempo_exposicion = datos_introducidos.tiempo_exposicion
    nivel_exposicion = datos_introducidos.nivel_exposicion
    fractil = datos_introducidos.fractil
    # White background blocks framing the data table.
    hoja_datos.write_merge(0, 4, 0, 13, '', estilobg)
    hoja_datos.write_merge(5, 9, 0, 5, '', estilobg)
    hoja_datos.write_merge(5, 9, 8, 13, '', estilobg)
    hoja_datos.write_merge(10, 11, 0, 13, '', estilobg)
    hoja_datos.write_merge(0, 0, 0, 13, u'INFORME SOFTWARE CALCÚLE, ESCÚCHE Y PREVÉNGA', estilo)
    hoja_datos.write_merge(1, 1, 0, 13, 'Fecha: ' + fecha_hora + ' - Página 1/4', estilo12)
    hoja_datos.write_merge(3, 3, 0, 13, u'DATOS', estilo12)
    hoja_datos.write(5, 6, 'Edad:', estilo11)
    hoja_datos.write(6, 6, 'Sexo:', estilo11)
    hoja_datos.write(7, 6, u'Fráctil:', estilo11)
    hoja_datos.write(8, 6, u'Tiempo de Exposición:', estilo11)
    hoja_datos.write(9, 6, u'Nivel de Exposición:', estilo11)
    hoja_datos.write(5, 7, edad, estilo2)
    hoja_datos.write(6, 7, sexo, estilo2)
    hoja_datos.write(7, 7, fractil, estilo2)
    hoja_datos.write(8, 7, tiempo_exposicion, estilo2)
    hoja_datos.write(9, 7, nivel_exposicion, estilo2)
    hoja_datos.col(6).width = 256 * 28
    hoja_datos.col(7).width = 256 * 7
    i = 0
    while i < 6:
        hoja_datos.col(i).width = 256 * 6
        i += 1
    i = 8
    while i < 14:
        hoja_datos.col(i).width = 256 * 6
        i += 1
    hoja_datos.write_merge(12, 12, 0, 13, 'Continúe a la página 2 >>>>', estilo21)
    # hoja_datos.write_merge(12,12,0,13,'Con ɔopyleft', estilo21)
    # CALCULE REPORT (page 2) ______________________________________________
    sheet.write_merge(0, 4, 0, 13, '', estilobg)
    sheet.write_merge(5, 7, 0, 0, '', estilobg)
    sheet.write_merge(5, 7, 13, 13, '', estilobg)
    sheet.write_merge(8, 35, 0, 13, '', estilobg)
    sheet.write_merge(0, 0, 0, 13, u'INFORME SOFTWARE CALCÚLE, ESCÚCHE Y PREVÉNGA', estilo)
    sheet.write_merge(1, 1, 0, 13, 'Fecha: ' + fecha_hora + ' - >>> Página 2/4 <<<', estilo12)
    sheet.write_merge(3, 3, 0, 13, u'ESTIMACIÓN DE PÉRDIDA AUDITIVA H Y H\'', estilo12)
    sheet.write(5, 1, 'Frec. (Hz)', estilo11)
    sheet.write(6, 1, 'H', estilo11)
    sheet.write(7, 1, 'H\'', estilo11)
    for i, x in enumerate(frecuencias):
        sheet.write(5, i + 2, frecuencias[i], estilo1)  # frequency
        sheet.write(6, i + 2, h[i], estilo2)  # H
        sheet.write(7, i + 2, h_prima[i], estilo2)  # H'
    # column widths
    sheet.col(0).width = 256 * 7  # roughly one inch
    sheet.col(1).width = 256 * 13  # 256 * number of characters
    sheet.col(2).width = 256 * 6
    sheet.col(3).width = 256 * 6
    sheet.col(4).width = 256 * 6
    i = 5
    while i < 14:
        sheet.col(i).width = 256 * 7
        i += 1
    ancho = 605
    alto = 454
    # NOTE(review): resize(...).convert(...).save(...) returns None, so the
    # ``imagen*`` variables are always None; these chained calls run purely
    # for the side effect of writing the .bmp files xlwt can embed.
    # Image.ANTIALIAS is removed in Pillow 10 (use Image.LANCZOS there).
    imagen = Image.open(os.path.join('images', 'H_y_Hprima_octavs.png')).resize((ancho, alto), Image.ANTIALIAS).convert('RGB').save(os.path.join('images', 'H_y_Hprima_octavs.bmp'))
    sheet.insert_bitmap(os.path.join('images', 'H_y_Hprima_octavs.bmp'), 9, 1)
    # ESCUCHE REPORT (page 3) ______________________________________________
    sheet1.write_merge(0, 39, 0, 13, '', estilobg)
    sheet1.write_merge(0, 0, 0, 13, u'INFORME SOFTWARE CALCÚLE, ESCÚCHE Y PREVÉNGA', estilo)
    sheet1.write_merge(1, 1, 0, 13, 'Fecha: ' + fecha_hora + ' - >>> Página 3/4 <<<', estilo12)
    global directorio_audio
    audio_seleccionado = directorio_audio
    sheet1.write_merge(3, 3, 0, 13, 'Audio: ' + os.path.basename(audio_seleccionado), estilo21)
    sheet1.write_merge(5, 5, 0, 13, u'FORMA DE ONDA', estilo12)
    sheet1.write_merge(12, 12, 0, 13, u'ESPÉCTRO', estilo12)
    alto1 = 95
    ancho1 = 296 + 100
    imagen1 = Image.open(os.path.join('images', 'forma_de_onda.png')).resize((ancho1, alto1), Image.ANTIALIAS).convert('RGB').save(os.path.join('images', 'forma_de_onda.bmp'))
    sheet1.insert_bitmap(os.path.join('images', 'forma_de_onda.bmp'), 6, 3)
    ancho11 = ancho + 165
    imagen11 = Image.open(os.path.join('images', 'espectros.png')).resize((ancho11, alto), Image.ANTIALIAS).convert('RGB').save(os.path.join('images', 'espectros.bmp'))
    sheet1.insert_bitmap(os.path.join('images', 'espectros.bmp'), 13, 1)
    sheet1.col(0).width = 256 * 7  # roughly one inch
    sheet1.col(1).width = 256 * 13  # 256 * number of characters
    sheet1.col(2).width = 256 * 6
    sheet1.col(3).width = 256 * 6
    sheet1.col(4).width = 256 * 6
    i = 5
    while i < 14:
        sheet1.col(i).width = 256 * 7
        i += 1
    # PREVENGA REPORT (page 4) _____________________________________________
    sheet2.write_merge(0, 4, 0, 13, '', estilobg)
    sheet2.write_merge(5, 7, 0, 0, '', estilobg)
    sheet2.write_merge(5, 7, 13, 13, '', estilobg)
    sheet2.write_merge(8, 39, 0, 13, '', estilobg)
    sheet2.write_merge(0, 0, 0, 13, u'INFORME SOFTWARE CALCÚLE, ESCÚCHE Y PREVÉNGA', estilo)
    sheet2.write_merge(1, 1, 0, 13, 'Fecha: ' + fecha_hora + ' - >>> Página 4/4 <<<', estilo12)
    sheet2.write_merge(3, 3, 0, 13, u'ATENUACIÓN PROTECTORES AUDITIVOS', estilo12)
    sheet2.write(5, 1, 'Frec. (Hz)', estilo11)
    sheet2.write(6, 1, 'At. EPI', estilo11)
    sheet2.write(7, 1, 'At. Est. H\'', estilo11)
    protector_auditivo = self.spinner_prt.text
    sheet2.write_merge(9, 9, 0, 6, u'Protector Auditivo: ' + protector_auditivo, estilo21)
    for i, x in enumerate(frecuencias):
        sheet2.write(5, i + 2, frecuencias[i], estilo1)
        sheet2.write(6, i + 2, atenuacion_h[i], estilo2)
        sheet2.write(7, i + 2, atenuacion_h_prima[i], estilo2)
    sheet2.col(0).width = 256 * 7  # roughly one inch
    sheet2.col(1).width = 256 * 13  # 256 * number of characters
    sheet2.col(2).width = 256 * 6
    sheet2.col(3).width = 256 * 6
    sheet2.col(4).width = 256 * 6
    i = 5
    while i < 14:
        sheet2.col(i).width = 256 * 7
        i += 1
    ancho2 = 76
    alto2 = 76
    imagen2 = Image.open(os.path.join('images', 'grafica_protectores.png')).resize((ancho, alto), Image.ANTIALIAS).convert('RGB').save(os.path.join('images', 'grafica_protectores.bmp'))
    sheet2.insert_bitmap(os.path.join('images', 'grafica_protectores.bmp'), 13, 1)
    imagen_protector = self.imagen_prt_epi.source
    imagen22 = Image.open(imagen_protector).resize((ancho2, alto2), Image.ANTIALIAS).convert('RGB').save(os.path.join('images', 'protector_seleccionado.bmp'))
    sheet2.insert_bitmap(os.path.join('images', 'protector_seleccionado.bmp'), 9, 11)
    wbk.save('Informe_CEP.xls')
    # Windows-only: open the finished report in Excel.
    os.system('start excel.exe "Informe_CEP.xls"')
def load_gif(self, *args) -> None:
    """Overwrite the drone-path map with the loading placeholder image."""
    # Context manager releases the file handle (the original leaked it).
    with Image.open("images/loading.png") as image:
        image.copy().save("Gmap/drone_path.png")
def btn(self):
    # Encrypt the selected image with a Rubik's-cube-style cipher: per-row and
    # per-column circular shifts driven by random key vectors Kr/Kc, followed
    # by XOR passes, then save the result and show it in the preview widget.
    try:
        im = Image.open(self.path)  # open selected image
        pix = im.load()  # pixel-access object
        # Split the image into separate R, G, B matrices (column-major:
        # first index is x/width, second is y/height).
        r = []
        g = []
        b = []
        for i in range(im.size[0]):
            r.append([])
            g.append([])
            b.append([])
            for j in range(im.size[1]):
                rgbPerPixel = pix[i, j]
                r[i].append(rgbPerPixel[0])  # red values
                g[i].append(rgbPerPixel[1])  # green values
                b[i].append(rgbPerPixel[2])  # blue values
        m = im.size[0]  # width (rows of the r/g/b matrices)
        n = im.size[1]  # height (columns)
        # Key-strength selection from the UI checkbox:
        # low alpha=4, medium alpha=6, high alpha=8.
        alpha = int(self.secr)
        # Random key vectors: one entry per row (Kr) and per column (Kc),
        # each in [0, 2^alpha - 1].  Default alpha=8 -> 0..255.
        Kr = [randint(0, pow(2, alpha) - 1) for i in range(m)]
        Kc = [randint(0, pow(2, alpha) - 1) for i in range(n)]
        ITER_MAX = 1
        #print('Vector Kr : ', Kr)
        #print('Vector Kc : ', Kc)
        ''' #keep inform about the image in txt for decrypt later
        f = open('keys.txt', 'w+')
        f.write('Vector Kr : \n')
        for a in Kr:
            f.write(str(a) + '\n')
        f.write('Vector Kc : \n')
        for a in Kc:
            f.write(str(a) + '\n')
        f.write('ITER_MAX : \n')
        f.write(str(ITER_MAX) + '\n')
        '''
        for iterations in range(ITER_MAX):
            # Row pass: roll each row left/right by Kr[i], direction chosen
            # by the parity of the row's channel sum.
            for i in range(m):
                rTotalSum = sum(r[i])  # sum of all red pixels in the row
                gTotalSum = sum(g[i])  # sum of all green pixels
                bTotalSum = sum(b[i])  # sum of all blue pixels
                rModulus = rTotalSum % 2
                gModulus = gTotalSum % 2
                bModulus = bTotalSum % 2
                if (rModulus == 0):
                    # even sum: shift Kr[i] places to the right
                    r[i] = numpy.roll(r[i], Kr[i])
                else:
                    # odd sum: shift Kr[i] places to the left
                    r[i] = numpy.roll(r[i], -Kr[i])
                if (gModulus == 0):
                    g[i] = numpy.roll(g[i], Kr[i])
                else:
                    g[i] = numpy.roll(g[i], -Kr[i])
                if (bModulus == 0):
                    b[i] = numpy.roll(b[i], Kr[i])
                else:
                    b[i] = numpy.roll(b[i], -Kr[i])
            # Column pass: same idea, shifting columns up/down by Kc[i].
            for i in range(n):
                rTotalSum = 0
                gTotalSum = 0
                bTotalSum = 0
                for j in range(m):
                    rTotalSum += r[j][i]  # r[row][column]
                    gTotalSum += g[j][i]
                    bTotalSum += b[j][i]
                rModulus = rTotalSum % 2
                gModulus = gTotalSum % 2
                bModulus = bTotalSum % 2
                if (rModulus == 0):
                    upshift(r, i, Kc[i])
                else:
                    downshift(r, i, Kc[i])
                if (gModulus == 0):
                    upshift(g, i, Kc[i])
                else:
                    downshift(g, i, Kc[i])
                if (bModulus == 0):
                    upshift(b, i, Kc[i])
                else:
                    downshift(b, i, Kc[i])
            # XOR pass over rows: odd rows XOR the raw column key, even rows
            # XOR the bit-reversed (rotate180) column key.
            for i in range(m):
                for j in range(n):
                    if (i % 2 == 1):
                        r[i][j] = r[i][j] ^ Kc[j]
                        g[i][j] = g[i][j] ^ Kc[j]
                        b[i][j] = b[i][j] ^ Kc[j]
                    else:
                        r[i][j] = r[i][j] ^ rotate180(Kc[j])
                        g[i][j] = g[i][j] ^ rotate180(Kc[j])
                        b[i][j] = b[i][j] ^ rotate180(Kc[j])
            # XOR pass over columns: even columns XOR the raw row key, odd
            # columns XOR the bit-reversed row key.
            for j in range(n):
                for i in range(m):
                    if (j % 2 == 0):
                        r[i][j] = r[i][j] ^ Kr[i]
                        g[i][j] = g[i][j] ^ Kr[i]
                        b[i][j] = b[i][j] ^ Kr[i]
                    else:
                        r[i][j] = r[i][j] ^ rotate180(Kr[i])
                        g[i][j] = g[i][j] ^ rotate180(Kr[i])
                        b[i][j] = b[i][j] ^ rotate180(Kr[i])
        # Write the scrambled channels back into the image.
        for i in range(m):
            for j in range(n):
                pix[i, j] = (r[i][j], g[i][j], b[i][j])
        encrypted_image_path = 'encrypted_images/' + str(self.tail_name[1])
        im.save(encrypted_image_path)  # encrypted image
        self.encr_image.source = encrypted_image_path  # show encrypted image
    # NOTE(review): bare except/pass silently hides every failure (bad path,
    # non-RGB image, missing output directory) -- consider logging.
    except:
        pass
def next_image(self, *largs):  # whatever = None):
    # Advance the slideshow: when we're at the end of the "shown" history,
    # pick a random photo, optionally resize it into the on-disk cache, and
    # append it to the history; otherwise just step the index forward.
    print "Showing next image"
    global iCurrentIndex
    global iShownLength
    if (iCurrentIndex == (iShownLength - 1)) or (iShownLength == 0):
        strRandom = random.choice(self.photos)
        EventLoop.window.title = str(len(self.photos)) + '/' + str(intCount) + ' Loading:' + strRandom
        try:
            # comment out the exception to try loading by setting image source
            raise Exception("Processing with PIL")
            img.source = strRandom
        except Exception:
            try:
                global strCache
                global bBuildCache
                global strFileName
                strCached = ""
                strFileName = os.path.basename(strRandom)
                if (os.path.isdir(strCache)) and (os.path.isfile(strCache + strFileName)):
                    # already cached: reuse the cached copy
                    strCached = strCache + strFileName
                elif (os.path.isdir(strCache)) and (bBuildCache == True):
                    print "Adding image to cache..."
                    from PIL import Image
                    im = Image.open(strRandom)
                    imgWidth, imgHeight = im.size
                    bResized = False
                    #wndWidth, wndHeight =
                    # Scale down to fit the window, preserving aspect ratio.
                    # NOTE(review): if sngHeight/sngWidth are ints this is
                    # Python-2 integer division and sngChange becomes 0 --
                    # presumably they are floats; confirm.
                    if imgHeight > sngHeight:
                        bResized = True
                        sngChange = sngHeight / imgHeight
                        imgHeight = int(sngHeight)
                        imgWidth = int(imgWidth * sngChange)
                    if imgWidth > sngWidth:
                        bResized = True
                        sngChange = sngWidth / imgWidth
                        imgWidth = int(sngWidth)
                        imgHeight = int(imgHeight * sngChange)
                    szNew = imgWidth, imgHeight
                    #if bResized:
                    # set to True to save all images to the cache
                    if True:
                        im = im.resize(szNew, Image.ANTIALIAS)
                        if not (strCache == ""):
                            if not os.path.exists(strCache):
                                os.makedirs(strCache)
                            strCached = strCache + os.path.basename(strRandom)
                            if (not os.path.isfile(strCached)):
                                try:
                                    im.save(strCached)
                                except Exception as e:
                                    print "save cache file problem: " + e.message
                                    strCached = ""
                            else:
                                print "save cache file found"
                    #else:
                    #    im.save(strTemp)
                if not strCached == "":
                    #img.source = strCached
                    # De-duplicate, then append so the cached copy is current.
                    try:
                        self.shown.remove(strCached)
                    except:
                        print "error removing image from cache"
                    self.shown.append(strCached)
                    iShownLength = len(self.shown)
                    iCurrentIndex = iShownLength - 1
                else:
                    #img.source = strRandom
                    try:
                        self.shown.append(strRandom)
                    except:
                        print "error removing image from cache"
                    self.shown.append(strRandom)
                    iShownLength = len(self.shown)
                    iCurrentIndex = iShownLength - 1
                #img.reload()
            except Exception as e:
                print "Error(" + e.message + ") loading image " + strRandom
                # continue with the logic: drop the broken photo and refill
                # the pool from disk when it runs empty
                self.photos.remove(strRandom)
                if len(self.photos) < 1:
                    self.photos = glob.glob(strDir + "*.jpg")
                #global bReschedule
    else:
        iCurrentIndex += 1
    self.show_current_image()
# Create our spotify object with permissions spotifyObject = spotipy.Spotify(auth=token) devices = spotifyObject.devices() deviceID = devices['devices'][0]['id'] # Current track information track = spotifyObject.current_user_playing_track() if track != None: artist = track['item']['artists'][0]['name'] playing = track['item']['name'] #define image ################################################################################ link = (track['item']['album']['images'][0]['url']) response = requests.get(link) img = Image.open(BytesIO(response.content)) img.thumbnail((300, 300)) img.save("img1.jpg") #Create kivy GUI ################################################################################ class MainWindow(Screen, Widget): def __init__(self, **kwargs): super(Screen, self).__init__(**kwargs) img.load() self.image = 'img1.jpg' self.track = playing self.artist = artist def on_touch_move(Widget, touch): volume = int(Widget.value)