import os

import cv2
import hasel


def getFramesFromVid():
    # `actual_path` is a module-level global defined elsewhere.
    os.chdir(actual_path)
    os.chdir('test')
    os.chdir('hsl')
    os.chdir('test4')
    print('I am: ', os.getcwd())
    # The ADD command should be run here to convert the vh264 arrays (the video) to .mp4
    # os.execute()
    files = os.listdir(os.getcwd())
    print(files)
    print('\n')
    for file in files:
        namefile, extension = os.path.splitext(file)
        if (extension == '.mp4' or extension == '.MP4') and namefile == 'test':
            print('filename: ', namefile)
            # Save the frames of the vhs264 video as photos:
            count = 0
            vidcap = cv2.VideoCapture(file)
            success, image = vidcap.read()
            while success:
                # hue, saturation, lightness or luminance
                hsl_img = hasel.rgb2hsl(image)
                hsl_img = hsl_img[:, :, :] * 255
                # Other color spaces that were tried:
                # hsv_img = rgb2hsv(image) * 255
                # cmyk_img = phim.rgb2cmyk(np.asarray(img))
                # ycbcr_img = rgb2ycbcr(img)
                cv2.imwrite("%d00HSL.jpg" % count, hsl_img[:, :, :])   # full HSL image
                cv2.imwrite("%d00HSL2.jpg" % count, hsl_img[:, :, 2])  # lightness channel only
                # cv2.imwrite("%d-rgb.jpg" % count, image[:, :, :])
                success, image = vidcap.read()
                count += 1
                print('Count: ', count)
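# A minimal, self-contained sketch of the conversion above, assuming the
# `hasel` package exposes rgb2hsl() taking an HxWx3 uint8 RGB array and
# returning floats in [0, 1] with channel 2 = lightness. The synthetic
# frame stands in for a real video frame.
import numpy as np
import hasel

frame = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)  # fake frame
hsl = hasel.rgb2hsl(frame)
lightness_255 = hsl[:, :, 2] * 255  # scale lightness to 0..255 for cv2.imwrite
print(lightness_255.shape)          # (4, 4)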
def run_threaded(self):
    # If threaded == True, this function is run instead of run().
    # Requires: from PIL import Image; import numpy as np; import hasel
    # You can modify the image frame like any other np array:
    # out = self.frame[:][200:600]
    # Test with a local photo
    out = Image.open("./mlep.jpg")
    out = np.array(out)
    # Extract reds:
    # out_R = out[:, :, 0]
    # return out_R
    # Convert to HSL using hasel
    out = hasel.rgb2hsl(out)
    # Using OpenCV instead:
    # out = cv2.cvtColor(out, cv2.COLOR_RGB2HLS)
    thresh = (0., 1)
    channel = out[:, :, 1]
    binary_output = np.zeros_like(channel)
    binary_output[(channel > thresh[0]) & (channel <= thresh[1])] = 1
    return binary_output
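# The same masking pattern in isolation. Note that the threshold above,
# (0., 1), keeps nearly every pixel; the values here are illustrative,
# not from the original code.
import numpy as np

channel = np.array([[0.1, 0.6], [0.9, 0.3]])
thresh = (0.5, 1.0)
mask = np.zeros_like(channel)
mask[(channel > thresh[0]) & (channel <= thresh[1])] = 1
print(mask)  # [[0. 1.] [1. 0.]]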
# Requires (presumably): from skimage import io; import hasel.
# `v_threshold` is a module-level constant defined elsewhere.
def get_pixels_for_image(img_file_path):
    print("reading image...")
    img_rgb = io.imread(img_file_path)
    print("converting color space...")
    img_hsl = hasel.rgb2hsl(img_rgb)
    hsl_colors = img_hsl.reshape((-1, 3))
    print("filtering out darkest colors before clustering for better results...")
    samples_before = hsl_colors.shape[0]
    hsl_colors = hsl_colors[hsl_colors[:, 2] > v_threshold]
    samples_after = hsl_colors.shape[0]
    print("filtered out " + str(100 - (100 * samples_after) // samples_before) + "% of pixels")
    return hsl_colors
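# A quick check of the percentage arithmetic above with toy counts:
# keeping 60 of 80 samples filters out 100 - (100 * 60) // 80 = 25 percent.
samples_before, samples_after = 80, 60
print(100 - (100 * samples_after) // samples_before)  # 25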
# Requires (presumably): from skimage import io; import numpy as np; import hasel.
def get_pixels_for_image(img_file_path):
    log("reading image \"" + img_file_path + "\"")
    img_rgb = io.imread(img_file_path)
    log("converting color space...")
    if len(img_rgb.shape) < 3:
        # This is a greyscale image: replicate the single channel into RGB.
        w, h = img_rgb.shape
        ret = np.empty((w, h, 3), dtype=np.uint8)
        ret[:, :, 0] = img_rgb
        ret[:, :, 1] = img_rgb
        ret[:, :, 2] = img_rgb
        img_rgb = ret
    # Slice to the first three channels in case the image has an alpha channel.
    img_hsl = hasel.rgb2hsl(img_rgb[:, :, 0:3])
    hsl_colors = img_hsl.reshape((-1, 3))
    return hsl_colors
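# The channel replication above can also be written with np.dstack; a
# hedged alternative sketch, not what the original uses.
import numpy as np

grey = np.arange(6, dtype=np.uint8).reshape(2, 3)
rgb = np.dstack([grey, grey, grey])
print(rgb.shape)  # (2, 3, 3)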
def _shootWater(self, op='hsl'):
    # Requires: import RPi.GPIO as GPIO; import cv2; import numpy as np;
    # import hasel; from picamera.array import PiRGBArray
    PIN = 27
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(PIN, GPIO.OUT)
    GPIO.output(PIN, 1)
    rawCapture = PiRGBArray(self.picamera)
    self.picamera.framerate = 10
    for image in self.picamera.capture_continuous(rawCapture, format="bgr"):
        if self.extinct:
            print('fire extinguished')
            break
        cv2.imwrite('fuego.jpg', image.array)
        image.truncate(0)
        np_arr_rgb = cv2.imread('fuego.jpg', 1)
        photo = hasel.rgb2hsl(np_arr_rgb)
        photo[:, :, :] = photo[:, :, :] * 255
        black = np.uint8(photo[:, :, 2])
        nums, num_counter = np.unique(black[:, :], return_counts=True)
        freq_dic = dict(zip(nums, num_counter))
        try:
            if freq_dic[255] > (self.recognizer_size_y * self.recognizer_size_x) * self.white_tolerance:
                print('There is still fire')
                self.extinct = False
            else:
                print('No fire left')
                GPIO.output(PIN, 0)
                GPIO.cleanup()
                self.extinct = True
        except KeyError:
            print('no fire visible')
            self.extinct = True
            GPIO.output(PIN, 0)
            GPIO.cleanup()
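# The white-pixel test above as a self-contained sketch: count how many
# lightness values equal 255 and compare the count against a tolerance.
# Using dict.get() instead of try/except KeyError avoids the exception
# path; the array and tolerance are made-up illustration values.
import numpy as np

black = np.array([[255, 0], [255, 255]], dtype=np.uint8)
nums, counts = np.unique(black, return_counts=True)
freq = dict(zip(nums, counts))
tolerance = 0.5
print(freq.get(255, 0) > black.size * tolerance)  # True: 3 of 4 pixels are white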
        h, s, d = tf.split(hsd_img, 3, axis=-1)
        gamma = self.e_step(d)
        return gamma

    def eval(self, rgb_img, hsd_img):
        """Perform a step needed for inference."""
        if isinstance(hsd_img, np.ndarray):
            hsd_img = tf.convert_to_tensor(hsd_img, dtype=tf.float32)
        h, s, d = tf.split(hsd_img, 3, axis=-1)
        gamma = self.e_step(d)
        _, mu, std = self.m_step(hsd_img, gamma)
        return mu, std, gamma


if __name__ == '__main__':
    import hasel
    import imageio

    dcgmm = DCGMM()
    rgb_img = imageio.imread('images/0', pilmode='RGB')
    hsd_img = hasel.rgb2hsl(rgb_img)[None, :, :, :]  # add a batch dimension
    h, s, d = dcgmm(rgb_img, hsd_img)
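# tf.split along the channel axis, as used in eval() above, shown on a
# toy tensor; each slice keeps its own singleton channel dimension.
import tensorflow as tf

x = tf.zeros((1, 8, 8, 3))
h, s, d = tf.split(x, 3, axis=-1)
print(h.shape)  # (1, 8, 8, 1)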
def run(self, ini_x, ini_y, fi_x, fi_y, zone, cxx, cyy):
    # self.picamera.resolution = (640, 480)
    rawCapture = PiRGBArray(self.picamera)
    # self.picamera.start_preview()
    # self.picamera.framerate = 3
    num_frames = 2
    i = 0
    positives_warm = 0
    negatives_warm = 0
    positives = 0
    negatives = 0
    for image in self.picamera.capture_continuous(rawCapture, format="bgr"):
        # print('ZONE: ' + str(zone) + ' frame: ' + str(i) + ' | thread ' + str(zone))
        if i == num_frames:
            self.isFire = True
            break
        # self.mutex.acquire()
        # self.picamera.capture('tmp' + str(zone) + str(i) + '.jpg', 'jpeg')
        # self.mutex.release()
        cv2.imwrite('tmp' + str(zone) + str(i) + '.jpg', image.array)
        image.truncate(0)
        np_arr_rgb = cv2.imread('tmp' + str(zone) + str(i) + '.jpg', 1)
        photo = hasel.rgb2hsl(np_arr_rgb)
        photo[:, :, :] = photo[:, :, :] * 255
        black = np.uint8(photo[:, :, 2])
        warm_results = []
        max_num_colors = fi_y * fi_x
        num = 0
        cX = 0
        cY = 0
        segment = black[ini_x:fi_x, ini_y:fi_y]
        # cv2.imwrite('zone' + str(zone) + '-frame' + str(i) + '.jpg', segment)
        M = cv2.moments(segment)
        try:
            cY = int(M["m10"] / M["m00"])
            cX = int(M["m01"] / M["m00"])
            # Compare the previous centroid (cxx, cyy) with the current one (cX, cY).
            if abs(cX - cxx) > int(fi_x - ini_x) * self.motion_tolerance:
                positives += 1
            else:
                negatives += 1
            if abs(cY - cyy) > int(fi_y - ini_y) * self.motion_tolerance:
                positives += 1
            else:
                negatives += 1
            w_result = self.warmColorDetection(np_arr_rgb, ini_x, ini_y, fi_x, fi_y, zone)
            if w_result:
                positives_warm += 1
            else:
                negatives_warm += 1
        except ZeroDivisionError:
            print('isFire -> ZeroDivisionError while analysing zone: ', zone)
        i += 1
    try:
        self.results[zone] = [
            'Zone: ' + str(zone), [positives, negatives],
            [positives_warm, negatives_warm], ini_x, ini_y, fi_x, fi_y
        ]
    except ZeroDivisionError:
        print('ZeroDivisionError: ', positives, negatives, positives, negatives_warm)
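# Centroid extraction as in run() above, isolated: cv2.moments() on a
# binary patch gives the spatial moments, and m10/m00, m01/m00 are the
# column and row centroid coordinates. Guarding m00 == 0 replaces the
# ZeroDivisionError handler. The blob is synthetic.
import numpy as np
import cv2

segment = np.zeros((10, 10), dtype=np.uint8)
segment[2:5, 6:9] = 255  # a small white blob
M = cv2.moments(segment)
if M["m00"] != 0:
    col, row = M["m10"] / M["m00"], M["m01"] / M["m00"]
    print(col, row)  # 7.0 3.0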
def identifyWhiteZones(self):
    self.picamera.resolution = (640, 480)
    rawCapture = PiRGBArray(self.picamera)
    self.picamera.start_preview()
    self.picamera.framerate = 20
    # Move the motors, detect the limits, go into the quadratic zone.
    for image in self.picamera.capture_continuous(rawCapture, format="bgr"):
        print('new frame')
        # This state is updated once white zones have been detected and
        # analysed for motion and colour.
        cv2.imwrite('FRAME_ORIGEN.jpg', image.array)
        image.truncate(0)
        np_arr_rgb = cv2.imread('FRAME_ORIGEN.jpg', 1)
        photo = hasel.rgb2hsl(np.uint8(np_arr_rgb))
        photo[:, :, :] = np.uint8(photo[:, :, :]) * 255
        black = np.uint8(photo[:, :, 2])
        R = G = B = 255
        size_x = 0
        size_y = 0
        segment = None
        ini_x = 9999999
        ini_y = 9999999
        fi_x = 0
        fi_y = 0
        cX = 0
        cY = 0
        max_width = photo.shape[1]
        max_height = photo.shape[0]
        max_num_colors = 0
        num_colors = 0
        start = time.time()
        last_is_white = False
        possiblyFire = False
        num_white_zones = 0
        whites_pos_list = []
        # Analyse the frame in segments, keeping only the white zones.
        for x in range(int(max_height / self.recognizer_size_x) + 1):
            for y in range(int(max_width / self.recognizer_size_y) + 1):
                try:
                    block = black[x * self.recognizer_size_x:(x + 1) * self.recognizer_size_x,
                                  y * self.recognizer_size_y:(y + 1) * self.recognizer_size_y]
                    if block.shape != (max_height, max_width):
                        segment = np.zeros((block.shape[0], block.shape[1]), dtype=np.uint8)
                    else:
                        segment = np.zeros((self.recognizer_size_x, self.recognizer_size_y), dtype=np.uint8)
                    segment[:, :] = block
                    size_x = segment.shape[0]
                    size_y = segment.shape[1]
                except ValueError as e:
                    raise e
                nums, num_counter = np.unique(segment[:, :], return_counts=True)
                freq_dic = dict(zip(nums, num_counter))
                try:
                    if freq_dic[255] >= ((size_y * size_x) * self.white_tolerance):
                        possiblyFire = True
                        if last_is_white:
                            fi_x = (x * size_x) + size_x
                            fi_y = (y * size_y) + size_y
                        else:
                            if ini_x > (x * size_x):
                                ini_x = x * self.recognizer_size_x
                            if ini_y > (y * size_y):
                                ini_y = y * self.recognizer_size_y
                            whites_pos_list.append([ini_x, ini_y])
                            fi_x = (x * self.recognizer_size_x) + size_x
                            fi_y = (y * self.recognizer_size_y) + size_y
                            last_is_white = True
                except KeyError:
                    if last_is_white:
                        whites_pos_list[-1].append(fi_x)
                        whites_pos_list[-1].append(fi_y)
                        whites_pos_list[-1].append(num_colors)
                        num_colors = 0
                        num_white_zones += 1
                        last_is_white = False
                    ini_x = 999999
                    ini_y = 999999
                    fi_y = 0
                    fi_x = 0
        if possiblyFire:
            print('There are ', num_white_zones, ' white objects')
            threads = []
            res = [None] * (num_white_zones + 1)
            self.results = [None for i in range(num_white_zones)]
            # Version where the neural network is run over the frame first and
            # then the real-time LMC fire detection:
            ##############################################################################################
            # self.neural_net_th = Thread(target=self._executeNeuralNetwork, args=(self, np_arr_rgb))
            # self.neural_net_th.start()
            # The neural-net join happens inside the LMC, which means the
            # network and the LMC run in parallel.
            # Run the LMC to detect in real time whether there is fire.
            # print('Whites: ', whites_pos_list)
            # whites_pos_list = self._getBetterZones(whites_pos_list)
            # num_new_zones = len(whites_pos_list)
            # print('Better: ', whites_pos_list)
            self.num_white_zones = num_white_zones
            for zone in range(num_white_zones):
                try:
                    ini_x = whites_pos_list[zone][0]
                    ini_y = whites_pos_list[zone][1]
                    fi_x = whites_pos_list[zone][2]
                    fi_y = whites_pos_list[zone][3]
                    segment = np.uint8(black[ini_x:fi_x, ini_y:fi_y])
                    M = cv2.moments(segment)
                    try:
                        cY = int(M["m10"] / M["m00"])
                        cX = int(M["m01"] / M["m00"])
                        if cY > ((self.recognizer_size_y / 2) + (self.recognizer_size_y * 0.20)):
                            ini_y += int(self.recognizer_size_y * 0.25)
                        if cY < ((self.recognizer_size_y / 2) + (self.recognizer_size_y * 0.20)):
                            fi_y -= int(self.recognizer_size_y * 0.25)
                        if cX < ((self.recognizer_size_x / 2) + (self.recognizer_size_x * 0.20)):
                            fi_x -= int(self.recognizer_size_y * 0.25)
                        if cX > ((self.recognizer_size_y / 2) + (self.recognizer_size_y * 0.20)):
                            ini_x += int(self.recognizer_size_y * 0.25)
                    except ZeroDivisionError:
                        print('IdentifyWhites: ZeroDivisionError while analysing zone ', zone)
                    if cX == 0 or cY == 0:
                        print('The centroid came out (0, 0), so this zone is not analysed; it is probably redundant')
                    else:
                        # Could also be run in threads:
                        # th = Thread(target=self.run, args=(ini_x, ini_y, fi_x, fi_y, num_colors, zone, cX, cY))
                        # threads.append(th); th.start(); th.join()
                        self.run(ini_x, ini_y, fi_x, fi_y, zone, cX, cY)
                except IndexError:
                    pass
            # for th in threads:
            #     th.join()
            # self.neural_net_th.join()
            self._analyseResults()
            if self.extinct:
                print('fire extinguished')
                cv2.destroyAllWindows()
                break
        # else: no fire in this frame; turn the motors instead of moving.
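# A minimal sketch of the tiling scan in identifyWhiteZones above: walk
# the lightness plane in recognizer-sized blocks; edge blocks come out
# smaller because NumPy slicing clips at the array bounds. Sizes are toy
# values.
import numpy as np

black = np.zeros((100, 140), dtype=np.uint8)
tile = 32
for x in range(black.shape[0] // tile + 1):
    for y in range(black.shape[1] // tile + 1):
        segment = black[x * tile:(x + 1) * tile, y * tile:(y + 1) * tile]
        if segment.size == 0:
            continue  # tile starts fully outside the image
        # segment.shape may be smaller than (tile, tile) at the edges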
# Requires (presumably): from hasel import rgb2hsl, hsl2rgb;
# from skimage.color import rgb2hsv; from PIL import Image;
# import numpy as np; from tqdm import tqdm.
assert len(greyscale_file_paths) == len(colored_file_paths)
for i, greyscale_file_path in enumerate(tqdm(greyscale_file_paths)):
    greyscale_image = Image.open(greyscale_file_path)
    greyscale_image_np = np.array(greyscale_image)
    colored_file_path = colored_file_paths[i]
    colored_image = Image.open(colored_file_path).resize(
        greyscale_image.size, resample=Image.LANCZOS
    )
    colored_image_np = np.array(colored_image)
    # Transfer hue and saturation from the color image to the greyscale
    # image via the selected color space.
    if args.via_color_space == "hsl":
        greyscale_image_np_hsl = rgb2hsl(greyscale_image_np)
        colored_image_np_hsl = rgb2hsl(colored_image_np)
        greyscale_image_np_hsl[:, :, 0] = colored_image_np_hsl[:, :, 0]  # hue
        greyscale_image_np_hsl[:, :, 1] = colored_image_np_hsl[:, :, 1]  # saturation
        full_res_colored_image = hsl2rgb(greyscale_image_np_hsl)
    else:
        greyscale_image_np_hsv = rgb2hsv(greyscale_image_np)
        colored_image_np_hsv = rgb2hsv(colored_image_np)
        greyscale_image_np_hsv[:, :, 0] = colored_image_np_hsv[:, :, 0]  # hue
        greyscale_image_np_hsv[:, :, 1] = colored_image_np_hsv[:, :, 1]  # saturation
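# A compact sketch of the hue/saturation transfer above, assuming hasel's
# rgb2hsl/hsl2rgb pair (uint8 RGB in, floats in [0, 1] per channel); both
# 4x4 images are synthetic stand-ins.
import numpy as np
from hasel import rgb2hsl, hsl2rgb

grey = np.full((4, 4, 3), 128, dtype=np.uint8)           # "greyscale" RGB
color = np.random.randint(0, 256, (4, 4, 3), np.uint8)   # color reference

grey_hsl = rgb2hsl(grey)
color_hsl = rgb2hsl(color)
grey_hsl[:, :, 0] = color_hsl[:, :, 0]  # take hue from the color image
grey_hsl[:, :, 1] = color_hsl[:, :, 1]  # take saturation too
recolored = hsl2rgb(grey_hsl)            # lightness stays from `grey`
print(recolored.shape)                   # (4, 4, 3)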
def run(self, ini_x, ini_y, fi_x, fi_y, previous_num_whites, zone, cxx, cyy):
    # self.picamera.resolution = (640, 480)
    rawCapture = PiRGBArray(self.picamera)
    self.picamera.start_preview()
    # self.picamera.framerate = 3
    tmp_res = []
    iterations = 0
    for image in self.picamera.capture_continuous(rawCapture, format="bgr"):
        if iterations == 2:
            print('End motion thread zone: ', zone)
            break
        cv2.imwrite('tmp_isF.jpg', image.array)
        np_arr_rgb = cv2.imread('tmp_isF.jpg')
        photo = hasel.rgb2hsl(np_arr_rgb)
        photo[:, :, :] = photo[:, :, :] * 255
        photo[:, :, :] = np.uint8(photo[:, :, :])
        black = photo[:, :, 2]
        positives = 0
        negatives = 0
        frame = 0
        warm_results = []
        image.truncate(0)
        max_num_colors = fi_y * fi_x
        num = 0
        cX = 0
        cY = 0
        segment = black[ini_x:fi_x, ini_y:fi_y]
        '''freq = None
        nums, num_counter = np.unique(segment[:, :], return_counts=True)
        freq = dict(zip(nums, num_counter))
        try:
            num = freq[255]
            # Try this comparison with an or: (abs(previous_num_whites - freq[255])) < previous_num_whites * q
            if (abs(previous_num_whites - num)) > previous_num_whites * self.white_tolerance:
                positives += 1
            else:
                negatives += 1
        except KeyError:
            negatives += 1'''
        M = cv2.moments(segment)
        try:
            cY = int(M["m10"] / M["m00"])
            cX = int(M["m01"] / M["m00"])
            # Compare the previous centroid (cxx, cyy) with the current one (cX, cY).
            if abs(cX - cxx) > int(fi_x - ini_x) * self.white_tolerance:
                positives += 1  # moves quite a lot on x
            else:
                negatives += 1  # does not move much on x
            if abs(cY - cyy) > int(fi_y - ini_y) * self.white_tolerance:
                positives += 1  # moves quite a lot on y
            else:
                negatives += 1  # does not move much on y
        except ZeroDivisionError:
            print('isFire -> ZeroDivisionError while analysing zone: ', zone)
        tmp_res.append(['Zone: ' + str(zone), positives, negatives])
        self.results_motion_detection[zone].append(['Zone: ' + str(zone), positives, negatives])
        w_result = self.warmColorDetection(np_arr_rgb, ini_x, ini_y, fi_x, fi_y, previous_num_whites, zone)
        print('In this zone there are motion positives (', positives, ')/(', negatives, ') and the warm-colour result is: ', w_result)
        iterations += 1
def identifyWhiteZones(self):
    self.picamera.resolution = (640, 480)
    rawCapture = PiRGBArray(self.picamera)
    self.picamera.start_preview()
    self.picamera.framerate = 10
    # Move the motors, detect the limits, go into the quadratic zone.
    for image in self.picamera.capture_continuous(rawCapture, format="bgr"):
        # This state is updated once white zones have been detected and
        # analysed for motion and colour.
        if self.isFire:
            print('There is fire')
            print(self.results_color_detection)
            print(self.results_motion_detection)
            break
        image.truncate(0)
        cv2.imwrite('tmp_id.jpg', image.array)
        np_arr_rgb = cv2.imread('tmp_id.jpg', 1)
        photo = hasel.rgb2hsl(np_arr_rgb)
        photo[:, :, :] = photo[:, :, :] * 255
        photo[:, :, :] = np.uint8(photo[:, :, :])
        black = photo[:, :, 2]
        # cv2.imshow('img', np.uint8(photo)); cv2.waitKey(0); cv2.destroyAllWindows()
        R = G = B = 255
        size_x = 0
        size_y = 0
        segment = None
        ini_x = 9999999
        ini_y = 9999999
        fi_x = 0
        fi_y = 0
        cX = 0
        cY = 0
        max_width = photo.shape[1]
        max_height = photo.shape[0]
        max_num_colors = 0
        num_colors = 0
        start = time.time()
        last_is_white = False
        possiblyFire = False
        num_white_zones = 0
        whites_pos_list = []
        # Analyse the frame in segments, keeping only the white zones.
        for x in range(int(max_height / self.recognizer_size_x) + 1):
            for y in range(int(max_width / self.recognizer_size_y) + 1):
                try:
                    block = black[x * self.recognizer_size_x:(x + 1) * self.recognizer_size_x,
                                  y * self.recognizer_size_y:(y + 1) * self.recognizer_size_y]
                    if block.shape != (max_height, max_width):
                        segment = np.zeros((block.shape[0], block.shape[1]), dtype=np.uint8)
                    else:
                        segment = np.zeros((self.recognizer_size_x, self.recognizer_size_y), dtype=np.uint8)
                    segment[:, :] = block
                    size_x = segment.shape[0]
                    size_y = segment.shape[1]
                except ValueError as e:
                    raise e
                nums, num_counter = np.unique(segment[:, :], return_counts=True)
                freq_dic = dict(zip(nums, num_counter))
                try:
                    if freq_dic[255] >= ((size_y * size_x) * self.white_tolerance):
                        num_colors = freq_dic[255]
                        possiblyFire = True
                        if last_is_white:
                            fi_x = (x * size_x) + size_x
                            fi_y = (y * size_y) + size_y
                        else:
                            if ini_x > (x * size_x):
                                ini_x = x * self.recognizer_size_x
                            if ini_y > (y * size_y):
                                ini_y = y * self.recognizer_size_y
                            whites_pos_list.append([ini_x, ini_y])
                            fi_x = (x * self.recognizer_size_x) + size_x
                            fi_y = (y * self.recognizer_size_y) + size_y
                            last_is_white = True
                except KeyError:
                    if last_is_white:
                        whites_pos_list[-1].append(fi_x)
                        whites_pos_list[-1].append(fi_y)
                        whites_pos_list[-1].append(num_colors)
                        num_colors = 0
                        num_white_zones += 1
                        last_is_white = False
                    ini_x = 999999
                    ini_y = 999999
                    fi_y = 0
                    fi_x = 0
        if possiblyFire:
            threads = []
            res = [None] * num_white_zones
            self.results_motion_detection = [[] for i in range(num_white_zones)]
            self.results_color_detection = [[] for i in range(num_white_zones)]
            print('White zones: ', num_white_zones)
            for zone in range(num_white_zones):
                ini_x = whites_pos_list[zone][0]
                ini_y = whites_pos_list[zone][1]
                fi_x = whites_pos_list[zone][2]
                fi_y = whites_pos_list[zone][3]
                num_colors = whites_pos_list[zone][4]
                segment = black[ini_x:fi_x, ini_y:fi_y]
                M = cv2.moments(segment)
                try:
                    cY = int(M["m10"] / M["m00"])
                    cX = int(M["m01"] / M["m00"])
                except ZeroDivisionError:
                    print('ZeroDivisionError while analysing zone ', zone)
                # Trim mostly-empty segments using the coordinates of the white centroid.
                new_segment = None
                if cY > ((self.recognizer_size_y / 2) + (self.recognizer_size_y * 0.20)):
                    ini_y += int(self.recognizer_size_y * 0.25)
                if cY < ((self.recognizer_size_y / 2) + (self.recognizer_size_y * 0.20)):
                    fi_y -= int(self.recognizer_size_y * 0.25)
                if cX < ((self.recognizer_size_x / 2) + (self.recognizer_size_x * 0.20)):
                    fi_x -= int(self.recognizer_size_y * 0.25)
                if cX > ((self.recognizer_size_y / 2) + (self.recognizer_size_y * 0.20)):
                    ini_x += int(self.recognizer_size_y * 0.25)
                segment = black[ini_x:fi_x, ini_y:fi_y]
                M = cv2.moments(segment)
                cv2.imshow('img', np_arr_rgb)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
                try:
                    cY = int(M["m10"] / M["m00"])
                    cX = int(M["m01"] / M["m00"])
                except ZeroDivisionError:
                    print('Identify: ZeroDivisionError while analysing zone: ', zone)
                if cX == 0 or cY == 0:
                    print('The centroid came out (0, 0), so this zone is not analysed; it is probably redundant')
                else:
                    # th = Thread(target=self.run, args=(ini_x, ini_y, fi_x, fi_y, num_colors, zone, cX, cY))
                    # threads.append(th); th.start(); th.join()
                    self.run(ini_x, ini_y, fi_x, fi_y, num_colors, zone, cX, cY)
            # for th in threads:
            #     th.join()
        else:
            print('No fire in this frame')
# Requires (presumably): from PIL import Image; import numpy as np;
# import random; from hasel import rgb2hsl, hsl2rgb.
target_img = np.array(img_pil)
small_img_pil = img_pil.resize((256, 256), resample=Image.LANCZOS)
blurry_img_pil = small_img_pil.resize(img_pil.size, resample=Image.LANCZOS)
blurry_img = np.array(blurry_img_pil)
dataset = "training" if i < split_index else "validation"
num_examples = 1 if dataset == "training" else 1
for j in range(num_examples):
    x1, y1, x2, y2 = get_random_coordinates(target_img)
    target_window = target_img[y1:y2, x1:x2, :]
    blurry_window = blurry_img[y1:y2, x1:x2, :]
    img_hsl = rgb2hsl(target_window)
    blurry_img_hsl = rgb2hsl(blurry_window)
    # The input image keeps the original lightness channel but gets blurry
    # hue and saturation.
    img_hsl[:, :, 0] = blurry_img_hsl[:, :, 0]  # hue
    img_hsl[:, :, 1] = blurry_img_hsl[:, :, 1]  # saturation
    input_window = hsl2rgb(img_hsl)
    if random.random() > 0.5:
        # Apply a horizontal flip to 50 % of the images.
        input_window = np.fliplr(input_window)
        target_window = np.fliplr(target_window)
    Image.fromarray(input_window).save(
        DATA_DIR / "resolution_enhancer_dataset" / dataset
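# The "blurry chroma" trick above in isolation: downsampling and then
# upsampling with PIL's LANCZOS filter keeps only low-frequency colour
# information. Image content and sizes are toy values.
import numpy as np
from PIL import Image

img = Image.fromarray(np.random.randint(0, 256, (64, 64, 3), np.uint8))
blurry = img.resize((16, 16), resample=Image.LANCZOS).resize(img.size, resample=Image.LANCZOS)
print(blurry.size)  # (64, 64), but with fine detail smoothed away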
# Requires (presumably): import numpy as np; import hasel; a hex2rgb helper.
dominant_light = dominant_dark_and_light_colors[1]
if dominant_dark[0][2] > dominant_light[0][2]:
    # Swap so that dominant_dark really is the dimmer of the two.
    dominant_dark, dominant_light = dominant_light, dominant_dark
if config[background_color_param_name] == "dark":
    bg_color = dominant_dark
    fg_color = dominant_light
elif config[background_color_param_name] == "light":
    bg_color = dominant_light
    fg_color = dominant_dark
elif config[background_color_param_name][0] == "#":
    bg_color = hex2rgb(config[background_color_param_name])
    bg_color = hasel.rgb2hsl(np.array(bg_color).reshape(1, 1, 3)).reshape(1, 3)
    if bg_color[0][2] < 0.5:  # lightness of the single (1, 3) row
        fg_color = dominant_light
    else:
        fg_color = dominant_dark
# TODO the bg colour detection sucks for light colors
# TODO adjust the bg color by picking the nearest color cluster to it and assigning it that value
# TODO bg color breaks for the isaac example because the black bg is a flat #000000 color that's filtered out
# improved_centers = np.vstack((bg_fg_colors, improved_centers))
# dark theme settings
min_dark_contrast = 0.4
min_light_contrast = 0.1
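# The dark/light ordering above, reduced to its core: compare the HSL
# lightness (index 2) of two (1, 3) colour rows and swap so that `dark`
# is always the dimmer one. The values are illustrative.
import numpy as np

dark = np.array([[0.6, 0.5, 0.8]])   # actually light
light = np.array([[0.0, 0.5, 0.2]])  # actually dark
if dark[0][2] > light[0][2]:
    dark, light = light, dark
print(dark[0][2], light[0][2])  # 0.2 0.8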