def create_app(config_file='config.cfg'):
    """Flask application factory.

    Loads configuration from *config_file*, initialises the database and —
    when ``app.config['ONLINE']`` is truthy — the MQTT client, DynamoDB table
    and task scheduler, then registers the UI blueprints and template filters.

    NOTE(review): the source formatting was collapsed onto one line; the
    nesting below (which statements sit inside the ``app_context`` /
    ``ONLINE`` branch) was reconstructed and should be confirmed against
    the original file.

    :param config_file: path passed to ``app.config.from_pyfile``.
    :return: the configured :class:`Flask` application.
    """
    # template_dir / static_dir / root_folder are module-level globals
    # defined outside this chunk.
    app = Flask(__name__, template_folder=template_dir,
                static_folder=static_dir, root_path=root_folder)
    app.config.from_pyfile(config_file)
    with app.app_context():
        init_app_db(app)
        if app.config['ONLINE']:
            # Module-level handles shared with the rest of the application.
            global mqtt_client
            print(app.config['DEVICES_TABLE_NAME'])  # debug output — consider logging instead
            handlers = [
                SensorMessageHandler(),
                RelayMessageHandler(),
                RelayStatusMessageHandler()
            ]
            mqtt_client = init_mqtt(app.config, handlers)
            global dynamo_db_table
            dynamo_db_table = init_dynamodb(app.config['DEVICES_TABLE_NAME'])
            init_task_scheduler(load_all_schedules())
    # Route blueprints for the web UI.
    app.register_blueprint(home)
    app.register_blueprint(lights)
    app.register_blueprint(sensors)
    app.register_blueprint(tube_status)
    app.register_blueprint(rooms)
    # Local import — presumably to avoid a circular import at module load; confirm.
    from Filters import Filters
    Filters().init_filters(app)
    return app
def OpenFilters(self):
    """Open the Filters dialog modally and record the user's selection.

    Blocks the application (``Qt.ApplicationModal``) until the dialog is
    closed, then stores the chosen filters in ``self.result`` and updates
    ``self.existFilters`` accordingly; refreshes the table when at least
    one filter was selected.
    """
    winFilters = Filters()
    winFilters.setWindowModality(Qt.ApplicationModal)
    winFilters.exec_()
    self.result = winFilters.GetArray()
    # Pythonic emptiness test (was: len(self.result) == 0).
    if not self.result:
        self.existFilters = False
    else:
        self.UpdateTable()
        self.existFilters = True
def __init__(self, type, name, keyword, platform, toolset):
    """Build an empty project description rooted at an XML ``<Project>`` element.

    NOTE(review): ``type`` shadows the builtin, but renaming it would change
    the keyword-argument interface for callers, so it is left as-is.

    :param type:     project type identifier, stored on ``self._type``.
    :param name:     project name.
    :param keyword:  project keyword attribute.
    :param platform: target platform.
    :param toolset:  build toolset.
    """
    # Root element with the class-level defaults (DefaultTargets,
    # ToolsVersion, xmlns are class attributes of Project).
    self._xml = Xml.Element('Project', attrib=dict(
        DefaultTargets=Project.DefaultTargets,
        ToolsVersion=Project.ToolsVersion,
        xmlns=Project.xmlns))
    self._id = ID.generate()  # unique project id
    self._type = type
    self._toolset = toolset
    self._name = name
    self._keyword = keyword
    self._platform = platform
    # Collections filled in later by other methods.
    self._dependencies = []
    self._filters = Filters(Project)
    self._configurations = []
    self._sourceFiles = []
    self._headerFiles = []
def __init__(self):
    """Initialise video capture, the output writer, and the detection helpers."""
    # If video source is USB device for testing, we will use the vs and cap variables below.
    # self.vs = WebcamVideoStream(src=0).start()  # read video in as a stream
    self.capture = cv2.VideoCapture(0)  # CHANGE MILES: 0 for drone, 1 for webcam (Miles' computer)
    # If video source is drone, we will use the code below.
    # NOTE(review): cv2.cv.CV_FOURCC is the OpenCV 2.x API; OpenCV 3+ renamed it
    # to cv2.VideoWriter_fourcc — confirm which OpenCV version is installed.
    self.fourcc = cv2.cv.CV_FOURCC(*'XVID')
    self.out = cv2.VideoWriter('output.mov', self.fourcc, 20.0, (640, 480))
    # So we want to open application for video driver first, and then run file.
    # Currently the webcam video stream class does not work for video capture, therefore we
    # need to stick to cv2.VideoCapture() until WebcamVideoStream can be optimized for working.

    # Instantiate helper objects.
    self.filters = Filters()                      # Filters for filtering the file.
    self.motionDetection = MotionDetection()      # MotionDetection for grabbing motion.
    self.cascadeDetection = Cascading()           # Cascading for feature recognition.
    self.blurDetection = DetectBlur(150)          # 100 would be the value to be used for fine tuning.
    self.destroyWindows = WindowDestruction()
    time.sleep(0.25)  # Allow the camera a few milliseconds to boot up.
    # firstFrame is the baseline frame that later frames are compared
    # against for motion change.
    self.firstFrame = None
    # Initiate toggles.
    self.motionTime = False
    self.cascadeTime = False
    self.blurDetectionTime = False
    # Initialize external counters.
    self.numFrames = 0
    self.ts = time.time()
# Forward FFT of the z-velocity component (vx/vy are transformed outside this chunk).
cvz = myFFT3Dfield.GetFFT3Dfield(vz, lx, ly, lz, nproc, my_id)
comm.Barrier(); t2 = MPI.Wtime()
if(my_id == 0):
    # Only rank 0 reports timing.
    sys.stdout.write('3 R-C FFTs cost: {0:.2f} seconds\n'.format(t2 - t1))

############################# unfiltered #############################
## Get energy spectrum in Fourier space
comm.Barrier(); t1 = MPI.Wtime()
ek_all = myEnergySpc.GetSpectrumFromComplexField(cvx, cvy, cvz, k2, lx, ly, lz, nek, nproc, my_id)
comm.Barrier(); t2 = MPI.Wtime()

############################# kappa_cutoff=100 #############################
kappa_c = 100.0  # spectral cutoff wavenumber for both filters below
## Filter the velocity field using the GAUSSIAN filter
myFilter = Filters()
cvx1 = myFilter.FilterTheComplexField(cvx, k2, kappa_c, 'gaussian')
cvy1 = myFilter.FilterTheComplexField(cvy, k2, kappa_c, 'gaussian')
cvz1 = myFilter.FilterTheComplexField(cvz, k2, kappa_c, 'gaussian')
## Get energy spectrum in Fourier space
comm.Barrier(); t1 = MPI.Wtime()
ek_gaussian = myEnergySpc.GetSpectrumFromComplexField(cvx1, cvy1, cvz1, k2, lx, ly, lz, nek, nproc, my_id)
comm.Barrier(); t2 = MPI.Wtime()
# Release the Gaussian-filtered fields before the next filter pass to limit peak memory.
del cvx1
del cvy1
del cvz1
## Filter the velocity field using the SHARP filter
myFilter = Filters()
cvx1 = myFilter.FilterTheComplexField(cvx, k2, kappa_c, 'sharp')
def deal_filter(self):
    """Run the Filters stage over the current variables and merge its
    results back into ``self._vars`` in place."""
    self._vars.update(Filters(self._vars).process())
def run():
    """Load the selected image, apply the configured frequency-domain filter,
    and display original / DFT magnitude / mask / result panels in the GUI.

    Reads parameters from the module-level Tk controls (setCutoff, setOrder,
    setWidth, setWeight) and draws into the module-level ``fig``/``window``.
    """
    print("***RUNNING***")
    # Filter parameters from the Tk controls.
    cutoff = setCutoff.get()
    order = setOrder.get()
    width = setWidth.get()
    # Load image (flag 0 = greyscale).
    print("Uploading " + img)
    image = cv2.imread(img, 0)
    # Timer start.
    start = time.time()
    print("Starting Timer")
    # Filter the image; out = (magnitude DFT, mask, filtered image).
    obj = Filters(image, filter, cutoff, order, width)
    out = obj.FFT()
    # Timer end.
    end = time.time()
    print("Timer Stopped")
    t = float("{0:.3f}".format(end - start))
    print("Total Time = ", t)
    # Clear old plots before redrawing all four panels.
    plt.clf()
    # Original image panel.
    a1 = fig.add_subplot(221)
    a1.imshow(image, cmap='binary_r')
    a1.axis('off')
    a1.set_title("Original Image")
    # DFT magnitude panel.
    a2 = fig.add_subplot(222)
    a2.imshow(out[0], cmap='binary_r')
    a2.xaxis.set_visible(False)
    a2.yaxis.set_visible(False)
    a2.set_title("Magnitude DFT")
    # Mask panel.
    a3 = fig.add_subplot(223)
    a3.imshow(out[1], cmap='binary_r')
    a3.set_facecolor('k')
    a3.xaxis.set_visible(False)
    a3.yaxis.set_visible(False)
    a3.set_title("Mask")
    # Resulting-image panel.
    """if there is a value inside of the Weight field, the program assumes
    to use unsharp, otherwise it uses original filter"""
    if is_number(setWeight.get()):
        # output_dir = 'output/'
        # output_image_name = output_dir + "_" + datetime.now().strftime("%m%d-%H%M%S") + ".jpg"
        outName = "output/result.png"
        # Signed arithmetic: cast to int32 so the difference can go negative.
        image1 = np.int32(image)
        image2 = np.int32(out[2])
        diff = image1 - image2
        unsharpImage = (image + (float(setWeight.get()) * diff))
        # Write the image first, then display the result. Doing this the
        # other way around causes imshow to display a different image.
        cv2.imwrite(outName, unsharpImage)
        resultImage = cv2.imread('output/result.png', 0)
        a4 = fig.add_subplot(224)
        a4.imshow(resultImage, cmap='binary_r')
        a4.set_title("Filtered Image")
    else:
        a4 = fig.add_subplot(224)
        a4.imshow(out[2], cmap='binary_r')
        a4.axis('off')
        a4.set_title("Filtered Image")
    fig.tight_layout()
    canvas = FigureCanvasTkAgg(fig, master=window)
    canvas.get_tk_widget().grid(row=2, columnspan=6)
    # Show elapsed time in the window.
    t1 = str(t)
    msg = "Time Elapsed: " + t1
    Label(window, text=msg, font=("Times", 15), fg="red").grid(row=3, sticky=NE)
    canvas.draw()
from PIL import Image
from PIL.ImageQt import ImageQt
import urllib.request
import numpy as np
import webbrowser
import random

# Raise the recursion limit — presumably for deep recursive filter
# routines elsewhere in the module; confirm which caller needs it.
sys.setrecursionlimit(5000)

# My Libraries
from Filters import Filters
from MessageBox import MessageBox
from ImportFile import ImportFile

# Global instance through which the UI invokes filter methods.
filters = Filters()
path_to_image = "Images/"


# Computes the path where bundled resources (icons) are found after a
# PyInstaller export.
def resource_path(relative_path):
    """Get absolute path to a resource; works both for dev and for PyInstaller."""
    try:
        # PyInstaller creates a temp folder and stores its path in _MEIPASS.
        base_path = sys._MEIPASS
    except Exception:
        # Not frozen: resolve relative to the current working directory.
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)
from Population import Population
from Imagem import Imagem
from Filters import Filters

# Prepare the GA target: greyscale the image, binarise with threshold 100,
# save the result, and flatten it into the target solution vector.
f = Filters()
img = Imagem()
img.open("Imagens/monalisa.jpg")
img.greyScale()
img.load()
matrix = f.binary(img.toMatrix(), 100)
img.setMatrix(matrix)
img.save("Imagens/monalisagrey.jpg")
solve = img.vectorize()

# Genetic-algorithm parameters.
values = [0, 255]            # allowed gene values (binary pixel intensities)
population_size = 100
chromosome_size = 8
cross_rate = 1.0
mutation_rate = 0.05
generations = 10000
times = int(65536 / chromosome_size)  # chunks needed to cover the 65536-pixel vector


def func(chromosome, solution):
    """Fitness helper: count genes that match the target solution.

    NOTE(review): as visible here, index ``i`` is never advanced and the
    function returns nothing — every gene is compared against
    ``solution[0]``. This chunk appears truncated (missing ``i += 1`` and
    ``return hit``); confirm against the full file.
    """
    i = 0
    hit = 0
    for g in chromosome:
        if (g == solution[i]):
            hit += 1
# NOTE(review): formatting was collapsed; these writes reference names
# (html_lineChart_path, html_lineChart, ...) defined before this chunk and
# may originally live inside a VisualizationOutputs method — confirm the
# nesting against the full file.
with open(html_lineChart_path, 'w') as f:
    f.write(html_lineChart)
with open(html_pieChart_path, 'w') as f:
    f.write(html_pieChart)


class Outputs(ConsoleOutputs, FileOutputs, VisualizationOutputs):
    """Facade combining console, file, and HTML-visualization outputs."""

    def __init__(self, filters_res):
        # filters_res: result mapping produced by the Filters stage.
        super(Outputs, self).__init__()
        self._vars = filters_res
        # vars_dict is presumably provided by a parent class — TODO confirm.
        self.res = self.vars_dict.get('RESULT')

    def process(self):
        """Dispatch the result to each output channel enabled in vars_dict."""
        if self.vars_dict.get('TO_CONSOLE'):
            self.to_console(self.res)
        if self.vars_dict.get('TO_FILE'):
            self.to_file(self.res)
        if self.vars_dict.get('DATA_VISUALIZATION'):
            self.to_visual_html()

    def process_line_chart(self):
        # Convenience wrapper: render only the HTML visualization.
        self.to_visual_html()


if __name__ == '__main__':
    from Inputs import Inputs
    from Filters import Filters
    filter_res = Filters(Inputs().process()).process()
    Outputs(filter_res).process()
def run():
    """Load the selected image, apply the configured frequency-domain filter,
    render the four result panels in the GUI, and save the filtered image.

    Reads parameters from the module-level Tk controls (setCutoff, setOrder,
    setWidth, setWeight, setX, setY, setFFT) and draws into the module-level
    ``fig``/``window``.
    """
    print("***RUNNING***")
    if (setFFT.get() == 0):
        print("Using Built-in FFT")
    if (setFFT.get() == 1):
        print("Using own FFT")
    # Filter parameters from the Tk controls.
    cutoff = setCutoff.get()
    order = setOrder.get()
    width = setWidth.get()
    weight = setWeight.get()
    x_val = setX.get()
    y_val = setY.get()
    whichFFT = setFFT.get()
    # Load image (flag 0 = greyscale).
    print("Uploading " + img)
    image = cv2.imread(img, 0)
    # Timer start.
    start = time.time()
    print("Starting Timer")
    # Filter the image; out = (magnitude DFT, mask, filtered image).
    obj = Filters(image, filter, cutoff, order, width, weight, x_val, y_val, whichFFT)
    out = obj.FFT()
    # Timer end.
    end = time.time()
    print("Timer Stopped")
    t = float("{0:.3f}".format(end - start))
    print("Total Time = ", t)
    # Clear old plots before redrawing all four panels.
    plt.clf()
    # Original image panel.
    a1 = fig.add_subplot(221)
    a1.imshow(image, cmap='binary_r')
    a1.axis('off')
    a1.set_title("Original Image")
    # DFT magnitude panel.
    a2 = fig.add_subplot(222)
    a2.imshow(out[0], cmap='binary_r')
    a2.xaxis.set_visible(False)
    a2.yaxis.set_visible(False)
    a2.set_title("Magnitude DFT")
    # Mask panel.
    a3 = fig.add_subplot(223)
    a3.imshow(out[1], cmap='binary_r')
    a3.set_facecolor('k')
    a3.xaxis.set_visible(False)
    a3.yaxis.set_visible(False)
    a3.set_title("Mask")
    # Filtered-result panel.
    a4 = fig.add_subplot(224)
    a4.imshow(out[2], cmap='binary_r')
    a4.axis('off')
    a4.set_title("Filtered Image")
    fig.tight_layout()
    canvas = FigureCanvasTkAgg(fig, master=window)
    canvas.get_tk_widget().grid(row=2, columnspan=6)
    # Save the filtered image under a timestamped name.
    output_dir = 'output/'
    output_image_name = output_dir + "_dft_filter_" + datetime.now().strftime(
        "%m%d-%H%M%S") + ".jpg"
    cv2.imwrite(output_image_name, out[2])
    # Show elapsed time in the window.
    t1 = str(t)
    msg = "Time Elapsed: " + t1
    Label(window, text=msg, font=("Times", 15), fg="red").grid(row=3, sticky=NE)
    canvas.draw()