def optionFilters(option):
    img = cv2.imread(Options.getFilename("flor"), 0)
    if option == "media":
        return Filters.media(img, 3)
    if option == "gaussiano":
        return Filters.gaussian(img)
    if option == "mediana":
        return Filters.medianFilter(img, 3)
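# Illustrative usage of optionFilters() above, assuming cv2 is imported and the
# Options/Filters helpers are available; the output file name is hypothetical.
filtered = optionFilters("mediana")       # apply the median-filter branch
cv2.imwrite("flor_mediana.png", filtered)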
def create_app(config_file='config.cfg'):
    app = Flask(__name__, template_folder=template_dir, static_folder=static_dir,
                root_path=root_folder)
    app.config.from_pyfile(config_file)

    with app.app_context():
        init_app_db(app)
        if app.config['ONLINE']:
            global mqtt_client
            print(app.config['DEVICES_TABLE_NAME'])
            handlers = [SensorMessageHandler(), RelayMessageHandler(), RelayStatusMessageHandler()]
            mqtt_client = init_mqtt(app.config, handlers)
            global dynamo_db_table
            dynamo_db_table = init_dynamodb(app.config['DEVICES_TABLE_NAME'])
            init_task_scheduler(load_all_schedules())

    app.register_blueprint(home)
    app.register_blueprint(lights)
    app.register_blueprint(sensors)
    app.register_blueprint(tube_status)
    app.register_blueprint(rooms)

    from Filters import Filters
    Filters().init_filters(app)

    return app
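# Minimal sketch of running the factory above; the config.cfg file name and the
# debug flag are assumptions, not part of the original application.
if __name__ == '__main__':
    app = create_app('config.cfg')
    app.run(debug=True)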
def OpenFilters(self):
    winFilters = Filters()
    winFilters.setWindowModality(Qt.ApplicationModal)
    winFilters.exec_()
    self.result = winFilters.GetArray()
    if (len(self.result) == 0):
        self.existFilters = False
    else:
        self.UpdateTable()
        self.existFilters = True
def __init__(self):
    # If video source is a USB device for testing, we will use the vs and cap variables below.
    #self.vs = WebcamVideoStream(src=0).start()  # so we want to read video in as a stream now so we can
    self.capture = cv2.VideoCapture(0)  # CHANGE MILES: 0 for drone, 1 for webcam in the case of Miles' computer

    # If video source is the drone, we will use the code below.
    self.fourcc = cv2.cv.CV_FOURCC(*'XVID')
    self.out = cv2.VideoWriter('output.mov', self.fourcc, 20.0, (640, 480))

    # We want to open the application for the video driver first, and then run the file.
    # Currently the webcam video stream class does not work for video capture, so we
    # need to stick to cv2.VideoCapture() until WebcamVideoStream can be optimized.

    # Instantiate objects
    self.filters = Filters()                    # Filters for filtering the file.
    self.motionDetection = MotionDetection()    # MotionDetection for grabbing motion.
    self.cascadeDetection = Cascading()         # Cascading for feature recognition.
    self.blurDetection = DetectBlur(150)        # 100 would be the value to use for fine tuning.
    self.destroyWindows = WindowDestruction()

    time.sleep(0.25)  # Allow the camera a few milliseconds to boot up.

    # firstFrame is the reference frame used for motion tracking; later frames are
    # compared against it to detect motion change.
    self.firstFrame = None

    # Initiate toggles.
    self.motionTime = False
    self.cascadeTime = False
    self.blurDetectionTime = False

    # Initialize external variables.
    self.numFrames = 0
    self.ts = time.time()
cvz=myFFT3Dfield.GetFFT3Dfield(vz,lx,ly,lz,nproc,my_id)
comm.Barrier(); t2=MPI.Wtime()
if(my_id==0):
    sys.stdout.write('3 R-C FFTs cost: {0:.2f} seconds\n'.format(t2-t1))

############################# unfiltered #############################
## Get energy spectrum in Fourier space
comm.Barrier(); t1=MPI.Wtime()
ek_all=myEnergySpc.GetSpectrumFromComplexField(cvx,cvy,cvz,k2,lx,ly,lz,nek,nproc,my_id)
comm.Barrier(); t2=MPI.Wtime()

############################# kappa_cutoff=100 #############################
kappa_c=100.0

## Filter the velocity field using the GAUSSIAN filter
myFilter=Filters()
cvx1=myFilter.FilterTheComplexField(cvx,k2,kappa_c,'gaussian')
cvy1=myFilter.FilterTheComplexField(cvy,k2,kappa_c,'gaussian')
cvz1=myFilter.FilterTheComplexField(cvz,k2,kappa_c,'gaussian')

## Get energy spectrum in Fourier space
comm.Barrier(); t1=MPI.Wtime()
ek_gaussian=myEnergySpc.GetSpectrumFromComplexField(cvx1,cvy1,cvz1,k2,lx,ly,lz,nek,nproc,my_id)
comm.Barrier(); t2=MPI.Wtime()

del cvx1
del cvy1
del cvz1

## Filter the velocity field using the SHARP filter
myFilter=Filters()
cvx1=myFilter.FilterTheComplexField(cvx,k2,kappa_c,'sharp')
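## Illustrative sketch of what a spectral low-pass filter such as FilterTheComplexField
## might compute, assuming k2 holds the squared wavenumber magnitude of each mode.
## This is not the project's Filters implementation; its transfer functions may differ.
import numpy as np

def filter_complex_field(cfield, k2, kappa_c, kind):
    if kind == 'sharp':
        # Sharp spectral cutoff: keep modes with |k| <= kappa_c, zero the rest.
        transfer = (k2 <= kappa_c**2).astype(float)
    elif kind == 'gaussian':
        # Standard Gaussian filter transfer function with filter width Delta = pi / kappa_c.
        delta = np.pi / kappa_c
        transfer = np.exp(-k2 * delta**2 / 24.0)
    else:
        raise ValueError("unknown filter kind: " + kind)
    return cfield * transfer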
from Cascading import Cascading
from Detect_Blur import DetectBlur
from compressImages import ImageCompression

# For this program I'm testing the use of thresholding by applying different filters
# and seeing how easy it is to detect corners and objects within the camera frame.
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True, help="path to the json configuration file")
args = vars(ap.parse_args())

vs = WebcamVideoStream(src=0).start()  # so we want to read video in as a stream now so we can
#cap = cv2.VideoCapture(0)

filters = Filters()
cascades = Cascading()
blurDetection = DetectBlur(120)
imgCmpr = ImageCompression()

conf = json.load(open(args["conf"]))  # Load the json file.
client = None

# check to see if the Dropbox should be used
if conf["use_dropbox"]:
    # connect to dropbox and start the session authorization process
    flow = DropboxOAuth2FlowNoRedirect(conf["dropbox_key"], conf["dropbox_secret"])
    print "[INFO] Authorize this application: {}".format(flow.start())
    authCode = raw_input("Enter auth code here: ").strip()
def deal_filter(self):
    res = Filters(self._vars).process()
    self._vars.update(res)
def run():
    print("***RUNNING***")
    cutoff = setCutoff.get()
    order = setOrder.get()
    width = setWidth.get()

    # Load image
    print("Uploading " + img)
    image = cv2.imread(img, 0)

    # Timer Start
    start = time.time()
    print("Starting Timer")

    # Filter Image
    obj = Filters(image, filter, cutoff, order, width)
    out = obj.FFT()

    # Timer End
    end = time.time()
    print("Timer Stopped")
    t = float("{0:.3f}".format(end - start))
    print("Total Time = ", t)

    # clear old plots
    plt.clf()

    # Image display
    a1 = fig.add_subplot(221)
    a1.imshow(image, cmap='binary_r')
    a1.axis('off')
    a1.set_title("Original Image")

    # DFT graph
    a2 = fig.add_subplot(222)
    a2.imshow(out[0], cmap='binary_r')
    a2.xaxis.set_visible(False)
    a2.yaxis.set_visible(False)
    a2.set_title("Magnitude DFT")

    # Mask graph
    a3 = fig.add_subplot(223)
    a3.imshow(out[1], cmap='binary_r')
    a3.set_facecolor('k')
    a3.xaxis.set_visible(False)
    a3.yaxis.set_visible(False)
    a3.set_title("Mask")

    # Resulting Image display
    # If there is a value inside the Weight field, the program applies unsharp masking;
    # otherwise it uses the original filter output.
    if is_number(setWeight.get()):
        # output_dir = 'output/'
        # output_image_name = output_dir + "_" + datetime.now().strftime("%m%d-%H%M%S") + ".jpg"
        outName = "output/result.png"
        image1 = np.int32(image)
        image2 = np.int32(out[2])
        diff = image1 - image2
        unsharpImage = (image + (float(setWeight.get()) * diff))
        # Write the image first, then display the result. Doing this the other way
        # around causes imshow to display a different image.
        cv2.imwrite(outName, unsharpImage)
        resultImage = cv2.imread('output/result.png', 0)
        a4 = fig.add_subplot(224)
        a4.imshow(resultImage, cmap='binary_r')
        a4.set_title("Filtered Image")
    else:
        a4 = fig.add_subplot(224)
        a4.imshow(out[2], cmap='binary_r')
        a4.axis('off')
        a4.set_title("Filtered Image")

    fig.tight_layout()
    canvas = FigureCanvasTkAgg(fig, master=window)
    canvas.get_tk_widget().grid(row=2, columnspan=6)

    # print time
    t1 = str(t)
    msg = "Time Elapsed: " + t1
    Label(window, text=msg, font=("Times", 15), fg="red").grid(row=3, sticky=NE)
    canvas.draw()
from PIL import Image
from PIL.ImageQt import ImageQt
import urllib.request
import numpy as np
import webbrowser
import random

sys.setrecursionlimit(5000)

# My Libraries
from Filters import Filters
from MessageBox import MessageBox
from ImportFile import ImportFile

# Global class that calls filter methods
filters = Filters()

path_to_image = "Images/"


# This function calculates the path where to find icons after the pyinstaller export
def resource_path(relative_path):
    """ Get absolute path to resource, works for dev and for PyInstaller """
    try:
        # PyInstaller creates a temp folder and stores path in _MEIPASS
        base_path = sys._MEIPASS
    except Exception:
        base_path = os.path.abspath(".")

    return os.path.join(base_path, relative_path)
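# Hypothetical usage of resource_path() above: resolve a bundled icon both in development
# and inside the PyInstaller temp folder; the icon file name is only an illustration.
icon_path = resource_path(path_to_image + "icon.png")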
from Population import Population
from Imagem import Imagem
from Filters import Filters

f = Filters()
img = Imagem()
img.open("Imagens/monalisa.jpg")
img.greyScale()
img.load()

matrix = f.binary(img.toMatrix(), 100)
img.setMatrix(matrix)
img.save("Imagens/monalisagrey.jpg")

solve = img.vectorize()

values = [0, 255]
population_size = 100
chromosome_size = 8
cross_rate = 1.0
mutation_rate = 0.05
generations = 10000
times = int(65536 / chromosome_size)


def func(chromosome, solution):
    i = 0
    hit = 0
    for g in chromosome:
        if (g == solution[i]):
            hit += 1
import datetime
from Filters import Filters  # This should allow us to import the Filters file.
from WebcamVideoStream import WebcamVideoStream
from Cascading import Cascading
from Detect_Blur import DetectBlur
from compressImages import ImageCompression
#from MotionDetection import MotionDetection
from WindowDestruction import WindowDestruction
from MotionDetection import MotionDetection

# For this program I'm testing the use of thresholding by applying different filters
# and seeing how easy it is to detect corners and objects within the camera frame.
vs = WebcamVideoStream(src=0).start()  # so we want to read video in as a stream now so we can
#cap = cv2.VideoCapture(0)

filters = Filters()
fourcc = cv2.cv.CV_FOURCC(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
motion = MotionDetection()
destroyWindows = WindowDestruction()
firstFrame = None

while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    #saveFrame = frame  # For storing a copy for encoding later on.
    #frame = cv2.resize(frame, (500, 500))
with open(html_lineChart_path, 'w') as f:
    f.write(html_lineChart)
with open(html_pieChart_path, 'w') as f:
    f.write(html_pieChart)


class Outputs(ConsoleOutputs, FileOutputs, VisualizationOutputs):
    def __init__(self, filters_res):
        super(Outputs, self).__init__()
        self._vars = filters_res
        self.res = self.vars_dict.get('RESULT')

    def process(self):
        if self.vars_dict.get('TO_CONSOLE'):
            self.to_console(self.res)
        if self.vars_dict.get('TO_FILE'):
            self.to_file(self.res)
        if self.vars_dict.get('DATA_VISUALIZATION'):
            self.to_visual_html()

    def process_line_chart(self):
        self.to_visual_html()


if __name__ == '__main__':
    from Inputs import Inputs
    from Filters import Filters

    filter_res = Filters(Inputs().process()).process()
    Outputs(filter_res).process()
from compressImages import ImageCompression

# For this program I'm testing the use of thresholding by applying different filters
# and seeing how easy it is to detect corners and objects within the camera frame.
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True, help="path to the json configuration file")
args = vars(ap.parse_args())

vs = WebcamVideoStream(src=0).start()  # so we want to read video in as a stream now so we can
#cap = cv2.VideoCapture(0)

filters = Filters()
cascades = Cascading()
blurDetection = DetectBlur(120)
imgCmpr = ImageCompression()

conf = json.load(open(args["conf"]))  # Load the json file.
client = None

# check to see if the Dropbox should be used
if conf["use_dropbox"]:
    # connect to dropbox and start the session authorization process
    flow = DropboxOAuth2FlowNoRedirect(conf["dropbox_key"], conf["dropbox_secret"])
    print "[INFO] Authorize this application: {}".format(flow.start())
    authCode = raw_input("Enter auth code here: ").strip()
def run():
    print("***RUNNING***")
    if (setFFT.get() == 0):
        print("Using Built-in FFT")
    if (setFFT.get() == 1):
        print("Using own FFT")

    cutoff = setCutoff.get()
    order = setOrder.get()
    width = setWidth.get()
    weight = setWeight.get()
    x_val = setX.get()
    y_val = setY.get()
    whichFFT = setFFT.get()

    # Load image
    print("Uploading " + img)
    image = cv2.imread(img, 0)

    # Timer Start
    start = time.time()
    print("Starting Timer")

    # Filter Image
    obj = Filters(image, filter, cutoff, order, width, weight, x_val, y_val, whichFFT)
    out = obj.FFT()

    # Timer End
    end = time.time()
    print("Timer Stopped")
    t = float("{0:.3f}".format(end - start))
    print("Total Time = ", t)

    # clear old plots
    plt.clf()

    # Image display
    a1 = fig.add_subplot(221)
    a1.imshow(image, cmap='binary_r')
    a1.axis('off')
    a1.set_title("Original Image")

    # DFT graph
    a2 = fig.add_subplot(222)
    a2.imshow(out[0], cmap='binary_r')
    a2.xaxis.set_visible(False)
    a2.yaxis.set_visible(False)
    a2.set_title("Magnitude DFT")

    # Mask graph
    a3 = fig.add_subplot(223)
    a3.imshow(out[1], cmap='binary_r')
    a3.set_facecolor('k')
    a3.xaxis.set_visible(False)
    a3.yaxis.set_visible(False)
    a3.set_title("Mask")

    # Resulting Image display
    a4 = fig.add_subplot(224)
    a4.imshow(out[2], cmap='binary_r')
    a4.axis('off')
    a4.set_title("Filtered Image")

    fig.tight_layout()
    canvas = FigureCanvasTkAgg(fig, master=window)
    canvas.get_tk_widget().grid(row=2, columnspan=6)

    output_dir = 'output/'
    output_image_name = output_dir + "_dft_filter_" + datetime.now().strftime("%m%d-%H%M%S") + ".jpg"
    cv2.imwrite(output_image_name, out[2])

    # print time
    t1 = str(t)
    msg = "Time Elapsed: " + t1
    Label(window, text=msg, font=("Times", 15), fg="red").grid(row=3, sticky=NE)
    canvas.draw()
class Project:
    ToolsVersion = "4.0"
    DefaultTargets = "Build"
    xmlns = "http://schemas.microsoft.com/developer/msbuild/2003"

    # ctor
    def __init__( self, type, name, keyword, platform, toolset ):
        self._xml = Xml.Element( 'Project', attrib=dict( DefaultTargets = Project.DefaultTargets, ToolsVersion = Project.ToolsVersion, xmlns = Project.xmlns ) )
        self._id = ID.generate()
        self._type = type
        self._toolset = toolset
        self._name = name
        self._keyword = keyword
        self._platform = platform
        self._dependencies = []
        self._filters = Filters( Project )
        self._configurations = []
        self._sourceFiles = []
        self._headerFiles = []

    # name
    @property
    def name( self ):
        return self._name

    # uid
    @property
    def uid( self ):
        return self._id

    # dependencies
    @property
    def dependencies( self ):
        return self._dependencies

    # filters
    @property
    def filters( self ):
        return self._filters

    # addDependency
    def addDependency( self, project ):
        self._dependencies.append( project )

    # createConfiguration
    def createConfiguration( self, name, settings ):
        return Configuration( name, settings, self._platform )

    # setConfigurations
    def setConfigurations( self, configurations ):
        self._configurations = configurations

    # addHeaderFiles
    def addHeaderFiles( self, files ):
        self._headerFiles = self._headerFiles + files

    # addSourceFiles
    def addSourceFiles( self, files ):
        self._sourceFiles = self._sourceFiles + files

    # serialize
    def serialize( self, fileName ):
        self._createXml()

        str = Xml.tostring( self._xml, 'utf-8' )
        reparsed = minidom.parseString( str )

        with open( fileName, 'wt' ) as fh:
            fh.write( reparsed.toprettyxml( indent = " ", encoding = 'utf-8' ) )
            fh.close()

        self._filters.serialize( fileName + '.filters' )

    # _createXml
    def _createXml( self ):
        # Add project configurations group
        configurationsGroup = ProjectConfigurations( self._xml )
        [configurationsGroup.add( cfg ) for cfg in self._configurations]

        # Source files
        group = ItemGroup( self._xml )
        [group.addSource( file ) for file in self._sourceFiles]

        # Header files
        group = ItemGroup( self._xml )
        [group.addInclude( file ) for file in self._headerFiles]

        # Add globals
        globals = PropertyGroup( self._xml, None, Label = 'Globals' )
        globals.set( 'ProjectGuid', self._id )
        globals.set( 'Keyword', self._keyword )
        globals.set( 'RootNamespace', self._name )

        # Add imports
        Group( self._xml, 'Import', dict( Project = '$(VCTargetsPath)\Microsoft.Cpp.Default.props' ) )

        # Add property groups
        for cfg in self._configurations:
            group = PropertyGroup( self._xml, 'PropertyGroup', Condition = cfg.condition, Label = 'Configuration' )
            group.setProperties( dict(
                ConfigurationType = self._type,
                UseDebugLibraries = True if cfg.name == 'Debug' else False,
                CharacterSet = 'MultiByte',
                PlatformToolset = self._toolset
            ) )

        # Add import properties
        Group( self._xml, 'Import', dict( Project = '$(VCTargetsPath)\Microsoft.Cpp.props' ) )

        # Add property sheets
        for cfg in self._configurations:
            PropertySheets( self._xml, cfg )

        # Add item definition groups
        for cfg in self._configurations:
            definition = PropertyGroup( self._xml, 'ItemDefinitionGroup', Condition = cfg.condition )
            definition.setProperties( cfg.settings )

        # Add import targets
        Group( self._xml, 'Import', dict( Project = '$(VCTargetsPath)\Microsoft.Cpp.targets' ) )
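# Hypothetical usage of the Project class above; the project name, toolset and settings
# are illustrative and assume the Configuration/Filters helpers behave as referenced here.
project = Project( 'Application', 'MyApp', 'Win32Proj', 'x64', 'v120' )
debug   = project.createConfiguration( 'Debug',   dict( WarningLevel = 'Level3' ) )
release = project.createConfiguration( 'Release', dict( WarningLevel = 'Level3' ) )
project.setConfigurations( [debug, release] )
project.addSourceFiles( ['src/main.cpp'] )
project.addHeaderFiles( ['src/main.h'] )
project.serialize( 'MyApp.vcxproj' )   # also writes MyApp.vcxproj.filters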