def test_filter_simple(self):
    res = [
        '', '', '', 'cow', 'siamese', 'wonderland', 'foo', 'toothpaste',
        '', '', '', 'milky', 'flight-manual', 'toothpick', '', None,
        '', '',
    ]
    generated = []
    filter = Filter(options)
    for word in words:
        generated.append(filter.filter(word))
    self.assertEqual(generated, res)

def __init__(self, nelx, nely, volfrac, penal, rmin, ft, gui, bc):
    self.n = nelx * nely
    self.opt = nlopt.opt(nlopt.LD_MMA, self.n)

    self.passive = bc.get_passive_elements()
    self.xPhys = np.ones(self.n)
    if self.passive is not None:
        self.xPhys[self.passive] = 0

    # set bounds
    ub = np.ones(self.n, dtype=float)
    self.opt.set_upper_bounds(ub)
    lb = np.zeros(self.n, dtype=float)
    self.opt.set_lower_bounds(lb)

    # set stopping criteria
    self.opt.set_maxeval(2000)
    self.opt.set_ftol_rel(0.001)

    # set objective and constraint functions
    self.opt.set_min_objective(self.compliance_function)
    self.opt.add_inequality_constraint(self.volume_function, 0)

    # setup filter
    self.ft = ft
    self.filtering = Filter(nelx, nely, rmin)

    # setup problem
    self.init_problem(nelx, nely, penal, bc)
    self.volfrac = volfrac

    # set GUI callback
    self.init_gui(gui)

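# The constructor above follows the standard nlopt Python pattern: create an
# opt, set bounds, set stopping criteria, register the objective, then call
# optimize() later. A minimal self-contained sketch of that pattern; the
# quadratic objective below is a stand-in, not the compliance function:
import nlopt
import numpy as np

n = 4
opt = nlopt.opt(nlopt.LD_MMA, n)
opt.set_lower_bounds(np.zeros(n))
opt.set_upper_bounds(np.ones(n))

def quadratic_objective(x, grad):
    # nlopt hands gradient-based algorithms an array to fill in place
    if grad.size > 0:
        grad[:] = 2.0 * x
    return float(np.dot(x, x))

opt.set_min_objective(quadratic_objective)
opt.set_maxeval(2000)
opt.set_ftol_rel(0.001)
x_opt = opt.optimize(np.full(n, 0.5))
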
def text_reply(msg):
    print(msg)
    content = msg['Content']
    # messages sent to yourself configure the auto-reply table
    if msg['FromUserName'] == msg['ToUserName']:
        print(content)
        WORD_CLEAR = u"自助回复清除成功"   # "auto-reply cleared successfully"
        WORD_ADD = u"自助回复增加成功"     # "auto-reply added successfully"
        WORD_UPDATE = u"自助回复更新成功"  # "auto-reply updated successfully"
        if content == "QC":  # clear all auto-replies
            AUTO_REPLY.clear()
            itchat.send('%s' % WORD_CLEAR, msg['FromUserName'])
            return
        sub = content.split("Q")
        if len(sub) == 3:
            q = sub[1]
            a = sub[2]
            AUTO_REPLY[q] = a
            itchat.send('%s' % WORD_ADD, msg['FromUserName'])
            return
    if msg['Type'] == 'Text':
        if content in AUTO_REPLY:
            itchat.send('%s' % AUTO_REPLY[content], msg['FromUserName'])
            return
    if msg['Type'] == 'Sharing':
        imgFilter = Filter()
        img_list = imgFilter.getContents(msg['Url'])
        i = 0
        for img in img_list:
            print(img_list, img, i)
            i = i + 1
            itchat.send('@img@%s' % (str(i) + ".gif"), msg['FromUserName'])

def Initialize(credentials=None, opt_url=None):
    """Initialize the EE library.

    If this hasn't been called by the time any object constructor is used,
    it will be called then. If this is called a second time with a different
    URL, it does not un-initialize e.g. the previously loaded Algorithms;
    it overwrites them and points at the alternate servers.

    Args:
      credentials: OAuth2 credentials.
      opt_url: The base url for the EarthEngine REST API to connect to.
    """
    data.initialize(credentials, (opt_url + '/api' if opt_url else None), opt_url)
    # Initialize the dynamically loaded functions on the objects that want them.
    ApiFunction.initialize()
    Element.initialize()
    Image.initialize()
    Feature.initialize()
    Collection.initialize()
    ImageCollection.initialize()
    FeatureCollection.initialize()
    Filter.initialize()
    Geometry.initialize()
    List.initialize()
    Number.initialize()
    String.initialize()
    Date.initialize()
    Dictionary.initialize()
    _InitializeGeneratedClasses()
    _InitializeUnboundMethods()

def delLogPath(self, path):
    if not self.containsLogPath(path):
        logSys.error(path + " is not monitored")
    else:
        self.monitor.stop_watch(path)
        Filter.delLogPath(self, path)
        logSys.info("Removed logfile = %s" % path)

def test_emoji(self):
    app = Filter()
    app.blocks = ['🚫']
    sentences = [
        'orospu çocuğu yarak gibi kitap',
        'orrrrrooooossssspuuuu evladı',
        'am gibi kitap',
        'sik gibi kitap',
        'Sikimi ye Booooookkkk gibi kitap',
    ]
    results = [
        '🚫 çocuğu 🚫 gibi kitap',
        '🚫 evladı',
        '🚫 gibi kitap',
        '🚫 gibi kitap',
        '🚫 ye 🚫 gibi kitap',
    ]
    news = []
    for sentence in sentences:
        news.append(app.replace(sentence))
    self.assertEqual(news, results)

def threshold(self):
    current_image = self.result_image if self.stack_filters else self.input_image
    threshold_args = {}
    threshold_args['thresh_value'] = mbox('Threshold cutoff value:', entry=True)
    if threshold_args['thresh_value']:
        threshold_args['defaults'] = mbox('Use defaults?', ('yes', 'y'), ('no', 'n'))
        if threshold_args['defaults'] == 'n':
            threshold_args['max_value'] = mbox('Max value:', entry=True)
            threshold_args['thresh_type'] = mbox('Threshold type:', entry=True)
        # mbox(threshold_args, frame=False)
        self.undo_stack.append(self.result_image)
        if threshold_args['defaults'] == 'n':
            self.result_image = Filter.threshold(
                current_image,
                int(threshold_args['thresh_value']),
                int(threshold_args['max_value']),
                int(threshold_args['thresh_type']))
        else:
            self.result_image = Filter.threshold(
                current_image, int(threshold_args['thresh_value']))
        self.show_image()

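# Filter.threshold above is the project's own wrapper; assuming it maps onto
# OpenCV's thresholding (an assumption -- the wrapper body is not shown), the
# underlying call looks like this:
import cv2
import numpy as np

img = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
# cv2.threshold returns (retval, dst); with THRESH_BINARY, pixels above the
# cutoff become the max value and all others become 0.
_, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
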
def highpass(self):
    current_image = self.result_image if self.stack_filters else self.input_image
    self.undo_stack.append(self.result_image)
    highpass_args = {}
    highpass_args['defaults'] = mbox('Use defaults?', ('yes', 'y'), ('no', 'n'))
    if highpass_args['defaults'] == 'n':
        highpass_args['type'] = mbox('High Pass Type', ('Basic', 0), ('Original Factor', 1))
        highpass_args['sigma'] = mbox('Sigma:', entry=True)
        if highpass_args['type'] == 1:
            highpass_args['reinforcement_factor'] = mbox(
                'Reinforcement Factor:', entry=True)
    # mbox(highpass_args, frame=False)
    if highpass_args['defaults'] == 'n' and not highpass_args['sigma']:
        return
    if highpass_args['defaults'] == 'n':
        # the reinforcement factor is only prompted for (and passed) when
        # type == 1, otherwise the lookup would raise a KeyError
        if highpass_args['type'] == 1:
            self.result_image = Filter.highpass(
                current_image,
                highpass_args['type'],
                int(highpass_args['sigma']),
                float(highpass_args['reinforcement_factor']))
        else:
            self.result_image = Filter.highpass(
                current_image,
                highpass_args['type'],
                int(highpass_args['sigma']))
    else:
        self.result_image = Filter.highpass(current_image)
    self.show_image()

def objective(args):
    args['ksize'] = int(args['ksize'])
    filter = Filter(average_disparity,
                    frame_down_factor, mem_down_factor,
                    fovea_shape, frame_shape, values, verbose=False)
    filter.params = dict(args)

    costs = []
    for i in range(source.n_frames):
        frame = [downsample(source.video[i][0], frame_down_factor),
                 downsample(source.video[i][1], frame_down_factor)]
        disp, fovea_corner = filter.process_frame(source.positions[i], frame)
        true_disp = downsample(source.ground_truth[i], frame_down_factor)
        costs.append(cost(disp[:, values:], true_disp, average_disparity))

    mean_cost = np.mean(costs)
    print(mean_cost, args)
    return mean_cost

def _generateArtifactList(options):
    # load configuration
    logging.info("Loading configuration...")
    config = Configuration()
    config.load(options)

    # build list
    logging.info("Building artifact list...")
    listBuilder = ArtifactListBuilder(config)
    artifactList = listBuilder.buildList()

    logging.debug("Generated list contents:")
    for gat in artifactList:
        priorityList = artifactList[gat]
        for priority in priorityList:
            versionList = priorityList[priority]
            for version in versionList:
                logging.debug("  %s:%s", gat, version)

    # filter list
    logging.info("Filtering artifact list...")
    listFilter = Filter(config)
    artifactList = listFilter.filter(artifactList)

    logging.debug("Filtered list contents:")
    for gat in artifactList:
        priorityList = artifactList[gat]
        for priority in priorityList:
            versionList = priorityList[priority]
            for version in versionList:
                logging.debug("  %s:%s", gat, version)

    logging.info("Artifact list generation done")
    return artifactList

def run(self):
    timer = Timer()
    timer.start()

    # Create list of search urls
    search_urls = []
    filter = Filter(self.filter, self.startpage, self.maxpages)
    for page in range(self.startpage, self.startpage + self.maxpages):
        search_urls.append(filter.create_filter_url((page - 1) * 10))

    # Create pool of worker threads
    pool = ThreadPool(4)

    # Open the urls in their own threads
    organisaties = pool.map(unwrap_self_process_search,
                            zip([self] * len(search_urls), search_urls))
    pool.close()
    pool.join()

    results = {}
    results["organisaties"] = self.consolidate(organisaties)

    timer.stop()
    results["stats"] = {
        "exectime": timer.exectime(),
        "matches": {
            "total": str(self.search_results["results"]),
            "pages": str(self.search_results["pages"])
        },
        "read": {
            "page_from": str(self.startpage),
            "page_to": str(self.maxpages)
        }
    }
    return results

def config_analysis(alg_list):
    filt_event = Filter('FilterTauEvent')
    filt_event.cut_nTau = '== 1'
    filt_event.cut_tauStatus = ' == 3'
    alg_list.append(filt_event)

def peakPointers(self, signals, window_size, threshold, reverse=False):
    """
    Locates and counts all peak / bottom pointers.
    Returns a tuple (num_of_peaks, array_of_peak_indices).
    """
    # first normalize signals
    # normalize all peak / lows ranges
    #norm_signals = self.guassianNormalize(signals)

    # median-filter to smooth away noise and sharp peaks
    med_filtered_signals = Filter.medfilter(signals, window_size)

    # calculate residual, keeping only candidate peaks
    peak_candidates = signals - med_filtered_signals
    # plt.plot(peak_candidates)

    # non-max suppression: keep one highest pointer per peak
    peaks = Filter.nms(peak_candidates, window_size, reverse)
    # plt.plot(peaks)

    # threshold check, remove suspicious peaks
    idx = np.arange(0, len(signals))
    if not reverse:
        masks = peaks > threshold
    else:
        masks = peaks < threshold
    peak_idx = idx[masks]
    num_of_peaks = masks.sum()
    # return the tuple promised by the docstring
    return num_of_peaks, peak_idx

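# A usage sketch for peakPointers on a synthetic signal. The owning class
# name (PeakDetector) is hypothetical, and the behavior of Filter.medfilter /
# Filter.nms is assumed from the method body above:
import numpy as np

signals = np.zeros(200)
signals[[50, 140]] = 5.0              # two sharp spikes on a flat baseline
detector = PeakDetector()             # hypothetical owner of peakPointers
num_of_peaks, peak_idx = detector.peakPointers(signals, window_size=11,
                                               threshold=2.0)
print(num_of_peaks, peak_idx)         # expected: 2, array([ 50, 140])
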
def addLogPath(self, path):
    if self.containsLogPath(path):
        logSys.error(path + " already exists")
    else:
        self.monitor.watch_file(path, self.callback)
        Filter.addLogPath(self, path)
        logSys.info("Added logfile = %s" % path)

def __init__(self, jail):
    Filter.__init__(self, jail)
    self.__modified = False
    self.__lastModTime = dict()
    self.__file404Cnt = dict()
    logSys.info("Created FilterPoll")

def test_deletion(self):
    app = Filter()
    sentences = [
        'orospu çocuğu yarak gibi kitap',
        'orrrrrooooossssspuuuu evladı',
        'am gibi kitap',
        'sik gibi kitap',
        'Sikimi ye Booooookkkk gibi kitap',
        'güzel kitap çok sevdim',
        'resmen mükemmel',
    ]
    results = [
        'güzel kitap çok sevdim',
        'resmen mükemmel',
    ]
    news = []
    for sentence in sentences:
        result = app.detect(sentence)
        if result:
            news.append(result)
    self.assertEqual(news, results)

def Initialize(credentials="persistent", opt_url=None): """Initialize the EE library. If this hasn't been called by the time any object constructor is used, it will be called then. If this is called a second time with a different URL, this doesn't do an un-initialization of e.g.: the previously loaded Algorithms, but will overwrite them and let point at alternate servers. Args: credentials: OAuth2 credentials. 'persistent' (default) means use credentials already stored in the filesystem, or raise an explanatory exception guiding the user to create those credentials. opt_url: The base url for the EarthEngine REST API to connect to. """ if credentials == "persistent": credentials = _GetPersistentCredentials() data.initialize(credentials, (opt_url + "/api" if opt_url else None), opt_url) # Initialize the dynamically loaded functions on the objects that want them. ApiFunction.initialize() Element.initialize() Image.initialize() Feature.initialize() Collection.initialize() ImageCollection.initialize() FeatureCollection.initialize() Filter.initialize() Geometry.initialize() List.initialize() Number.initialize() String.initialize() Date.initialize() Dictionary.initialize() Terrain.initialize() _InitializeGeneratedClasses() _InitializeUnboundMethods()
def test_filter_finish_same_as_stop(self):
    words = [
        'cow', 'foo', 'fish', 'flamingo', 'trampoline', 'apollo', 'cow',
        'pancakes',
    ]
    options = {'start': 'cow', 'stop': 'Flamingo', 'finish': 'flamingo'}
    res = [
        '', 'foo', 'fish', None, '', '', '', 'pancakes',
    ]
    w = Filter(options)
    filtered = []
    for word in words:
        filtered.append(w.filter(word))
    self.assertEqual(res, filtered)

def main():
    distance = '30'
    zip_ = '80223'
    min_price = '300'
    max_price = '1500'
    has_pic = '1'  # 0 to disable
    bundle = '1'   # 0 to disable
    main_search = ('https://denver.craigslist.org/search/bia'
                   '?hasPic={}&bundleDuplicates={}&search_distance={}'
                   '&postal={}&min_price={}&max_price={}').format(
                       has_pic, bundle, distance, zip_, min_price, max_price)

    post_list = Scrape.scrape_search_pg(main_search)
    results = []
    for post in post_list:
        post_Obj = Filter(post[0])
        result = Filter.quick_filter(post_Obj)
        if result:
            result = Filter.size_filter(post_Obj)
            if result:
                results.append(
                    Keywords.score(post_Obj, Keywords.find(post_Obj)))
    if len(results) > 0:
        results = sorted(results, key=lambda x: x[2], reverse=True)
        Message.send(Message.format(results))

def __getPlaintext(self):
    # extract plaintext from pdf
    paper = PdfLib(self.wd + os.sep + self.filename)
    textBeginning = self.__guessDocBegining(self.filename)
    plaintext = paper.pdf2txt(textBeginning, "max")

    # normalize text
    f = Filter(asString=plaintext)
    plaintext = (
        f.substitutions()
        .oneCharPerLine()
        .normalizeCaracters()
        .lower()
        .uselessCharacters()
        .multipleDots()
        .listEnum()
        .digits()
        .shortTokens()
        .multipleSpaces()
        .getResult()
    )

    # experience shows that less than 6000 characters is mostly waste
    if len(plaintext) > 6000:
        result = {}
        result[self.langKey] = self.__guessLang(plaintext)
        result[self.plaintextKey] = plaintext
        result[self.filenameKey] = self.filename
        return result
    else:
        raise Exception(u"Document is too short.")

def __init__(self, list_of_positive_training_file_paths,
             list_of_negative_training_file_paths):
    # interleave positive and negative training files
    unfiltered_training_files = [
        val for pair in zip(list_of_positive_training_file_paths,
                            list_of_negative_training_file_paths)
        for val in pair
    ]
    filter = Filter()
    excellent_training_files = filter.get_excellent_training_files(unfiltered_training_files)
    poor_training_files = filter.get_poor_training_files(unfiltered_training_files)
    self.hitsCalculator = HitCalculator(excellent_training_files, poor_training_files)
    self.phrase_dictionary = {}

def get_entry(self, dn, attributes=None):
    if not attributes:
        attributes = ['*']
    filter = Filter()
    filter.add_equal('objectClass', '*')
    results = self.search(dn, filter.build(), ldap.SCOPE_BASE, attributes)
    return results[0][1]

def addLogPath(self, path):
    if self.containsLogPath(path):
        logSys.error(path + " already exists")
    else:
        self.__lastModTime[path] = 0
        self.__file404Cnt[path] = 0
        Filter.addLogPath(self, path)
        logSys.info("Added logfile = %s" % path)

def delLogPath(self, path):
    if not self.containsLogPath(path):
        logSys.error(path + " is not monitored")
    else:
        del self.__lastModTime[path]
        del self.__file404Cnt[path]
        Filter.delLogPath(self, path)
        logSys.info("Removed logfile = %s" % path)

def _get_sorted_map_side(self, main, yesterdays_chefs):
    filt = Filter()
    weights_s = self._get_sorted_map(self.chefs_side)
    weights_s = filt.yesterday(yesterdays_chefs, weights_s)
    weights_s = filt.same_person(main, weights_s)
    weights_s = filt.roommates(main, self.roommates, weights_s)
    return weights_s

def __init__(self, **options):
    Filter.__init__(self, **options)
    self.predicates = self.predicates or self.build_predicate_list()
    if len(self.predicates) == 0:
        raise NotImplementedError("Must provide at least one predicate")
    self.normalize_predicates()
    self.normalize_counted_features()
    self.setup_counts()

def foveation_sequence():
    frame_down_factor = 1
    mem_down_factor = 2     # relative to the frame down factor
    coarse_down_factor = 2  # for the coarse comparison

    fs = 80
    fovea_shape = (fs, fs)
    full_values = 128
    values = full_values // 2**frame_down_factor  # integer division

    index = 15
    n_frames = 10
    source = KittiMultiViewSource(index, test=False, n_frames=n_frames)
    full_shape = source.frame_ten[0].shape
    frame_ten = [downsample(source.frame_ten[0], frame_down_factor),
                 downsample(source.frame_ten[1], frame_down_factor)]
    frame_shape = frame_ten[0].shape

    average_disp = source.get_average_disparity()
    average_disp = cv2.pyrUp(average_disp)[:frame_shape[0], :frame_shape[1]-values]

    filter = Filter(average_disp, frame_down_factor, mem_down_factor,
                    fovea_shape, frame_shape, values, verbose=False,
                    memory_length=0)

    plt.figure()
    import matplotlib.cm as cm
    for i in range(0, 10, 2):
        frame = [downsample(source.frame_sequence[i][0], frame_down_factor),
                 downsample(source.frame_sequence[i][1], frame_down_factor)]
        filter_disp, fovea_corner = filter.process_frame(None, frame)

        edge = 5
        plt.subplot(5, 1, i//2 + 1)
#         plt.subplot(5, 2, i+1)
        plt.imshow(trim(frame[0], values, edge), cmap=cm.Greys_r)
#         remove_axes()

#         plt.subplot(5, 2, i+2)
#         plt.imshow(trim(filter_disp, values, edge), vmin=0, vmax=full_values)

        fovea_corner = fovea_corner[0]
#         plot_edges(fovea_ij, (fs, fs))
        fi, fj = fovea_corner
        fm = fs
        fn = fs
        plt.plot([fj, fj+fn, fj+fn, fj, fj], [fi, fi, fi+fm, fi+fm, fi], 'white')

#         plt.scatter(fovea_corner[1]-values+fs/2, fovea_corner[0]-edge+fs/2, s=100, c='green', marker='+', linewidths=2)
#         plt.scatter(fovea_corner[1]-values, fovea_corner[0]-edge, s=9, c='green', marker='+', linewidths=3)
#         plt.scatter(fovea_corner[1]-values+fs, fovea_corner[0]-edge+fs, s=9, c='green', marker='+', linewidths=3)
#         plt.scatter(fovea_corner[1]-values, fovea_corner[0]-edge+fs, s=9, c='green', marker='+', linewidths=3)
#         plt.scatter(fovea_corner[1]-values+fs, fovea_corner[0]-edge, s=9, c='green', marker='+', linewidths=3)

        remove_axes()

    plt.tight_layout(-1)
    plt.show()

def searching():
    results = []
    # print(os.getcwd())
    search_this = Filter(csv_path, search_fraze)
    for item in search_this.filter_file():
        results.append(item[0])
        results.append('\n')
    # return ''.join(str(results))
    return results

class JobFilter(object):

    def __init__(self, sites):
        self._sites = sites
        self._jobs = []
        self._bad_titled_jobs = []
        self._bad_content_jobs = []
        self._filter = Filter()

    def get_todays_jobs(self):
        self._get_todays_links()
        self._filter_on_titles()
        self._get_postings_content()
        self._filter_on_content()
        return (self._jobs, self._bad_content_jobs, self._bad_titled_jobs)

    def _get_todays_links(self):
        # accumulate jobs across all sites rather than keeping only the
        # last site's links
        jobs = []
        for site in self._sites:
            list_soup = self._beautiful_soupify_url(site.get_job_listing_url())
            links = site.get_todays_links(list_soup)
            for link in links:
                jobs.append(Job(link.string, link['href'], site))
        self._jobs = jobs

    def _filter_on_titles(self):
        get_posting_jobs = []
        for job in self._jobs:
            if self._filter.title(job.get_title()):
                get_posting_jobs.append(job)
            else:
                self._bad_titled_jobs.append(job)
        self._jobs = get_posting_jobs

    def _get_postings_content(self):
        for job in self._jobs:
            content_soup = self._beautiful_soupify_url(job.get_link())
            job.set_content(content_soup)

    def _filter_on_content(self):
        good_jobs = []
        for job in self._jobs:
            if self._filter.content(job.get_content()):
                good_jobs.append(job)
            else:
                self._bad_content_jobs.append(job)
        self._jobs = good_jobs

    def _beautiful_soupify_url(self, url):
        html = urllib2.urlopen(url).read()
        return BeautifulSoup(html)

def __init__(self, nelx, nely, params, problem_type, bc, gui=None):
    """
    Allocate and initialize internal data structures.
    """
    n = nelx * nely
    self.nelx = nelx
    self.nely = nely
    self.opt = nlopt.opt(nlopt.LD_MMA, n)
    self.problem_type = problem_type

    # Alloc arrays
    self.x_phys = numpy.ones(n)
    self.x_rgb = numpy.ones((nelx, nely, 3))

    # Set bounds
    lb = numpy.array(params.densityMin * numpy.ones(n, dtype=float))
    ub = numpy.array(1.0 * numpy.ones(n, dtype=float))
    bc.set_passive_elements(nelx, nely, lb, ub, params.problemOptions)
    self.opt.set_upper_bounds(ub)
    self.opt.set_lower_bounds(lb)

    # Set stopping criteria
    self.opt.set_maxeval(params.maxSolverStep)
    self.opt.set_ftol_rel(0.0)

    # Setup topopt problem
    if problem_type.involves_compliance():
        self.problem = TopoptProblem(nelx, nely, params, bc)

    # Setup filter
    self.filtering = Filter(nelx, nely, params, problem_type)

    # Setup appearance matcher
    if problem_type.involves_appearance():
        self.exemplar_rgb = images.load_exemplar(params)
        self.appearance_cl = AppearanceCL(params.lambdaOccurrenceMap,
                                          params.exponentSimilarityMetric,
                                          params.appearanceNormWeight)

    # Setup user parameters
    self.params = params

    # Setup functional right-hand sides
    self.volume_max = self.params.volumeFracMax * n
    self.volume_min = self.params.volumeFracMin * n
    self.appearance_max = 0
    self.compliance_max = 0
    if problem_type.involves_compliance():
        if 'complianceMax' in params:
            self.compliance_max = params.complianceMax

    # Define optimization problem
    self.init_problem()

    # Set GUI callback
    self.gui = gui

def test_filter(self):
    filter = Filter(file_path='../../test_data/filter/Test.csv')
    date = datetime.datetime.strptime('1/3/2002', "%m/%d/%Y").date()
    self.assertTrue(filter.is_white_listed('ABEV3', date), 'filter not working')
    self.assertFalse(filter.is_white_listed('NEXISTE4', date), 'filter not working')

def preprocess(self, prop):
    self.__logger.debug('preprocessing...')
    # filter images separately
    glob = self.__globalCNN.process(Filter(prop))
    caus = self.__causticCNN.process(Filter(prop))
    assert glob.shape == caus.shape
    predict = glob + caus
    return predict

def filter(self, q):
    """
    q is the query.
    """
    if not isinstance(q, str):
        s = "filter: invalid query argument. You must use a string."
        self.logger.error(s)
        raise TypeError(s)
    f = Filter(self.sites, q)
    f.apply()

def add_filter(self, value):
    """Creates filter

    :value: can be a Filter or a path to a file
    """
    if isinstance(value, Filter):
        self._filter = value
    elif isinstance(value, str):
        self._filter = Filter(file_path=value)
    else:
        raise ValueError("Filter type is not supported")

def main():
    initialize_log()
    config_params = parse_config_params()
    logging.info("Starting filter for: {}, operation: {} and parameter {}.".format(
        config_params['filter_key'],
        config_params['filter_operation'],
        config_params['filter_parameter']
    ))
    filter = Filter(config_params['data_queue'],
                    config_params['sink_exchange'],
                    config_params['filter_operation'],
                    config_params['filter_key'],
                    config_params['filter_parameter'],
                    config_params['filter_key_1'],
                    config_params['filter_key_2'])
    filter.start()

def __init__(self):
    Filter.__init__(self)
    self._name = "Outlier"
    self._input = None
    self._dimensions = None
    self._output = None
    # cutoff: we compare the neighborhood average to the current datum; if the
    # datum is in the 95th (or higher) percentile, it is considered an outlier.
    self.set_parameter_floatrange("cutoff", 0.95, 0.0, 1.0)
    # if the averaging out is inclusive of the datum in question
    self.set_parameter("inclusive", False, bool)

def get_audio():
    # Stores what the user said in var
    var = Speech().audio()
    # var goes into find_filter and the found filter is kept in choosenfilter
    global choosenfilter
    choosenfilter = Filter().find_filter(var)
    user_filter = Filter().get_name()
    global filtervideostream
    filtervideostream = FilterCamera(choosenfilter)
    curr_string = "You are currently using " + str(user_filter) + "!"
    return render_template('filters.html', message=curr_string)

def score_results(self, n_filtered, number_of_medoids, number_of_iterations):
    print("score_results")
    # Filtering the trajectory
    self.filtered_trajectory, self.filtered_ndx = Filter(
        self.trajectory, n_filtered).cabs_filter()
    # Clustering the trajectory
    self.medoids, self.clusters_dict, self.clusters = Clustering(
        self.filtered_trajectory,
        'chain ' + ','.join(self.initial_complex.ligand_chains)
    ).cabs_clustering(number_of_medoids=number_of_medoids,
                      number_of_iterations=number_of_iterations)

def filter_ads(self):
    # the body reads and writes attributes on self, so this is an instance
    # method rather than a free function taking a url
    adblock = Filter(open(ABSPATH + '/ressources/easylist.txt'), is_local=True)
    if len(adblock.match(self.url)) != 0:
        self.msg = 'Adblock url'
        self.status = False
        return False
    match_date = re.search(DATE_REGEX, self.url)
    # if we caught the verified date above, it's an article
    if match_date is not None:
        self.date = match_date.group()
        return True

def __init__(self, aggregatedData):
    self.__aData = aggregatedData
    self.__currentType = CUSTOMIZATION_TYPE.CAMOUFLAGE
    self.__currentSlotIdx = 0
    self.__currentDuration = 0
    self.__carouselItems = []
    self.filter = Filter(self.__aData.availableGroupNames)
    self.filter.changed += self.__updateCarouselData
    self.slots = Slots(self.__aData)
    self.slots.selected += self.__onSlotSelected
    self.slots.updated += self.__onSlotUpdated
    self.updated = Event()

def test_filter():
    mappings = [{'source': 'title'},
                {'source': 'description'},
                {'source': 'adfasdf'},
                {'no source': 'adfasdf'}]
    filter = Filter(mappings)
    result = {}
    result['items'] = [{
        'author': 'Fred',
        'guid': 1234,
        'title': 'Gee golly',
        'description': 'This is fun.'
    }]
    result = filter.filter(result)
    print(result)
    assert result['items'] == [{'title': 'Gee golly',
                                'description': 'This is fun.'}]

def prewitt(self):
    current_image = self.result_image if self.stack_filters else self.input_image
    # operate on current_image consistently (the original inspected
    # self.result_image even when not stacking filters)
    if len(current_image.shape) == 3:
        img_r, img_g, img_b = cv2.split(current_image)
        img_r = Filter.prewitt(img_r)
        img_g = Filter.prewitt(img_g)
        img_b = Filter.prewitt(img_b)
        self.result_image = cv2.merge((img_r, img_g, img_b))
    else:
        self.result_image = Filter.prewitt(current_image)
    self.show_image()

def test_filter_excluded_GAVs(self):
    config = Configuration()
    alf = Filter(config)

    config.excludedGAVs = ["com.google.guava:guava:1.1.0"]
    al = copy.deepcopy(self.artifactList)
    self.assertTrue('1.1.0' in al['com.google.guava:guava']['1'])
    alf._filterExcludedGAVs(al)
    self.assertFalse('1.1.0' in al['com.google.guava:guava']['1'])

    config.excludedGAVs = ["com.google.guava:guava:1.0*"]
    al = copy.deepcopy(self.artifactList)
    self.assertTrue('1.0.0' in al['com.google.guava:guava']['1'])
    self.assertTrue('1.0.1' in al['com.google.guava:guava']['1'])
    self.assertTrue('1.0.2' in al['com.google.guava:guava']['2'])
    self.assertTrue('1.0.0' in al['com.google.guava:guava']['3'])
    alf._filterExcludedGAVs(al)
    self.assertFalse('1.0.0' in al['com.google.guava:guava']['1'])
    self.assertFalse('1.0.1' in al['com.google.guava:guava']['1'])
    self.assertFalse('2' in al['com.google.guava:guava'])
    self.assertFalse('1.0.0' in al['com.google.guava:guava']['3'])

    config.excludedGAVs = ["com.google.guava:*"]
    al = copy.deepcopy(self.artifactList)
    self.assertTrue('com.google.guava:guava' in al)
    alf._filterExcludedGAVs(al)
    self.assertFalse('com.google.guava:guava' in al)

def test_filter_duplicates(self):
    config = Configuration()
    alf = Filter(config)

    al = copy.deepcopy(self.artifactList)
    self.assertTrue('1.0.0' in al['com.google.guava:guava:pom']['1'])
    self.assertTrue('1.0.0' in al['com.google.guava:guava:pom']['3'])
    self.assertTrue('1.0.1' in al['org.jboss:jboss-foo:jar']['1'])
    self.assertTrue('1.0.1' in al['org.jboss:jboss-foo:jar']['2'])
    alf._filterDuplicates(al)
    self.assertTrue('1.0.0' in al['com.google.guava:guava:pom']['1'])
    self.assertFalse('1.0.0' in al['com.google.guava:guava:pom']['3'])
    self.assertTrue('1.0.1' in al['org.jboss:jboss-foo:jar']['1'])
    self.assertFalse('1.0.1' in al['org.jboss:jboss-foo:jar']['2'])

def test_filter_duplicates(self):
    config = Configuration()
    alf = Filter(config)

    al = copy.deepcopy(self.artifactList)
    self.assertTrue('1.0.0' in al['com.google.guava:guava']['1'])
    self.assertTrue('1.0.0' in al['com.google.guava:guava']['3'])
    self.assertTrue('1.0.1' in al['org.jboss:jboss-foo']['1'])
    self.assertTrue('1.0.1' in al['org.jboss:jboss-foo']['2'])
    alf._filterDuplicates(al)
    self.assertTrue('1.0.0' in al['com.google.guava:guava']['1'])
    self.assertFalse('1.0.0' in al['com.google.guava:guava']['3'])
    self.assertTrue('1.0.1' in al['org.jboss:jboss-foo']['1'])
    self.assertFalse('1.0.1' in al['org.jboss:jboss-foo']['2'])

def route_dic(terms, temp_original_liwc=None, temp_tags=None):
    route_dics = defaultdict(SetMultimap)
    f = Filter(term_in_collection=temp_original_liwc,
               tag_in_collection=temp_tags)
    for term, tags in terms._dict.iteritems():
        for tag in tags:
            code = f.route(term, tag)
            d = route_dics[code]
            d[term].add(tag)
    for n, dic in route_dics.iteritems():
        dump_dic('%s/C%d.txt' % (dic_dir, n), dic=dic)

def __init__(self, filter, startpage, maxpages):
    self.filter = filter
    self.startpage = startpage
    self.maxpages = maxpages

    filter = Filter(self.filter, self.startpage, self.maxpages)
    self.search_url = filter.create_filter_url(self.startpage)

    handler = Handler(self.search_url)
    self.search_results = handler.init()

    if self.search_results["pages"] < startpage:
        raise Exception("Error: startpage exceeds available pages [pages="
                        + str(self.search_results["pages"]) + "]")
    if self.search_results["pages"] < startpage + maxpages:
        self.maxpages = self.search_results["pages"]

class MainWin(QtGui.QMainWindow):
    lyricEditor = None

    def __init__(self, parent=None):
        super(MainWin, self).__init__(parent)
        self.setupUi()
        self.retranslateUi()

    def setupUi(self):
        # main style sheet
        style_file = QtCore.QFile("ui/style.css")
        style_file.open(QtCore.QFile.ReadOnly)
        style_sheet = QtCore.QLatin1String(style_file.readAll())
        self.setStyleSheet(style_sheet)

        self.setObjectName("MainWindow")
        self.resize(900, 680)
        self.centralwidget = QtGui.QWidget(self)
        self.centralwidget.setObjectName("centralwidget")
        self.setCentralWidget(self.centralwidget)

        # menu bar
        self.menu_bar = MenuBar(self)
        self.setMenuBar(self.menu_bar)

        # status bar
        self.statusbar = QtGui.QStatusBar(self)
        self.statusbar.setObjectName("statusbar")
        self.setStatusBar(self.statusbar)

        self.pianoRoll = PianoRoll(self.centralwidget)
        # self.pianoRoll.hide()

        self.filter = Filter(self.centralwidget)
        self.filter.setGeometry(QtCore.QRect(630, 10, 241, 441))

        self.lyricEditor = QtGui.QTextEdit(self.centralwidget)
        self.lyricEditor.setGeometry(QtCore.QRect(self.filter.x(), 45, 200, 300))

    def retranslateUi(self):
        self.setWindowTitle(const.WINDOW_TITLE)

def filter_ads(url):
    info = {}
    info["url"] = url
    adblock = Filter(open(ABSPATH + '/ressources/easylist.txt'), is_local=True)
    if len(adblock.match(url)) != 0:
        info['msg'] = 'Adblock url'
        return (False, info)
    match_date = re.search(DATE_REGEX, url)
    # if we caught the verified date above, it's an article
    if match_date is not None:
        info["date"] = match_date.group()
        info['msg'] = 'verified for date: %s' % info["date"]
        return (True, info)
    else:
        return (True, info)

def parse_filter(self):
    last_operator = Filter.__or__
    result = None
    negated = False
    while self.has_tokens():
        token = self.get()
        if token == '(':
            filter = self.parse_filter()
            result = self.combine(result, filter, last_operator, negated)
            last_operator = Filter.__or__
            negated = False
        elif token == ')':
            return result
        elif token == 'and':
            last_operator = Filter.__and__
            negated = False
        elif token == 'or':
            last_operator = Filter.__or__
            negated = False
        elif token == 'not':
            negated = True
        else:
            filter = Filter.matches(re.compile(token))
            result = self.combine(result, filter, last_operator, negated)
            last_operator = Filter.__or__
            negated = False
    return result

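# parse_filter leans on a combine helper that is not shown. A minimal sketch
# of what it plausibly does; the negation step assumes Filter implements
# __invert__, which is an assumption since only __and__/__or__ appear above:
def combine(self, result, filter, operator, negated):
    # Fold the new filter into the running result using the pending operator.
    if negated:
        filter = ~filter          # assumes Filter.__invert__ exists
    if result is None:
        return filter
    return operator(result, filter)
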
def __init__(self, pitchnum, stdout, sourcefile, resetPitchSize,
             resetThresholds, displayBlur, normalizeAtStartup, noDribbling):
    self.running = True
    self.connected = False
    self.stdout = stdout

    if sourcefile is None:
        self.cap = Camera()
    else:
        filetype = 'video'
        if sourcefile.endswith(('jpg', 'png')):
            filetype = 'image'
        self.cap = VirtualCamera(sourcefile, filetype)

    calibrationPath = os.path.join('calibration', 'pitch{0}'.format(pitchnum))
    self.cap.loadCalibration(os.path.join(sys.path[0], calibrationPath))

    self.preprocessor = Preprocessor(pitchnum, resetPitchSize)

    if self.preprocessor.hasPitchSize:
        self.gui = Gui(self.preprocessor.pitch_size)
    else:
        self.gui = Gui()

    self.threshold = Threshold(pitchnum, resetThresholds, displayBlur,
                               normalizeAtStartup)
    self.thresholdGui = ThresholdGui(self.threshold, self.gui)
    self.features = Features(self.gui, self.threshold)
    self.filter = Filter(noDribbling)

    eventHandler = self.gui.getEventHandler()
    eventHandler.addListener('q', self.quit)

    while self.running:
        try:
            if not self.stdout:
                self.connect()
            else:
                self.connected = True

            if self.preprocessor.hasPitchSize:
                self.outputPitchSize()
                self.gui.setShowMouse(False)
            else:
                eventHandler.setClickListener(self.setNextPitchCorner)

            while self.running:
                self.doStuff()
        except socket.error:
            self.connected = False
            # If the rest of the system is not up yet/gets quit,
            # just wait for it to come available.
            time.sleep(1)
            # Strange things seem to happen to X sometimes if the
            # display isn't updated for a while
            self.doStuff()

    if not self.stdout:
        self.socket.close()

def __init__(self):
    Thread.__init__(self)
    # only the first logging.basicConfig call takes effect, so filename,
    # level, format, and datefmt are configured together
    logging.basicConfig(filename='trottle.log', level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p')
    self.stopped = False
    self.active_filters = 0
    self.filter_delete_counter = 0
    self.tokens = list()
    self.devices = dict()
    self.max_devices = {'Number': 0, 'Time': datetime.now()}
    self.speed = {'Down': 0, 'Up': 0, 'Last_Update': datetime.now()}
    self.filter = Filter()
    self.generate_tokens()
    if self.USE_SNMP:
        self.SNMPDetector = SNMPDetector()
        self.SNMPDetector.start()

class SledFilter:

    def __init__(self, targets, period):
        self._dp_filter = Filter(targets, period, self._dp_callback)
        self._src_filter = Filter(targets, period, self._src_callback)

    def _dp_callback(self, key):
        pass

    def _src_callback(self, key):
        pass

    def process(self, sled):
        self._dp_filter.process(
            sled.get_dp(), sled.get_dst(), sled.get_timestamp())
        self._src_filter.process(
            sled.get_src(), sled.get_dst(), sled.get_timestamp())

def test_qeinput_filter(self):
    input = QEInput(config=fixtures.textMain)
    f = Filter("fPlus")
    f.setParam("control", "calculation", "'md'")
    f.setCard({"name": "occupations", "lines": ("New line",)})
    input.applyFilter(f, "plus")
    self.assertEqual(input.toString(), fixtures.assertInputFilterPlus)

    f = Filter("fMinus")
    f.setNamelist({"name": "control"})
    f.setCard({"name": "k_points"})
    input.applyFilter(f, "minus")
    self.assertEqual(input.toString(), fixtures.assertInputFilterMinus)

def Reset():
    """Reset the library. Useful for re-initializing to a different server."""
    data.reset()
    ApiFunction.reset()
    Image.reset()
    Feature.reset()
    Collection.reset()
    ImageCollection.reset()
    FeatureCollection.reset()
    Filter.reset()
    Geometry.reset()
    Number.reset()
    String.reset()
    _ResetGeneratedClasses()
    global Algorithms
    Algorithms = _AlgorithmsContainer()