def test_filter_apply(self):
    """Applying a Filter in 'plus' then 'minus' mode edits a QEInput in place."""
    qe_input = QEInput(config=fixtures.textMain)

    # Additive filter: new params, a new card, and two new namelists.
    adder = Filter("fPlus")
    for param, value in (("prefix", "'ni'"),
                         ("pseudo_dir", "''"),
                         ("outdir", "''")):
        adder.setParam("control", param, value)
    adder.setCard({"name": "occupations", "lines": ("New line", )})
    adder.setNamelist({"name": "cell", "params": {"hello": "world"}})
    adder.setNamelist({"name": "phonon"})
    adder.apply(qe_input, "plus")
    self.assertEqual(qe_input.toString(), fixtures.assertPlus)

    # Subtractive filter: strip the pieces back out again.
    remover = Filter("fMinus")
    remover.setCard({"name": "atomic_species"})
    remover.setNamelist({"name": "phonon"})        # remove whole namelist
    remover.setParam("cell", "hello")              # leaves "cell" namelist empty
    remover.setParam("control", "prefix")          # remove single parameter
    remover.apply(qe_input, "minus")
    self.assertEqual(qe_input.toString(), fixtures.assertMinus)
def preprocess(self, prop):
    """Filter the input, run both CNNs on it, and return the summed prediction."""
    self.__logger.debug('preprocessing...')
    # Each CNN gets its own freshly filtered view of the input.
    global_out = self.__globalCNN.process(Filter(prop))
    caustic_out = self.__causticCNN.process(Filter(prop))
    assert global_out.shape == caustic_out.shape
    return global_out + caustic_out
def get_audio():
    """Listen for a spoken filter name, activate it, and render the filters page."""
    global choosenfilter, filtervideostream

    spoken = Speech().audio()                      # raw transcript from the mic
    choosenfilter = Filter().find_filter(spoken)   # resolve transcript to a filter
    user_filter = Filter().get_name()
    filtervideostream = FilterCamera(choosenfilter)

    message = "You are currently using " + str(user_filter) + "!"
    return render_template('filters.html', message=message)
def config_analysis(alg_list):
    """Append the lepton-building and event-selection steps to the algorithm list."""
    lepton_builder = Filter('BuildLepton')
    lepton_builder.cut_incTau = 'False'
    lepton_builder.cut_incTauMother = 'False'
    alg_list.append(lepton_builder)

    event_filter = Filter('FilterEvent')
    event_filter.cut_nLep = '> 0'
    alg_list.append(event_filter)
def main():
    """Interactive entry point: read a keyword and a target site, then dispatch
    the search with optional Filter/Sorter configuration.

    Recurses on unsupported sites so the user can try again; 'exit' quits.
    """
    keyword = str(input('please input keyword\n'))
    targetSite = str(input('please input target site\n'))
    if keyword == '':
        print('no keyword')
        return

    # Prompt first (preserves prompt order), then configure.  Both branches
    # of the original built a Filter()/Sorter(); only set() differed.
    # Renamed from `filter`/`sorter` shadowing pattern: `filter` is a builtin.
    want_filter = isFilterWanted()
    prod_filter = Filter()
    if want_filter:
        prod_filter.set()

    want_sort = isSortWanted()
    sorter = Sorter()
    if want_sort:
        sorter.set()

    site = targetSite.lower()  # hoisted: was recomputed for every comparison
    if site == 'pchome':
        search.searchOnPChome(keyword, prod_filter, sorter)
    elif site in ('shopee', 'xiapi') or targetSite == '蝦皮':
        search.searchOnShopee(keyword, prod_filter, sorter)
    elif site == 'qoo10':
        search.searchOnQoo10(keyword, prod_filter, sorter)
    elif site == 'etmall' or targetSite == '東森':
        search.searchOnEtmall(keyword, prod_filter, sorter)
    elif site == 'rakuten' or targetSite == '樂天':
        search.searchOnRakuten(keyword, prod_filter, sorter)
    elif site == 'all':
        search.searchOnPChome(keyword, prod_filter, sorter)
        search.searchOnShopee(keyword, prod_filter, sorter)
        search.searchOnQoo10(keyword, prod_filter, sorter)
        search.searchOnEtmall(keyword, prod_filter, sorter)
    elif site == 'exit':
        exit()
    else:
        print('target site is not supported\nPlease input again or exit')
        main()  # recursive retry until a supported site or 'exit'
def main():
    """Scrape a Craigslist bike search, filter and score the posts, send results."""
    distance = '30'
    zip_ = '80223'
    min_price = '300'
    max_price = '1500'
    has_pic = '1'  # 0 to disable
    bundle = '1'   # 0 to disable

    search_url = (
        'https://denver.craigslist.org/search/bia'
        '?hasPic={}&bundleDuplicates={}&search_distance={}'
        '&postal={}&min_price={}&max_price={}'
    ).format(has_pic, bundle, distance, zip_, min_price, max_price)

    results = []
    for post in Scrape.scrape_search_pg(search_url):
        candidate = Filter(post[0])
        # quick_filter, then size_filter, must both pass before scoring.
        if Filter.quick_filter(candidate) and Filter.size_filter(candidate):
            results.append(Keywords.score(candidate, Keywords.find(candidate)))

    if results:
        results.sort(key=lambda row: row[2], reverse=True)
        Message.send(Message.format(results))
def __init__(self, nelx, nely, volfrac, penal, rmin, ft, gui, bc):
    """Set up the MMA optimizer, density bounds, filter, and problem state.

    Args follow the classic topology-optimization layout: grid size
    (nelx x nely), volume fraction, SIMP penalty, filter radius rmin,
    filter type ft, GUI handle, and boundary conditions bc.
    """
    self.n = nelx * nely
    self.opt = nlopt.opt(nlopt.LD_MMA, self.n)

    # Passive elements are frozen at zero density.
    self.passive = bc.get_passive_elements()
    self.xPhys = np.ones(self.n)
    if self.passive is not None:
        self.xPhys[self.passive] = 0

    # Densities live in [0, 1].
    ub = np.ones(self.n, dtype=float)
    self.opt.set_upper_bounds(ub)
    lb = np.zeros(self.n, dtype=float)
    self.opt.set_lower_bounds(lb)

    # Stopping criteria.
    self.opt.set_maxeval(2000)
    self.opt.set_ftol_rel(0.001)

    # Objective and volume constraint.
    self.opt.set_min_objective(self.compliance_function)
    self.opt.add_inequality_constraint(self.volume_function, 0)

    # Density filter.
    self.ft = ft
    self.filtering = Filter(nelx, nely, rmin)

    # Problem setup.  NOTE(review): the original read
    # "def self.init_problem(nelx, nely, penal, bc)", which is a syntax
    # error; the stray "def" is dropped so the initializer is actually called.
    self.init_problem(nelx, nely, penal, bc)
    self.volfrac = volfrac

    # GUI callback.
    self.init_gui(gui)
def config_analysis(alg_list):
    """Add a single-tau event selection to the algorithm list."""
    tau_filter = Filter('FilterTauEvent')
    tau_filter.cut_nTau = '== 1'
    tau_filter.cut_tauStatus = ' == 3'  # leading space kept as in the original cut string
    alg_list.append(tau_filter)
def __init__(self):
    ## -----*----- Constructor -----*----- ##
    # Separator: runs the Filter's exe() loop on a background thread.
    self.filter = Filter()
    thread = threading.Thread(target=self.filter.exe)
    thread.start()

    self._pa = pyaudio.PyAudio()
    # Audio-input settings.
    # NOTE(review): 'format' here is paInt16, but the stream below is opened
    # with paFloat32 -- confirm which format is actually intended.
    self.settings = {
        'format': pyaudio.paInt16,
        'channels': 1,
        'rate': 8000 * 4,
        'chunk': 1024,
    }
    self.stream = self._pa.open(format=pyaudio.paFloat32,
                                channels=self.settings['channels'],
                                rate=self.settings['rate'],
                                input=True,
                                output=False,
                                frames_per_buffer=self.settings['chunk'])

    # Running state for volume / threshold tracking.
    self.state = {
        'amp': 0,
        'total': 0,
        'cnt': 0,
        'border': 9999,
        'average': 0
    }

    # Console output target.
    self.console = Console('./config/outer.txt')
    self.is_exit = False
def test_filter_finish_same_as_stop(self):
    """Filtering behaves consistently when 'finish' matches 'stop' (case aside)."""
    words = [
        'cow', 'foo', 'fish', 'flamingo',
        'trampoline', 'apollo', 'cow', 'pancakes',
    ]
    expected = [
        '', 'foo', 'fish', None,
        '', '', '', 'pancakes',
    ]
    word_filter = Filter({'start': 'cow', 'stop': 'Flamingo', 'finish': 'flamingo'})
    produced = [word_filter.filter(word) for word in words]
    self.assertEqual(expected, produced)
def test_filter_namelist(self):
    """setNamelist validates its argument and stores a retrievable namelist."""
    f = Filter("filter")
    f.setNamelist({
        "name": "control",
        "params": {
            "calculation": "'scf'",
            "restart_mode": "'from_scratch'"
        }
    })
    self.assertEqual(len(f.namelists()), 1)

    # A dict without a "name" key is rejected; nothing is added.
    self.assertRaises(KeyError, f.setNamelist, {"a": "b"})
    self.assertEqual(len(f.namelists()), 1)

    # A non-dict argument is rejected; nothing is added.
    self.assertRaises(TypeError, f.setNamelist, "simple string")
    self.assertEqual(len(f.namelists()), 1)

    # The stored namelist round-trips its values.
    nl = f.namelists()[0]
    self.assertEqual(nl.get("calculation", quotes=False), "scf")
    self.assertEqual(nl.name(), "control")
    self.assertEqual(nl.toString(), fixtures.assertC_filter_namelist)

    # Removal empties the filter again.
    f.removeNamelist("control")
    self.assertEqual(len(f.namelists()), 0)
def __init__(self, trigger, filter=Filter(), action=Action(), name=""):
    """Create a new rule given a trigger, a filter and an action

    We can give a list of action or a list of filter instead of a single
    action or filter, in that case the actions will be turned into a
    ListAction and the filters into an AndFilter.
    """
    # NOTE(review): the Filter()/Action() defaults are evaluated once at
    # definition time, so every rule created without explicit arguments
    # shares the same two instances -- safe only if they are stateless;
    # confirm before relying on it.
    Trigger.__init__(self)
    Action.__init__(self)
    # We accept list OR single value as argument
    if isinstance(filter, list):
        filter = AndFilter(*filter)
    if isinstance(action, list):
        action = ListAction(action)
    assert isinstance(trigger, Trigger)
    assert isinstance(action, Action)
    assert isinstance(filter, Filter)
    self.__trigger = trigger
    # The trigger will call this rule when triggered
    trigger.connect(self)
    self.__filter = filter
    self.__action = action
    self.connect(action)
    self.name = name
def test_emoji(self):
    # Replacing blocked words with a custom block token (an emoji here)
    # should mask every profanity while leaving the other words intact.
    app = Filter()
    app.blocks = ['🚫']
    sentences = [
        'orospu çocuğu yarak gibi kitap',
        'orrrrrooooossssspuuuu evladı',
        'am gibi kitap',
        'sik gibi kitap',
        'Sikimi ye Booooookkkk gibi kitap',
    ]
    results = [
        '🚫 çocuğu 🚫 gibi kitap',
        '🚫 evladı',
        '🚫 gibi kitap',
        '🚫 gibi kitap',
        '🚫 ye 🚫 gibi kitap',
    ]
    news = []
    for sentence in sentences:
        news.append(app.replace(sentence))
    self.assertEqual(news, results)
def test_filter_simple(self):
    """Filtering the module-level word list yields the expected sequence."""
    expected = [
        '', '', '', 'cow', 'siamese', 'wonderland', 'foo', 'toothpaste',
        '', '', '', 'milky', 'flight-manual', 'toothpick', '', None, '', '',
    ]
    word_filter = Filter(options)
    produced = [word_filter.filter(word) for word in words]
    self.assertEqual(produced, expected)
class TestFilter(unittest.TestCase):
    """Exercise the user-based and item-based collaborative filters on test data."""

    # path = os.environ.get("THESIS") + "/data/testData"
    path = "../data/testData"
    # Shared fixtures: one dataset, a user-based and an item-based filter.
    data = DataSet(path)
    filter = Filter(data, EuclidianDistance())
    itemFilt = ItemBasedFilter(data, EuclidianDistance())

    def testRecommendations(self):
        'based on data, Dave should have HungerGames as a recommendation'
        recs = self.filter.getRecommendations("Dave")
        recsDict = dict(recs)
        self.assertIn('HungerGames', recsDict)

    def test_kNearestNeighbors(self):
        '''Bob should be one of Dave's nearest neighbors'''
        nearest = self.filter.kNearestNeighbors("Dave", 2)
        print(nearest)
        # Neighbors appear as tuples with the name at index 1; compare names only.
        nearestNames = [tup[1] for tup in nearest]
        self.assertIn("Bob", nearestNames)

    def test_getItemSimData(self):
        '''Should be a dict with items as keys'''
        print(self.itemFilt.itemSimDict)
        self.assertIn("HungerGames", self.itemFilt.itemSimDict,
                      "HungerGames is not in itemSimDict")
        self.assertIsInstance(self.itemFilt.itemSimDict, dict,
                              "itemSimDict is not a dict.")
def objective(args):
    """Mean disparity-estimation cost of a Filter configured by `args`.

    Hyperparameter-search objective; relies on the module-level names
    `average_disparity`, `frame_down_factor`, `mem_down_factor`,
    `fovea_shape`, `frame_shape`, `values`, `source`, `downsample`
    and `cost`.
    """
    # The sampler hands ksize over as a float; the filter expects an int.
    args['ksize'] = int(args['ksize'])
    filter = Filter(average_disparity, frame_down_factor, mem_down_factor,
                    fovea_shape, frame_shape, values, verbose=False)
    filter.params = dict(args)

    costs = []
    for i in range(source.n_frames):
        # Left/right frame pair, downsampled to the working resolution.
        frame = [
            downsample(source.video[i][0], frame_down_factor),
            downsample(source.video[i][1], frame_down_factor)
        ]
        disp, fovea_corner = filter.process_frame(source.positions[i], frame)

        true_disp = downsample(source.ground_truth[i], frame_down_factor)
        # The first `values` columns are cropped before scoring --
        # presumably the region without valid disparity; confirm.
        costs.append(cost(disp[:, values:], true_disp, average_disparity))

    mean_cost = np.mean(costs)
    print(mean_cost, args)
    return mean_cost
def train():
    """End-to-end training pipeline: sentencepiece, augmentation, filtering, T2T."""
    # prepare data for sentencepiece training
    PrepareData.extract_each_sentence('./../data/taiyaku.tsv',
                                      './../data/ja_sentences.tsv', 'ja')
    PrepareData.extract_each_sentence('./../data/taiyaku.tsv',
                                      './../data/en_sentences.tsv', 'en')
    # train sentencepiece model (32k vocabulary, Japanese side)
    Tokenizer.train_sentencepiece(32000, 'ja')
    # data augment
    da = DataAugmentation()
    da.add_aux_corpus()
    # data filtering process: drop one-to-many pairs, source==target pairs,
    # mostly-non-alphabet lines (threshold 0.5), and wrong-language pairs
    fl = Filter('./../data/taiyaku.tsv')
    fl.one_multi_filter()
    print('1st done')
    fl.src_equal_trg_filter()
    print('second done')
    fl.non_alphabet_filter(0.5)
    print('')
    fl.correct_lang_filter()
    fl.update_df()
    # prepare the tokenized corpus to feed the model
    PrepareData.prepare_tokenized_taiyaku('./../data/tokenized_taiyaku.tsv')
    t2t = PyT2T()
    # data generate
    t2t.data_gen()
    # train phase
    t2t.train()
def test_qeinput_filter(self):
    """QEInput.applyFilter supports both 'plus' and 'minus' modes."""
    qe_input = QEInput(config=fixtures.textMain)

    # Additive filter: a new parameter and a new card.
    plus = Filter("fPlus")
    plus.setParam("control", "calculation", "'md'")
    plus.setCard({"name": "occupations", "lines": ("New line", )})
    qe_input.applyFilter(plus, "plus")
    self.assertEqual(qe_input.toString(), fixtures.assertInputFilterPlus)

    # Subtractive filter: drop a namelist and a card.
    minus = Filter("fMinus")
    minus.setNamelist({"name": "control"})
    minus.setCard({"name": "k_points"})
    qe_input.applyFilter(minus, "minus")
    self.assertEqual(qe_input.toString(), fixtures.assertInputFilterMinus)
def test_deletion(self):
    """detect() drops profane sentences and passes clean ones through."""
    app = Filter()
    sentences = [
        'orospu çocuğu yarak gibi kitap',
        'orrrrrooooossssspuuuu evladı',
        'am gibi kitap',
        'sik gibi kitap',
        'Sikimi ye Booooookkkk gibi kitap',
        'güzel kitap çok sevdim',
        'resmen mükemmel',
    ]
    expected = [
        'güzel kitap çok sevdim',
        'resmen mükemmel',
    ]
    # Only truthy detect() results survive, same as the original append-if loop.
    survivors = [hit for hit in map(app.detect, sentences) if hit]
    self.assertEqual(survivors, expected)
def test_filter_excluded_GAVs(self):
    """_filterExcludedGAVs removes artifacts matching the excludedGAVs patterns."""
    config = Configuration()
    alf = Filter(config)

    # Exact GAV: only that one version disappears.
    config.excludedGAVs = ["com.google.guava:guava:1.1.0"]
    al = copy.deepcopy(self.artifactList)
    self.assertTrue('1.1.0' in al['com.google.guava:guava']['1'])
    alf._filterExcludedGAVs(al)
    self.assertFalse('1.1.0' in al['com.google.guava:guava']['1'])

    # Version wildcard: every 1.0* version goes; a sub-entry emptied by the
    # removal ('2') is dropped entirely.
    config.excludedGAVs = ["com.google.guava:guava:1.0*"]
    al = copy.deepcopy(self.artifactList)
    self.assertTrue('1.0.0' in al['com.google.guava:guava']['1'])
    self.assertTrue('1.0.1' in al['com.google.guava:guava']['1'])
    self.assertTrue('1.0.2' in al['com.google.guava:guava']['2'])
    self.assertTrue('1.0.0' in al['com.google.guava:guava']['3'])
    alf._filterExcludedGAVs(al)
    self.assertFalse('1.0.0' in al['com.google.guava:guava']['1'])
    self.assertFalse('1.0.1' in al['com.google.guava:guava']['1'])
    self.assertFalse('2' in al['com.google.guava:guava'])
    self.assertFalse('1.0.0' in al['com.google.guava:guava']['3'])

    # GA wildcard: the whole groupId:artifactId entry is removed.
    config.excludedGAVs = ["com.google.guava:*"]
    al = copy.deepcopy(self.artifactList)
    self.assertTrue('com.google.guava:guava' in al)
    alf._filterExcludedGAVs(al)
    self.assertFalse('com.google.guava:guava' in al)
def __getPlaintext(self):
    """Extract, normalize and language-tag this document's plaintext.

    Returns a dict keyed by self.langKey / self.plaintextKey /
    self.filenameKey, or raises if the cleaned text is too short.
    """
    # extract plaintext from pdf
    paper = PdfLib(self.wd + os.sep + self.filename)
    textBeginning = self.__guessDocBegining(self.filename)
    plaintext = paper.pdf2txt(textBeginning, "max")
    # normalize text via the Filter's fluent cleanup chain (order matters)
    f = Filter(asString=plaintext)
    plaintext = f.substitutions() \
        .oneCharPerLine() \
        .normalizeCaracters() \
        .lower() \
        .uselessCharacters() \
        .multipleDots() \
        .listEnum() \
        .digits() \
        .shortTokens() \
        .multipleSpaces() \
        .getResult()
    # experience shows, that less than 6000 characters is mostly waste
    if len(plaintext) > 6000:
        result = {}
        result[self.langKey] = self.__guessLang(plaintext)
        result[self.plaintextKey] = plaintext
        result[self.filenameKey] = self.filename
        return result
    else:
        raise Exception(u"Document is too short.")
def test_filter_card(self):
    """setCard validates its argument and stores a retrievable card."""
    f = Filter("filter")
    f.setCard({
        "name": "k_points",
        "lines": ("4 4 4 1 1 1", ),
        "arg": "automatic"
    })
    self.assertEqual(len(f.cards()), 1)

    # A dict without a "name" key is rejected; nothing is added.
    self.assertRaises(KeyError, f.setCard, {"a": "b"})
    self.assertEqual(len(f.cards()), 1)

    # A non-dict argument is rejected; nothing is added.
    self.assertRaises(TypeError, f.setCard, "simple string")
    self.assertEqual(len(f.cards()), 1)

    # The stored card round-trips its values.
    stored = f.cards()[0]
    self.assertEqual(stored.arg(), "automatic")
    self.assertEqual(stored.name(), "k_points")
    self.assertEqual(stored.toString(), fixtures.assertC_filter_card)

    # Removal empties the filter again.
    f.removeCard("k_points")
    self.assertEqual(len(f.cards()), 0)
def filter_from_database(self):
    """Rebuild the user -> Client map (with their Filter objects) from the DB.

    Each row is positional; from the indices used below it looks like
    (user_id, name, product, size, comparator, type, color, firmness,
    cum_tube, suction_cup, flop) -- confirm against the DB schema.
    """
    data = self.Statue.Database.get_filters()
    self.Logger.debug('Filter in database : {}'.format(data))
    dic = {}
    for old_filter in data:
        user = self.get_user(old_filter[0])
        filter_name = old_filter[1]
        toy_color = old_filter[6]
        # Translate stored option codes back into option objects.
        toy_name = self.option_from_type(old_filter[2], self.Statue.Options.product_name())
        toy_type = self.option_from_type(old_filter[5], self.Statue.Options.type_option())
        size = self.option_from_type(old_filter[3], self.Statue.Options.size_option())
        comparator = self.option_from_type(old_filter[4], self.Statue.Options.comparator_option())
        firmness = self.option_from_type(old_filter[7], self.Statue.Options.firmness_option())
        cum_tube = self.option_from_type(old_filter[8], self.Statue.Options.cum_tub_option())
        suction_cup = self.option_from_type(old_filter[9], self.Statue.Options.suction_cup_option())
        flop = self.option_from_type(old_filter[10], self.Statue.Options.flop_option())
        new_filter = Filter(filter_name, toy_name, toy_type, size, comparator,
                            toy_color, firmness, cum_tube, suction_cup, flop)
        # Attach the filter to an existing client, or create one on demand.
        if user is not None and user in dic:
            dic[user].add_filter(new_filter)
        elif user is not None:
            dic[user] = client.Client(user, self.Logger, self,
                                      self.Statue.Parser, self.Statue.ToyInShop)
            dic[user].add_filter(new_filter)
        else:
            self.Logger.debug("user with id:{} don't exist".format(old_filter[0]))
    self.Statue.UserFilter = dic
def text_reply(msg):
    # Python 2 itchat handler: manage self-service auto-replies and answer
    # incoming messages.
    print msg
    content = msg['Content']
    # Commands are only accepted when the account messages itself.
    if msg['FromUserName'] == msg['ToUserName']:
        print content
        WORD_CLEAR = u"自助回复清除成功"
        WORD_ADD = u"自助回复增加成功"
        WORD_UPDATE = u"自助回复更新成功"
        if content == "QC":  # clear all auto-replies
            AUTO_REPLY.clear()
            itchat.send('%s' % (WORD_CLEAR), msg['FromUserName'])
            return
        # "Q<question>Q<answer>" registers a new auto-reply pair.
        sub = content.split("Q")
        if len(sub) == 3:
            q = sub[1]
            a = sub[2]
            AUTO_REPLY[q] = a
            itchat.send('%s' % (WORD_ADD), msg['FromUserName'])
            return
    if msg['Type'] == 'Text':
        # Answer from the stored auto-reply table, if the text matches a key.
        if AUTO_REPLY.has_key(content):
            itchat.send('%s' % (AUTO_REPLY[content]), msg['FromUserName'])
            return
    if msg['Type'] == 'Sharing':
        # Pull image URLs out of the shared page and send images back.
        imgFilter = Filter()
        img_list = imgFilter.getContents(msg['Url'])
        i = 0
        for img in img_list:
            print img_list, img, i
            i = i + 1
            # NOTE(review): '%' binds tighter than '+', so this sends the
            # literal local path "1.gif", "2.gif", ... rather than `img`
            # itself -- confirm those files are created elsewhere.
            itchat.send('@img@%s' % str(i) + ".gif", msg['FromUserName'])
def handle_filter():
    """Worker loop: repeatedly run Filter over the crawl output until QUEUE drains.

    On Ctrl+C, persists CRAWLED and QUEUE to disk so the spider can resume later.
    """
    global DONE, CRAWLED, QUEUE
    while True:
        try:
            if QUEUE.empty():
                warning("QUEUE IS EMPTY\n")
                warning("PROGRAM WILL STOP\n")
                DONE = True
                # SystemExit is not a subclass of Exception, so the broad
                # handler below does not swallow this exit.
                exit(0)
            Filter(PROJECT_NAME + "/output.txt", TO_FILTER)
            TO_FILTER[:] = []  # Cleans up the list in place (shared reference)
        except KeyboardInterrupt:  # if the user exits, [Control+c]
            warning("Saving lists...\nDo not exit the script!")
            DONE = True  # To stop the threads
            '''
            This part of the script handles the saving of the two lists
            so the user can resume the spider after
            '''
            write_data_to_file(PROJECT_NAME + "/Crawled.txt", '')
            for url in CRAWLED:
                append_data_to_file(PROJECT_NAME + "/Crawled.txt", url)
            write_data_to_file(PROJECT_NAME + "/Queue.txt", "")
            while QUEUE.empty() == False:
                append_data_to_file(PROJECT_NAME + "/Queue.txt", QUEUE.get_nowait())
            interface()
        except Exception as e:
            warning("An error has occured:{}".format(str(e)))
def test_filter():
    """A thresholded Filter should roughly recover a Gaussian from mild noise."""
    t = 2 * np.pi * (2 * np.arange(1000) / 1000 - 1)
    clean = np.exp(-(2 * t) ** 2)
    noisy = clean + 0.05 * np.random.normal(size=1000)
    recovered = Filter(threshold=5).apply(noisy)
    # Total absolute residual, normalized by the time span.
    residual = np.sum(np.abs(clean - np.real(recovered))) / (t[-1] - t[0])
    assert residual < 1.0
def __init__(self, registerfile, shiftregister, reset=0, load_file=None):
    """Wire up every control unit, then optionally reset and/or load a config file."""
    self.registerfile = registerfile
    self.shiftregister = shiftregister

    # Control units, keyed by display name; each is also exposed as an
    # attribute.  Construction order matches the original wiring.
    self._units = {}
    unit_specs = [
        ('led', 'LEDs', Led, self.registerfile),
        ('hitlogic', 'Hit logic', HitLogic, self.registerfile),
        ('filter', 'Filter', Filter, self.registerfile),
        ('monitor', 'Monitor', Monitor, self.shiftregister),
        ('frontend', 'Frontend', Frontend, self.shiftregister),
        ('adcbias', 'ADC bias', AdcBias, self.shiftregister),
        ('digital', 'Digital', Digital, self.registerfile),
    ]
    for attr, label, unit_cls, backend in unit_specs:
        unit = unit_cls(backend)
        setattr(self, attr, unit)
        self._units[label] = unit

    if reset:
        self.reset()
        self.apply()
    if load_file:
        self.load(load_file)
        self.update()
def main():
    """Build the mail-filter pipeline and write it to output.xml.

    NOTE(review): every Clause below is a SINGLE string containing an
    embedded '", "' (e.g. 'from", "addr') -- almost certainly a mangled /
    redacted form of Clause('from', 'addr').  Left untouched here; confirm
    against the Clause API before trusting the generated XML.
    """
    Pipeline([
        # Drop matching mail without labeling it.
        Filter(
            'Ignore',
            Operation(OR, [
                Clause('from", "*****@*****.**'),
                Operation(AND, [
                    Clause('from", "*****@*****.**'),
                    Clause('subject", ""Advertisement"'),
                ]),
            ]),
            Option(apply_label=False, skip_inbox=True),
        ),
        Filter(
            'Alarm',
            Operation(OR, [
                Operation(AND, [
                    Clause('to", "*****@*****.**'),
                    Clause('subject", "Panic'),
                ]),
                Operation(AND, [
                    Clause('from", "*****@*****.**'),
                ]),
                Clause('from", "*****@*****.**'),
            ]),
        ),
        Filter(
            'Report',
            Operation(OR, [
                Clause('subject", "News'),
                Clause('subject", ""Weekly Report"'),
                Clause('to", "*****@*****.**'),
            ]),
        ),
        Filter(
            'Random',
            Clause('from", "*****@*****.**'),
        ),
        Filter(
            'Company',
            Operation(OR, [
                Clause('to", "*****@*****.**'),
                Clause('to", "*****@*****.**'),
            ]),
        ),
    ]).save('output.xml')
def delete_intermediate(self):
    """Trash this stage's intermediate files, then delegate to the sub-stages."""
    for entry in self._dir:
        self.to_trash(entry)
    # Optionally discard the merged input sequences as well.
    if self.switches['MERGE_INPUT'][0]:
        self.to_trash(self.dir.input_seqs)
    Filter(Directory.filter_dir).delete_intermediate()
    Cluster(Directory.cluster_dir).delete_intermediate()
    return
def get_filter(self, section_name=''):
    """Returns the Filter associated with the asked section if it exists."""
    config_section = self._get_config_section(section_name)
    if not config_section:
        return None  # implicit in the original: no section -> no filter
    filter_kwargs = self._get_config_filter(section_name, config_section)
    return Filter(**filter_kwargs)