def test_subproc_toks_ls_l_semi_ls_first():
    """A maxcol ending inside the first command wraps only that command."""
    lsdl = 'ls -l'
    ls = 'ls'
    line = '{0}; {1}'.format(lsdl, ls)
    expected = '$[{0}]; {1}'.format(lsdl, ls)
    observed = subproc_toks(line, lexer=LEXER, maxcol=6, returnline=True)
    assert_equal(expected, observed)
def test_recursive_solve():
    """recursive_solve must complete a puzzle that requires backtracking."""
    puzzle = (
        " , , , , 5, 3, , , ;\n"
        "1, , , 6, , , , , 8;\n"
        " , 5, , , , 1, , 4, ;\n"
        "4, , , , 9, , 5, 3, ;\n"
        " , , 9, 7, , 6, 8, , ;\n"
        " , 2, 7, , 3, , , , 6;\n"
        " , 4, , 1, , , , 8, ;\n"
        "2, , , , , 7, , , 1;\n"
        " , , , 3, 2, , , , ;\n"
    )
    solved = (
        "6, 8, 4, 2, 5, 3, 1, 7, 9;\n"
        "1, 9, 3, 6, 7, 4, 2, 5, 8;\n"
        "7, 5, 2, 9, 8, 1, 6, 4, 3;\n"
        "4, 1, 6, 8, 9, 2, 5, 3, 7;\n"
        "5, 3, 9, 7, 1, 6, 8, 2, 4;\n"
        "8, 2, 7, 4, 3, 5, 9, 1, 6;\n"
        "3, 4, 5, 1, 6, 9, 7, 8, 2;\n"
        "2, 6, 8, 5, 4, 7, 3, 9, 1;\n"
        "9, 7, 1, 3, 2, 8, 4, 6, 5;\n"
    )
    sudoku = Sudoku()
    sudoku.read_string(puzzle)
    sudoku.recursive_solve()
    assert_equal(str(sudoku), solved)
def test_number_of_edges(self):
    """Connection probability 0 gives no edges; probability 1 gives all."""
    n_nodes = 100
    graph = dag.cube_space_graph(n_nodes, 2, 0.)
    assert_true(graph.number_of_edges() == 0)
    graph = dag.cube_space_graph(n_nodes, 1, 1.)
    assert_equal(graph.number_of_edges(), (n_nodes*(n_nodes-1)/2))
def test_subproc_toks_ls_l_semi_ls_second():
    """A mincol past the ';' wraps only the second command."""
    lsdl = 'ls -l'
    ls = 'ls'
    line = '{0}; {1}'.format(lsdl, ls)
    expected = '{0}; $[{1}]'.format(lsdl, ls)
    observed = subproc_toks(line, lexer=LEXER, mincol=7, returnline=True)
    assert_equal(expected, observed)
def test_metadata():
    """Metadata must round-trip unchanged through pack/unpack."""
    # Local renamed so it no longer shadows the test function itself.
    meta = {'dtype': 'float64',
            'shape': [1024],
            'others': [],
            }
    received = pack_unpack_fp(1, metadata=meta)
    nt.assert_equal(meta, received)
def test_equality_encoding_realm_emptyValues(self):
    """Parsing an OAuth Authorization header yields the decoded parameter
    dict (one list of values per oauth_* key, empty values preserved) plus
    the realm string as it appears in the header."""
    expected_value = ({
        'oauth_nonce': ['4572616e48616d6d65724c61686176'],
        'oauth_timestamp': ['137131200'],
        'oauth_consumer_key': ['0685bd9184jfhq22'],
        'oauth_something': [' Some Example'],
        'oauth_signature_method': ['HMAC-SHA1'],
        'oauth_version': ['1.0'],
        'oauth_token': ['ad180jjd733klru7'],
        'oauth_empty': [''],
        'oauth_signature': ['wOJIO9A2W5mFwDgiDvZbTSMK/PY='],
        }, 'Examp%20le'
    )
    # NOTE(review): the header literal below is kept byte-for-byte as found
    # in the source (its embedded backslashes are part of the string token
    # as written); do not reflow it without confirming the intended value.
    assert_equal(expected_value, parse_authorization_header_value('''\ OAuth\ \ realm="Examp%20le",\ oauth_consumer_key="0685bd9184jfhq22",\ oauth_token="ad180jjd733klru7",\ oauth_signature_method="HMAC-SHA1",\ oauth_signature="wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",\ oauth_timestamp="137131200",\ oauth_nonce="4572616e48616d6d65724c61686176",\ oauth_version="1.0",\ oauth_something="%20Some+Example",\ oauth_empty=""\ '''), "parsing failed.")
def _req(cls, method, *args, **kwargs):
    """Issue an HTTP request with optional (cached) token authentication.

    Extra keyword arguments consumed before delegating to requests.request:
      use_token -- attach an Authorization header (default True)
      token     -- explicit token; when None one is fetched and cached
      admin     -- use the admin credential cache instead of the user one
      expected  -- expected status code, or iterable of codes, or None
                   to skip the status assertion (default 200)

    Returns the requests Response object.
    """
    use_token = kwargs.pop('use_token', True)
    token = kwargs.pop('token', None)
    # FIX: always pop 'admin' so it can never leak into requests.request()
    # (previously it was only popped when a token had to be fetched).
    admin = kwargs.pop('admin', False)
    if use_token and token is None:
        if admin:
            # Lazily fetch and cache the admin token on the class.
            if cls._admin_token is None:
                cls._admin_token = get_auth_token(ADMIN_USERNAME, ADMIN_PASSWORD)
            token = cls._admin_token
        else:
            # Lazily fetch and cache the regular user token on the class.
            if cls._token is None:
                cls._token = get_auth_token(USERNAME, PASSWORD)
            token = cls._token
    if use_token:
        headers = kwargs.get('headers', {})
        # Do not clobber an Authorization header the caller supplied.
        headers.setdefault('Authorization', 'Token ' + token)
        kwargs['headers'] = headers
    expected = kwargs.pop('expected', 200)
    resp = requests.request(method, *args, **kwargs)
    if expected is not None:
        if hasattr(expected, '__iter__'):
            assert_in(resp.status_code, expected,
                      "Expected http status in %s, received %s"
                      % (expected, resp.status_code))
        else:
            assert_equal(resp.status_code, expected,
                         "Expected http status %s, received %s"
                         % (expected, resp.status_code))
    return resp
def test_valid_signature(self):
    """RSA-SHA1 signatures generate and verify for every stored example."""
    for example in self._examples:
        secret = example["private_key"]
        certificate = example["certificate"]
        pub_key = example["public_key"]
        url = example["url"]
        method = example["method"]
        oauth_params = example["oauth_params"]
        expected_signature = example["oauth_signature"]
        # Signing with the RSA private key reproduces the known signature.
        assert_equal(expected_signature,
                     generate_rsa_sha1_signature(secret,
                                                 method=method,
                                                 url=url,
                                                 oauth_params=oauth_params))
        # Verification succeeds with the X.509 certificate...
        assert_true(verify_rsa_sha1_signature(
            certificate, expected_signature, method, url, oauth_params))
        # ...and equally with the bare RSA public key.
        assert_true(verify_rsa_sha1_signature(
            pub_key, expected_signature, method, url, oauth_params))
def test_valid_base_string(self):
    """The signature base string matches the RFC 5849 reference example."""
    base_string = generate_signature_base_string(
        "POST",
        "http://example.com/request?b5=%3D%253D&a3=a&c%40=&a2=r%20b&c2&a3=2+q",
        self.oauth_params)
    expected = "POST&http%3A%2F%2Fexample.com%2Frequest&a2%3Dr%2520b%26a3%3D2%2520q%26a3%3Da%26b5%3D%253D%25253D%26c%2540%3D%26c2%3D%26oauth_consumer_key%3D9djdj82h48djs9d2%26oauth_nonce%3D7d8f3e4a%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D137131201%26oauth_token%3Dkkk9d7dh3k39sjv7"
    assert_equal(base_string, expected)
def test_D8_D4_fill():
    """
    Tests the functionality of D4 filling.
    """
    lfD8.map_depressions(pits=None, reroute_flow=False)
    lfD4.map_depressions(pits=None, reroute_flow=False)
    # D8 connectivity merges everything into one lake; D4 yields three.
    assert_equal(lfD8.number_of_lakes, 1)
    assert_equal(lfD4.number_of_lakes, 3)
    # Build the expected lake maps for the 7x7 grid.
    lake_map_D8 = np.full(7*7, XX, dtype=int)
    lake_map_D8[lake_nodes] = 10
    lake_map_D4 = lake_map_D8.copy()
    lake_map_D4[lake_nodes[5:]] = 32
    lake_map_D4[lake_nodes[-2]] = 38
    # Expected fill depths.
    depths_D8 = np.zeros(7*7, dtype=float)
    depths_D8[lake_nodes] = 2.
    depths_D4 = depths_D8.copy()
    depths_D4[lake_nodes[5:]] = 4.
    depths_D4[lake_nodes[-2]] = 3.
    assert_array_equal(lfD8.lake_map, lake_map_D8)
    assert_array_equal(lfD4.lake_map, lake_map_D4)
    assert_array_almost_equal(mg1.at_node['depression__depth'], depths_D8)
    assert_array_almost_equal(mg2.at_node['depression__depth'], depths_D4)
def test_D8_D4_route():
    """
    Tests the functionality of D4 routing.
    """
    # Route flow with both D8 and D4 connectivity, then map depressions.
    frD8.route_flow()
    frD4.route_flow()
    lfD8.map_depressions()
    lfD4.map_depressions()
    # D8 merges everything into a single lake; D4 splits it into three.
    assert_equal(lfD8.number_of_lakes, 1)
    assert_equal(lfD4.number_of_lakes, 3)
    # Expected receiver node for each of the 49 nodes of the 7x7 grid.
    flow_recD8 = np.array([0, 1, 2, 3, 4, 5, 6, 7, 16, 10, 16, 10, 18,
                           13, 14, 14, 15, 16, 10, 18, 20, 21, 16, 16,
                           16, 18, 33, 27, 28, 28, 24, 24, 24, 32, 34,
                           35, 35, 38, 32, 32, 32, 41, 42, 43, 44, 45,
                           46, 47, 48])
    flow_recD4 = np.array([0, 1, 2, 3, 4, 5, 6, 7, 7, 10, 17, 10, 11,
                           13, 14, 14, 15, 16, 17, 18, 20, 21, 21, 16,
                           17, 18, 33, 27, 28, 28, 29, 24, 31, 32, 34,
                           35, 35, 36, 37, 32, 33, 41, 42, 43, 44, 45,
                           46, 47, 48])
    assert_array_equal(mg1.at_node['flow__receiver_node'], flow_recD8)
    assert_array_equal(mg2.at_node['flow__receiver_node'], flow_recD4)
    # Total drainage leaving the left column must agree between D8 and D4.
    assert_array_almost_equal(mg1.at_node['drainage_area'].reshape((7,7))[:, 0].sum(),
                              mg2.at_node['drainage_area'].reshape((7,7))[:, 0].sum())
def test_clear_cache(self):
    """clear_cache resets every cache section to its empty container."""
    self.cache.clear_cache()
    empty_sections = {"IF": {}, "CONF": [], "EXEC": []}
    assert_equal(self.cache.cache, empty_sections)
def test_dict_assignment():
    """DictTrait stores a reference, so later mutations remain visible."""
    backing = dict()
    trait = DictTrait()
    trait.value = backing
    backing["a"] = 5
    nt.assert_equal(backing, trait.value)
    # Identity, not just equality: the very same dict object is held.
    nt.assert_true(trait.value is backing)
def test_Logisticdegenerate():
    """A collinear column in the design reduces the residual df to 31."""
    design = W((40,10))
    # Make column 0 an exact linear combination of columns 1 and 2.
    design[:,0] = design[:,1] + design[:,2]
    response = np.greater(W((40,)), 0)
    model = GLM(design=design, family=family.Binomial())
    fit = model.fit(response)
    assert_equal(fit.df_resid, 31)
def test_zero_byte_string():
    """Reader must synthesize space characters for char data whose stored
    byte count is zero but whose declared length is non-zero."""
    # Tests hack to allow chars of non-zero length, but 0 bytes
    # make reader-like thing
    str_io = cStringIO()
    r = _make_readerlike(str_io, boc.native_code)
    c_reader = m5u.VarReader5(r)
    # MATLAB 5 element tag: data type code + byte count, both uint32.
    tag_dt = np.dtype([('mdtype', 'u4'), ('byte_count', 'u4')])
    tag = np.zeros((1,), dtype=tag_dt)
    tag['mdtype'] = mio5p.miINT8
    tag['byte_count'] = 1
    hdr = m5u.VarHeader5()
    # Try when string is 1 length
    hdr.set_dims([1,])
    _write_stream(str_io, tag.tostring() + asbytes(' '))
    str_io.seek(0)
    val = c_reader.read_char(hdr)
    assert_equal(val, ' ')
    # Now when string has 0 bytes 1 length
    tag['byte_count'] = 0
    _write_stream(str_io, tag.tostring())
    str_io.seek(0)
    val = c_reader.read_char(hdr)
    assert_equal(val, ' ')
    # Now when string has 0 bytes 4 length
    str_io.seek(0)
    hdr.set_dims([4,])
    val = c_reader.read_char(hdr)
    assert_array_equal(val, [' '] * 4)
def test_log():
    """Logged prefixes must reflect the matching objects on the call stack."""
    # Stack containing one non-matching object
    with capture_output() as captured:
        runner = Run3()
        runner.run3()
    assert_equal(captured[0], "[Run3.run3] method Test3\n[run] function run()\n")
    # Stack containing two matching objects
    with capture_output() as captured:
        runner = Run2()
        runner.run2()
    assert_equal(captured[0],
                 "[Run2.run2] method Test2\n"
                 "[Run2.run2] method Test\n"
                 "[Run2.run2] function run()\n")
    # Stack containing one matching object
    with capture_output() as captured:
        runner = Run()
        runner.run()
    assert_equal(captured[0], "[Run.run] method Test\n[Run.run] function run()\n")
    # Stack containing no object
    with capture_output() as captured:
        run()
    assert_equal(captured[0], "[run] function run()\n")
    # Test stack_level too large
    with capture_output() as captured:
        other_run()
    assert_equal(captured[0], "[<top_level>] function other_run()\n")
def test_wilsonLT_Defaults_FeatureInput1():
    '''Confirm default FeatureInput values.'''
    G = la.input_.Geometry
    load_params = {
        'R': 12e-3,    # specimen radius
        'a': 7.5e-3,   # support ring radius
        'p': 5,        # points/layer
        'P_a': 1,      # applied load
        'r': 2e-4,     # radial distance from center loading
    }
    mat_props = {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9},
                 'Poissons': {'HA': 0.25, 'PSu': 0.33}}
    # NOTE: comparing materials DataFrames / Geo_objects directly is still
    # an open question for this test.
    actual = dft.FeatureInput
    expected = {
        'Geometry': G('400-[200]-800'),
        'Parameters': load_params,
        'Properties': mat_props,
        'Materials': ['HA', 'PSu'],
        'Model': 'Wilson_LT',
        'Globals': None,
    }
    nt.assert_equal(actual, expected)
def test_strict_type_guessing_with_large_file(self):
    """Strict type guessing over the 211.csv fixture must match the
    expected per-column types exactly (96 columns)."""
    fh = horror_fobj('211.csv')
    rows = CSVTableSet(fh).tables[0]
    # Skip past the guessed header row before sampling.
    offset, headers = headers_guess(rows.sample)
    rows.register_processor(offset_processor(offset + 1))
    # Candidate types offered to the guesser; strict mode enabled (True).
    types = [StringType, IntegerType, DecimalType, DateUtilType]
    guessed_types = type_guess(rows.sample, types, True)
    assert_equal(len(guessed_types), 96)
    assert_equal(guessed_types, [
        IntegerType(), StringType(), StringType(), StringType(),
        StringType(), StringType(), IntegerType(), StringType(),
        StringType(), StringType(), StringType(), StringType(),
        StringType(), StringType(), StringType(), StringType(),
        StringType(), StringType(), StringType(), StringType(),
        StringType(), StringType(), StringType(), StringType(),
        StringType(), StringType(), StringType(), IntegerType(),
        StringType(), DecimalType(), DecimalType(), StringType(),
        StringType(), StringType(), StringType(), StringType(),
        StringType(), StringType(), StringType(), StringType(),
        StringType(), StringType(), StringType(), StringType(),
        StringType(), StringType(), StringType(), StringType(),
        StringType(), StringType(), StringType(), StringType(),
        StringType(), StringType(), IntegerType(), StringType(),
        StringType(), StringType(), StringType(), StringType(),
        StringType(), StringType(), StringType(), StringType(),
        StringType(), StringType(), StringType(), StringType(),
        StringType(), StringType(), IntegerType(), StringType(),
        StringType(), StringType(), StringType(), StringType(),
        StringType(), StringType(), StringType(), StringType(),
        StringType(), StringType(), StringType(), StringType(),
        StringType(), StringType(), StringType(), StringType(),
        StringType(), DateUtilType(), DateUtilType(), DateUtilType(),
        DateUtilType(), StringType(), StringType(), StringType()])
def test_check_symmetric():
    """check_symmetric rejects non-square input and symmetrizes asymmetric
    arrays across all supported sparse formats."""
    sym = np.array([[0, 1], [1, 2]])
    bad = np.ones(2)
    asym = np.array([[0, 2], [0, 2]])
    candidates = {'dense': asym,
                  'dok': sp.dok_matrix(asym),
                  'csr': sp.csr_matrix(asym),
                  'csc': sp.csc_matrix(asym),
                  'coo': sp.coo_matrix(asym),
                  'lil': sp.lil_matrix(asym),
                  'bsr': sp.bsr_matrix(asym)}
    # A 1-D array is not a valid input at all.
    assert_raises(ValueError, check_symmetric, bad)
    for fmt, candidate in candidates.items():
        # Asymmetric input warns by default and raises when asked to.
        assert_warns(UserWarning, check_symmetric, candidate)
        assert_raises(ValueError, check_symmetric, candidate, raise_exception=True)
        fixed = check_symmetric(candidate, raise_warning=False)
        if sp.issparse(fixed):
            # Sparse output keeps the input's storage format.
            assert_equal(fixed.format, fmt)
            assert_array_equal(fixed.toarray(), sym)
        else:
            assert_array_equal(fixed, sym)
def test_unicode_decode_error():
    """Analyzers must raise UnicodeDecodeError for wrongly-declared encodings."""
    # decode_error default to strict, so this should fail
    # First, encode (as bytes) a unicode string.
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
    text_bytes = text.encode('utf-8')
    # Then let the Analyzer try to decode it as ascii. It should fail,
    # because we have given it an incorrect encoding.
    word_analyze = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, word_analyze, text_bytes)
    char_analyze = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                                   encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, char_analyze, text_bytes)
    # The deprecated `charset` alias must behave the same and warn exactly once.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        char_analyze = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                                       charset='ascii').build_analyzer()
        assert_raises(UnicodeDecodeError, char_analyze, text_bytes)
        assert_equal(len(caught), 1)
        assert_true(issubclass(caught[0].category, DeprecationWarning))
        assert_true("charset" in str(caught[0].message).lower())
def test_pack_unpack_bytes_file():
    """Raw bytes written with pack_bytes_file must round-trip through
    unpack_bytes_file unchanged."""
    array_ = np.linspace(0, 1e5)
    # tobytes() replaces the deprecated tostring(); the output is identical.
    input_bytes = array_.tobytes()
    with create_tmp_files() as (tdir, in_file, out_file, dcmp_file):
        pack_bytes_file(input_bytes, out_file)
        output_bytes = unpack_bytes_file(out_file)
        nt.assert_equal(input_bytes, output_bytes)
def test_tickabels_off(self):
    """Passing False for both tick-label options yields empty label lists."""
    kws = self.default_kws.copy()
    kws['xticklabels'] = False
    kws['yticklabels'] = False
    plotter = mat._HeatMapper(self.df_norm, **kws)
    nt.assert_equal(plotter.xticklabels, [])
    nt.assert_equal(plotter.yticklabels, [])
def test_fetch_library_name_personal(self):
    """The special 'personal' library id maps to the name 'My library'."""
    self.node_settings.library_id = 'personal'
    assert_equal(self.node_settings.fetch_library_name, 'My library')
def test_selected_library_name_empty(self):
    """When no library is selected the fetched name is the empty string."""
    self.node_settings.library_id = None
    assert_equal(self.node_settings.fetch_library_name, '')
def test_subproc_hello_mom_second():
    """A mincol at the end of the first command wraps only the second."""
    fst = "echo 'hello'"
    sec = "echo 'mom'"
    line = '{0}; {1}'.format(fst, sec)
    expected = '{0}; $[{1}]'.format(fst, sec)
    observed = subproc_toks(line, lexer=LEXER, mincol=len(fst), returnline=True)
    assert_equal(expected, observed)
def test_fail_fetch_haxby_simple():
    """A failing fetch must not clobber pre-existing files (sandboxing).

    An existing attributes.txt is planted, the fetch is made to abort on a
    missing archive member, and the planted file must survive untouched.
    """
    local_url = "file://" + os.path.join(datadir, "pymvpa-exampledata.tar.bz2")
    datasetdir = os.path.join(tmpdir, 'haxby2001_simple', 'pymvpa-exampledata')
    os.makedirs(datasetdir)
    # Create a dummy file. If sandboxing is successful, it won't be
    # overwritten. `with` guarantees the handle is closed even on error.
    with open(os.path.join(datasetdir, 'attributes.txt'), 'w') as dummy:
        dummy.write('stuff')
    path = 'pymvpa-exampledata'
    opts = {'uncompress': True}
    files = [
        (os.path.join(path, 'attributes.txt'), local_url, opts),
        # The following file does not exist. It will cause an abortion of
        # the fetching procedure.
        (os.path.join(path, 'bald.nii.gz'), local_url, opts),
    ]
    assert_raises(IOError, utils._fetch_files,
                  os.path.join(tmpdir, 'haxby2001_simple'), files,
                  verbose=0)
    with open(os.path.join(datasetdir, 'attributes.txt'), 'r') as dummy:
        stuff = dummy.read(5)
    assert_equal(stuff, 'stuff')
def test_lazy_load_index():
    """LazyJSON builds offset and size indexes for each top-level key."""
    buf = StringIO()
    dump({'wakka': 42}, buf)
    buf.seek(0)
    lazy = LazyJSON(buf)
    assert_equal({'wakka': 10, '__total__': 0}, lazy.offsets)
    assert_equal({'wakka': 2, '__total__': 14}, lazy.sizes)
def test_subproc_hello_mom_first():
    """A maxcol just past the first command wraps only that command."""
    fst = "echo 'hello'"
    sec = "echo 'mom'"
    line = '{0}; {1}'.format(fst, sec)
    expected = '$[{0}]; {1}'.format(fst, sec)
    observed = subproc_toks(line, lexer=LEXER, maxcol=len(fst)+1, returnline=True)
    assert_equal(expected, observed)
def test_default_diverging_vlims(self):
    """Default color limits are symmetric around zero for diverging data."""
    plotter = mat._HeatMapper(self.df_norm, **self.default_kws)
    vlim = max(abs(self.x_norm.min()), abs(self.x_norm.max()))
    nt.assert_equal(plotter.vmin, -vlim)
    nt.assert_equal(plotter.vmax, vlim)
    nt.assert_true(plotter.divergent)
def test_peek1():
    """peek with an offset returns the character at that stream position."""
    got = peek(io.StringIO('<foo>'), off=3)
    assert_equal(got, 'o')
def test_values_exceeded_time(self):
    """Samples older than the sliding window are excluded from values."""
    self.rr._values = [(1, 10), (2, 2), (3, 1), (4, 4)]
    # Advance the clock just past the window covering the first sample.
    self.time.return_value = 4.0001
    nt.assert_equal(self.rr.values, [2, 1, 4])
def test_notify(self):
    """notify forwards the sample to the reservoir and returns its result."""
    outcome = self.histogram.notify(1.2)
    nt.assert_equal(self.reservoir.add.call_args_list, [mock.call(1.2)])
    nt.assert_equal(outcome, self.reservoir.add.return_value)
def test_sorted_values(self):
    """sorted_values returns the stored samples in ascending order."""
    self.rr._values = [(1, 10), (2, 2), (3, 1), (4, 4)]
    nt.assert_equal(self.rr.sorted_values, [1, 2, 4, 10])
def test_long_delay(self):
    """A sample arriving after an extreme gap collapses the old weights."""
    for step in range(1, 6):
        self._add_after(0.5 + step, 1)
    # this emulates a new value after 15 hours: in that case the times are
    # too small and collapse to zero
    nt.assert_equal(self._add_after(10, 3600.0 * 15), [2.5, 10.0])
def test_add_first(self):
    """Successive adds keep the reservoir's expected internal ordering."""
    expectations = [
        (1.5, [1.5]),
        (2.5, [1.5, 2.5]),
        (3.5, [1.5, 3.5, 2.5]),
        (4.5, [1.5, 3.5, 4.5, 2.5]),
        (5.5, [5.5, 1.5, 3.5, 4.5, 2.5]),
    ]
    for value, expected in expectations:
        nt.assert_equal(self._add_after(value, 1), expected)
def test_sorted_values(self):
    """Expired samples are excluded before sorting."""
    self.rr._values = [(1, 10), (2, 2), (3, 1), (4, 4)]
    # Advance the clock so the sample at t=1 falls out of the window.
    self.time.return_value = 4.0001
    nt.assert_equal(self.rr.sorted_values, [1, 2, 4])
def test_add_exceeded_time(self):
    """Old samples roll off as adds move past the sliding time window."""
    steps = [
        (1, 1, [(1, 1)]),
        (1.1, 2, [(1, 1), (1.1, 2)]),
        (1.2, 3, [(1, 1), (1.1, 2), (1.2, 3)]),
        (1.3, 4, [(1, 1), (1.1, 2), (1.2, 3), (1.3, 4)]),
        (3.1, 5, [(1, 1), (1.1, 2), (1.2, 3), (1.3, 4), (3.1, 5)]),
        (4.05, 6, [(1.1, 2), (1.2, 3), (1.3, 4), (3.1, 5), (4.05, 6)]),
        (4.1, 7, [(1.1, 2), (1.2, 3), (1.3, 4), (3.1, 5), (4.05, 6), (4.1, 7)]),
        (4.2, 8, [(1.3, 4), (3.1, 5), (4.05, 6), (4.1, 7), (4.2, 8)]),
        (10, 9, [(10, 9)]),
    ]
    for now, value, expected in steps:
        self.time.return_value = now
        self.rr.add(value)
        nt.assert_equal(list(self.rr._values), expected)
def test_unfold_signal_by_keyword(self):
    """unfold(unfold_signal=True) flattens only the signal axes."""
    folded = self.s.deepcopy()
    folded.unfold(unfold_navigation=False, unfold_signal=True)
    nt.assert_equal(folded.axes_manager.signal_shape,
                    (self.s.axes_manager.signal_size,))
def test_values(self):
    """All samples inside the window are reported in insertion order."""
    self.rr._values = [(1, 10), (1.5, 1.5), (2, 2), (3, 3)]
    self.time.return_value = 3.0
    nt.assert_equal(self.rr.values, [10, 1.5, 2, 3])
def test_unfold_signal(self):
    """Unfolding the signal space also unfolds the variance metadata."""
    unfolded = self.s.deepcopy()
    unfolded.unfold_signal_space()
    variance_am = unfolded.metadata.Signal.Noise_properties.variance.axes_manager
    nt.assert_equal(variance_am.signal_shape,
                    (self.s.axes_manager.signal_size,))
def test_unfold_nothing_by_keyword(self):
    """unfold with both options False leaves the data shape untouched."""
    folded = self.s.deepcopy()
    folded.unfold(unfold_navigation=False, unfold_signal=False)
    nt.assert_equal(folded.data.shape, self.s.data.shape)
def test_opening_closing():
    """An open/close tag pair tokenizes into opening and closing tokens."""
    markup = '<foo></foo>'
    got = list(tok(io.StringIO(markup)))
    want = [(opening, '<foo>'), (closing, '</foo>')]
    assert_equal(got, want)
def test_unfold_signal(self):
    """unfold_signal_space flattens the signal axes to a single axis."""
    unfolded = self.s.deepcopy()
    unfolded.unfold_signal_space()
    nt.assert_equal(unfolded.axes_manager.signal_shape,
                    (self.s.axes_manager.signal_size,))
def test_tok_selfclosing():
    """A self-closing tag yields a single selfclosing token."""
    got = list(tok(io.StringIO('<foo/>')))
    want = [(selfclosing, '<foo/>')]
    assert_equal(got, want)
def test_unfold_navigation(self):
    """unfold_navigation_space flattens the navigation axes to one axis."""
    unfolded = self.s.deepcopy()
    unfolded.unfold_navigation_space()
    nt.assert_equal(unfolded.axes_manager.navigation_shape,
                    (self.s.axes_manager.navigation_size,))
def test_peek():
    """peek on an empty stream returns the empty string."""
    empty = ''
    stream = io.StringIO(empty)
    assert_equal(empty, peek(stream))
def test_text_opening():
    """Leading text followed by a tag yields text then opening tokens."""
    markup = 'text<foo>'
    got = list(tok(io.StringIO(markup)))
    want = [(text, 'text'), (opening, '<foo>')]
    assert_equal(got, want)
def test_tok_error_comment():
    """An unterminated comment tokenizes as an error token."""
    got = list(tok(io.StringIO('<!--foo>')))
    want = [(error, '<!--foo>')]
    assert_equal(got, want)
def test_tok_doctype():
    """A doctype declaration yields a single doctype token."""
    got = list(tok(io.StringIO('<!doctype>')))
    want = [(doctype, '<!doctype>')]
    assert_equal(got, want)
def test_tok_opening():
    """An opening tag yields a single opening token."""
    got = list(tok(io.StringIO('<foo>')))
    want = [(opening, '<foo>')]
    assert_equal(got, want)
def test_tok_error_instruction():
    """A processing instruction without '?>' tokenizes as an error."""
    got = list(tok(io.StringIO('<?instruction>')))
    want = [(error, '<?instruction>')]
    assert_equal(got, want)
def test_empty_text():
    """Empty input produces no tokens at all."""
    got = list(tok(io.StringIO('')))
    assert_equal(got, [])
def test_tok_instruction():
    """A well-formed processing instruction yields one instruction token."""
    got = list(tok(io.StringIO('<?foo?>')))
    want = [(instruction, '<?foo?>')]
    assert_equal(got, want)
def test_text_opening_text_closing():
    """Mixed text and tags tokenize in document order."""
    markup = 'eww<foo>bar</foo>'
    got = list(tok(io.StringIO(markup)))
    want = [(text, 'eww'), (opening, '<foo>'), (text, 'bar'), (closing, '</foo>')]
    assert_equal(got, want)
def test_tok_text():
    """Bare text yields a single text token."""
    got = list(tok(io.StringIO('foo')))
    want = [(text, 'foo')]
    assert_equal(got, want)
def test_raw_data(self):
    """raw_data exposes the reservoir's values unchanged."""
    got = self.histogram.raw_data()
    nt.assert_equal(got, self.reservoir.values)
def test_peek_above_left():
    # NOTE(review): the leading `return` makes this look like a test that
    # was deliberately short-circuited (disabled) at some point; as written
    # here the call still executes. Confirm intent before changing it.
    # Asks peek() for off=6, past the end of the 5-character '<foo>'.
    return assert_equal(peek(io.StringIO('<foo>'), off=6), '>')
def test_instruction_too_short():
    """An instruction cut off before the closing '>' is an error token."""
    markup = '<?xml version="1.0" encoding="UTF-8"?'
    got = list(tok(io.StringIO(markup)))
    want = [(error, '<?xml version="1.0" encoding="UTF-8"?')]
    assert_equal(got, want)
def test_instruction_text_instruction():
    """Instructions and text interleave in document order."""
    markup = '<?xml version="1.0" encoding="UTF-8"?>text<?instruction?>'
    got = list(tok(io.StringIO(markup)))
    want = [(instruction, '<?xml version="1.0" encoding="UTF-8"?>'),
            (text, 'text'),
            (instruction, '<?instruction?>')]
    assert_equal(got, want)
def test_tfidf_no_smoothing():
    """Unsmoothed IDF normalizes correctly, and emits a divide-by-zero
    warning when a feature is all zeros in the training data."""
    X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())
    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing make IDF fragile in the presence of feature with
    # only zeros
    X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    # First we need to verify that numpy here provides div 0 warnings
    with warnings.catch_warnings(record=True) as w:
        1. / np.array([0.])
        numpy_provides_div0_warning = len(w) == 1
    with warnings.catch_warnings(record=True) as w:
        tfidf = tr.fit_transform(X).toarray()
        if not numpy_provides_div0_warning:
            # Skip rather than fail on numpy builds that stay silent here.
            raise SkipTest("Numpy does not provide div 0 warnings.")
        assert_equal(len(w), 1)
        # For Python 3 compatibility
        if hasattr(w[0].message, 'args'):
            assert_true("divide by zero" in w[0].message.args[0])
        else:
            assert_true("divide by zero" in w[0].message)