def filled_stringify_enumerate(new_iter):
    """
    Enumerates an iterable, yielding each element together with its index
    and a zero-padded string form of that index.

    All string indices are padded to the width of the largest index (see the
    ``range(11)`` example below), so they sort lexicographically.

    Note:
        The previous docstring summary ("reverses it using reversed") was a
        copy-paste error from ``reverse_each_element``; the doctests below
        show the actual behavior.

    Args:
        new_iter(iter):     an iterator to use for enumeration over.

    Returns:
        (generator object): an iterator yielding
                            ``(index, str_index, element)`` tuples.

    Examples:
        >>> filled_stringify_enumerate([5, 7]) #doctest: +ELLIPSIS
        <generator object filled_stringify_enumerate at 0x...>

        >>> list(filled_stringify_enumerate([]))
        []

        >>> list(filled_stringify_enumerate(irange(5)))
        [(0, '0', 0), (1, '1', 1), (2, '2', 2), (3, '3', 3), (4, '4', 4)]

        >>> list(filled_stringify_enumerate(irange(2, 5)))
        [(0, '0', 2), (1, '1', 3), (2, '2', 4)]

        >>> list(filled_stringify_enumerate([5]))
        [(0, '0', 5)]

        >>> list(filled_stringify_enumerate([5, 7]))
        [(0, '0', 5), (1, '1', 7)]

        >>> list(filled_stringify_enumerate(iter([5, 7])))
        [(0, '0', 5), (1, '1', 7)]

        >>> list(filled_stringify_enumerate(range(11)))
        [(0, '00', 0), (1, '01', 1), (2, '02', 2), (3, '03', 3), (4, '04', 4), (5, '05', 5), (6, '06', 6), (7, '07', 7), (8, '08', 8), (9, '09', 9), (10, '10', 10)]
    """

    new_list = new_iter
    try:
        # Sequences expose len() directly; no need to materialize them.
        num_elems = len(new_list)
    except TypeError:
        # Arbitrary iterators must be materialized so the length (and thus
        # the padding width of the string indices) is known up front.
        new_list = list(new_list)
        num_elems = len(new_list)

    # Produces (index, zero-padded-string-index) pairs for every position.
    stringified_indices = filled_stringify_numbers(
        irange(num_elems), include_numbers=True
    )

    for (i, i_str), each in izip(stringified_indices, new_list):
        yield (i, i_str, each)
def test1a(self):
    """
    Round-trips an order-sensitive parameter structure through JSON and
    checks ``read_parameters`` with ``maintain_order=True`` reproduces it.
    """

    dict_type = collections.OrderedDict

    params = collections.OrderedDict()
    for key, value in [("b", list(irange(3))), ("c", "test"), ("a", 5)]:
        params[key] = value
    # Nested copies snapshot the insertion order established above.
    params["d"] = collections.OrderedDict(params)
    params["h"] = [collections.OrderedDict(params["d"])]
    params["g"] = [list(pair) for pair in params["d"].items()]
    params["d"] = dict_type(params["d"])
    params["h"][0] = dict_type(params["h"][0])
    params = dict_type(params)

    config_filename = os.path.join(self.temp_dir, "config.json")

    # Sanity check: a plain json round trip must already be lossless.
    with open(config_filename, "w") as config_file:
        config_file.write(json.dumps(params))
    with open(config_filename, "r") as config_file:
        params_raw_out = json.load(config_file)
    assert params == params_raw_out

    params_out = nanshe.io.xjson.read_parameters(config_filename, maintain_order=True)
    assert params == params_out
def test1a(self):
    """
    Writes an ordered parameter dict to JSON and checks that
    ``nanshe.io.xjson.read_parameters`` (with ``maintain_order=True``)
    returns an equal structure.
    """

    dict_type = collections.OrderedDict

    params = collections.OrderedDict()
    params["b"] = list(irange(3))
    params["c"] = "test"
    params["a"] = 5
    # Each nested container is a shallow snapshot of the dict so far.
    params["d"] = collections.OrderedDict(params)
    params["h"] = [collections.OrderedDict(params["d"])]
    params["g"] = [[each_key, each_value]
                   for each_key, each_value in params["d"].items()]
    params["d"] = dict_type(params["d"])
    params["h"][0] = dict_type(params["h"][0])
    params = dict_type(params)

    config_filename = os.path.join(self.temp_dir, "config.json")

    with open(config_filename, "w") as config_file:
        json.dump(params, config_file)

    with open(config_filename, "r") as config_file:
        params_raw_out = json.load(config_file)

    # Plain json must round-trip losslessly before testing read_parameters.
    assert params == params_raw_out

    params_out = nanshe.io.xjson.read_parameters(
        config_filename, maintain_order=True
    )

    assert params == params_out
def test_run_multiprocessing_queue_spams_trainDL_4(self):
    """
    Runs SPAMS ``trainDL`` through the multiprocessing queue wrapper and
    checks the learned dictionary matches ``self.g3`` up to a permutation
    of its atoms.
    """

    if not has_spams:
        raise nose.SkipTest(
            "Cannot run this test without SPAMS being installed."
        )

    out_queue = Queue()

    nanshe.box.spams_sandbox.run_multiprocessing_queue_spams_trainDL(
        out_queue,
        self.g3.astype(float),
        D=self.g3.astype(float),
        gamma1=0,
        gamma2=0,
        numThreads=1,
        iter=10,
        modeD=0,
        posAlpha=True,
        clean=True,
        posD=True,
        batchsize=256,
        lambda1=0.2,
        lambda2=0,
        mode=2
    )

    # Binarize the result: only the support of the dictionary is compared.
    d3 = (out_queue.get() != 0)

    self.g3 = self.g3.transpose()
    d3 = d3.transpose()

    assert self.g3.shape == d3.shape
    assert (
        self.g3.astype(bool).max(axis=0) == d3.astype(bool).max(axis=0)
    ).all()

    unmatched_g3 = range(len(self.g3))
    matched = dict()

    # Greedily pair each learned atom with an identical ground-truth atom.
    for i in irange(len(d3)):
        remaining = []
        for j in unmatched_g3:
            if (d3[i] == self.g3[j]).all():
                matched[i] = j
            else:
                remaining.append(j)
        unmatched_g3 = remaining

    print(unmatched_g3)

    assert len(unmatched_g3) == 0
    assert (self.g3.astype(bool) == d3.astype(bool)).all()
def teardown(self): import shutil for i in irange(len(self.temp_files)): self.temp_files[i].close() self.temp_files = [] shutil.rmtree(self.temp_dir) self.temp_dir = ""
def test_call_spams_trainDL_3(self):
    """
    Calls SPAMS ``trainDL`` directly and checks the learned dictionary
    matches ``self.g`` up to a permutation of its atoms.
    """

    if not has_spams:
        raise nose.SkipTest(
            "Cannot run this test without SPAMS being installed."
        )

    # Only the support (non-zero pattern) of the dictionary is compared.
    d = (
        nanshe.box.spams_sandbox.call_spams_trainDL(
            self.g.astype(float),
            D=self.g.astype(float),
            gamma1=0,
            gamma2=0,
            numThreads=1,
            iter=10,
            modeD=0,
            posAlpha=True,
            clean=True,
            posD=True,
            batchsize=256,
            lambda1=0.2,
            lambda2=0,
            mode=2
        ) != 0
    )

    self.g = self.g.transpose()
    d = d.transpose()

    assert self.g.shape == d.shape
    assert (
        self.g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)
    ).all()

    unmatched_g = range(len(self.g))
    matched = dict()

    # Greedily pair each learned atom with an identical ground-truth atom.
    for i in irange(len(d)):
        remaining = []
        for j in unmatched_g:
            if (d[i] == self.g[j]).all():
                matched[i] = j
            else:
                remaining.append(j)
        unmatched_g = remaining

    print(unmatched_g)

    assert len(unmatched_g) == 0
    assert (self.g.astype(bool) == d.astype(bool)).all()
def setup(self):
    """
    Creates a temporary directory holding ``TestXGlob.num_files`` ``.tif``
    files and records their open handles sorted by filename.
    """

    import tempfile

    self.temp_dir = tempfile.mkdtemp()
    self.temp_files = []

    # Map each generated filename to its handle so the handles can be
    # re-emitted in sorted-name order below.
    handles_by_name = dict()
    for _ in irange(TestXGlob.num_files):
        each_handle = tempfile.NamedTemporaryFile(suffix=".tif", dir=self.temp_dir)
        handles_by_name[each_handle.name] = each_handle

    for each_name in sorted(handles_by_name):
        self.temp_files.append(handles_by_name[each_name])
def test0b(self):
    """
    Checks that ``read_parameters`` (without order maintenance) strips
    ``__comment__`` list items, values, and keys from the loaded JSON.
    """

    dict_type = dict

    params = collections.OrderedDict()
    params["b"] = list(irange(3))
    params["b"].append("__comment__ to drop")
    params["c"] = "test"
    params["a"] = 5
    # Nested copies snapshot the dict built so far (lists stay shared).
    params["d"] = collections.OrderedDict(params)
    params["h"] = [collections.OrderedDict(params["d"])]
    params["g"] = [list(pair) for pair in params["d"].items()]
    params["e"] = "__comment__ will be removed"
    params["__comment__ e"] = "also will be removed"
    params["f"] = u"will not be unicode"
    params["d"] = dict_type(params["d"])
    params["h"][0] = dict_type(params["h"][0])
    params = dict_type(params)

    config_filename = os.path.join(self.temp_dir, "config.json")

    with open(config_filename, "w") as config_file:
        json.dump(params, config_file)
    with open(config_filename, "r") as config_file:
        params_raw_out = json.load(config_file)
    assert params == params_raw_out

    params_out = nanshe.io.xjson.read_parameters(config_filename)

    # Mirror the stripping read_parameters performs: drop the trailing
    # "__comment__ to drop" from every copy of the "b" list...
    for each_container in (params, params["d"], params["h"][0]):
        each_container["b"] = each_container["b"][:-1]
    params["g"][0][-1] = params["g"][0][-1][:-1]
    # ...and remove comment-valued / comment-keyed entries entirely.
    del params["e"]
    del params["__comment__ e"]
    if str != unicode:
        params["f"] = params["f"].encode("utf-8")

    assert params == params_out
def test1b(self):
    """
    Checks that ``read_parameters`` with ``maintain_order=True`` strips
    ``__comment__`` list items, values, and keys while preserving order.
    """

    dict_type = collections.OrderedDict

    params = collections.OrderedDict()
    params["b"] = list(irange(3))
    params["b"].append("__comment__ to drop")
    params["c"] = "test"
    params["a"] = 5
    # Nested copies snapshot the dict built so far (lists stay shared).
    params["d"] = collections.OrderedDict(params)
    params["h"] = [collections.OrderedDict(params["d"])]
    params["g"] = [list(pair) for pair in params["d"].items()]
    params["e"] = "__comment__ will be removed"
    params["__comment__ e"] = "also will be removed"
    params["f"] = u"will not be unicode"
    params["d"] = dict_type(params["d"])
    params["h"][0] = dict_type(params["h"][0])
    params = dict_type(params)

    config_filename = os.path.join(self.temp_dir, "config.json")

    with open(config_filename, "w") as config_file:
        json.dump(params, config_file)
    with open(config_filename, "r") as config_file:
        params_raw_out = json.load(config_file)
    assert params == params_raw_out

    params_out = nanshe.io.xjson.read_parameters(config_filename, maintain_order=True)

    # Mirror the stripping read_parameters performs: drop the trailing
    # "__comment__ to drop" from every copy of the "b" list...
    for each_container in (params, params["d"], params["h"][0]):
        each_container["b"] = each_container["b"][:-1]
    params["g"][0][1] = params["g"][0][1][:-1]
    # ...and remove comment-valued / comment-keyed entries entirely.
    del params["e"]
    del params["__comment__ e"]
    if str != unicode:
        params["f"] = params["f"].encode("utf-8")

    assert params == params_out
def lagged_generators(new_iter, n=2):
    """
    Creates a tuple of generators with each next generator one step ahead
    of the previous generator.

    Args:
        new_iter(iter):     an iterator or something that can be turned
                            into an iterator

        n(int):             number of generators to create as lagged

    Returns:
        (tuple of generator objects):    a tuple of iterators with each one
                                         step in front of the others.

    Examples:
        >>> lagged_generators(irange(5), 1) #doctest: +ELLIPSIS
        (<itertools... object at 0x...>,)

        >>> list(izip(*lagged_generators(irange(5), 1)))
        [(0,), (1,), (2,), (3,), (4,)]

        >>> list(izip(*lagged_generators(irange(5), 2)))
        [(0, 1), (1, 2), (2, 3), (3, 4)]

        >>> list(izip(*lagged_generators(irange(5))))
        [(0, 1), (1, 2), (2, 3), (3, 4)]

        >>> list(izip(*lagged_generators(irange(5), 3)))
        [(0, 1, 2), (1, 2, 3), (2, 3, 4)]

        >>> list(izip_longest(*lagged_generators(irange(5))))
        [(0, 1), (1, 2), (2, 3), (3, 4), (4, None)]

        >>> list(izip_longest(*lagged_generators(irange(5), 3)))
        [(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, None), (4, None, None)]
    """

    warnings.warn(
        "Please use `lagged_generators_zipped` instead.",
        DeprecationWarning
    )

    assert (n >= 0), \
        "Only a positive semi-definite number of generators can be created."

    # Collect the iterators here; converted to a tuple on return.
    collected = []

    # n == 0 yields an empty tuple; otherwise chain tee copies, advancing
    # the trailing iterator one step per copy taken.
    if n > 0:
        trailing = itertools.tee(new_iter, 1)[0]
        for _ in irange(1, n):
            # Split off a copy lagging one step behind.
            lagged, trailing = itertools.tee(trailing, 2)
            collected.append(lagged)

            # Advance the leading copy; an exhausted iterator is replaced
            # by an empty tee so later copies stay valid.
            try:
                next(trailing)
            except StopIteration:
                trailing = itertools.tee([], 1)[0]

        # The leading iterator is the last entry (the only one when n == 1).
        collected.append(trailing)

    return tuple(collected)
def xrange_with_skip(start, stop=None, step=None, to_skip=None):
    """
    Behaves as irange does except allows for skipping arbitrary values, as
    well.

    These values to be skipped should be specified using some iterable.

    Args:
        start(int):          start for irange or if stop is not specified
                             this will be stop.

        stop(int):           stop for irange.

        step(int):           step for irange.

        to_skip(iter):       some form of iterable or list of elements to
                             skip (can be a single value as well).

    Returns:
        (generator object):  an irange-like generator that skips some
                             values.

    Examples:
        >>> xrange_with_skip(10) #doctest: +ELLIPSIS
        <generator object xrange_with_skip at 0x...>

        >>> list(xrange_with_skip(10))
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

        >>> list(xrange_with_skip(0, 10))
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

        >>> list(xrange_with_skip(1, 10))
        [1, 2, 3, 4, 5, 6, 7, 8, 9]

        >>> list(xrange_with_skip(0, 10, 2))
        [0, 2, 4, 6, 8]

        >>> list(xrange_with_skip(1, 10, 2))
        [1, 3, 5, 7, 9]

        >>> list(xrange_with_skip(10, to_skip = 2))
        [0, 1, 3, 4, 5, 6, 7, 8, 9]

        >>> list(xrange_with_skip(10, to_skip = [2, 7]))
        [0, 1, 3, 4, 5, 6, 8, 9]

        >>> list(xrange_with_skip(10, to_skip = [0]))
        [1, 2, 3, 4, 5, 6, 7, 8, 9]

        >>> list(xrange_with_skip(1, 10, to_skip = [0]))
        [1, 2, 3, 4, 5, 6, 7, 8, 9]

        >>> list(xrange_with_skip(10, to_skip = [9]))
        [0, 1, 2, 3, 4, 5, 6, 7, 8]
    """

    # Build the underlying range, mimicking irange's positional behavior.
    if stop is None:
        full = iter(irange(start))
    elif step is None:
        full = iter(irange(start, stop))
    else:
        full = iter(irange(start, stop, step))

    # Normalize to_skip into a sorted, de-duplicated iterator; a single
    # non-iterable value is wrapped into a one-element iterator.
    if to_skip is None:
        to_skip = iter([])
    else:
        try:
            to_skip = iter(sorted(set(to_skip)))
        except TypeError:
            to_skip = iter([to_skip])

    # Walk both streams in lockstep: emit values except when they equal
    # the next pending skip value.
    next_to_skip = next(to_skip, None)

    for each in full:
        if each != next_to_skip:
            yield(each)
        else:
            next_to_skip = next(to_skip, None)
else: try: to_skip = iter(sorted(set(to_skip))) except TypeError: to_skip = iter([to_skip]) next_to_skip = next(to_skip, None) for each in full: if each != next_to_skip: yield(each) else: next_to_skip = next(to_skip, None) splitting_xrange = lambda a, *args: disperse(irange(a, *args)) @prof.log_call(trace_logger) def reverse_each_element(new_iter): """ Takes each element yielded by new_iter and reverses it using reversed. Args: new_iter(iter): an iterator or something that can be turned into an iterator. Returns: (generator object): an iterator over the reversed elements. Examples:
def test_run_multiprocessing_array_spams_trainDL_4(self):
    # Runs SPAMS trainDL through the multiprocessing shared-array wrapper
    # and checks that the learned dictionary's non-zero support matches
    # the ground truth ``self.g3`` up to a permutation of atoms.
    if not has_spams:
        raise nose.SkipTest(
            "Cannot run this test without SPAMS being installed."
        )

    float_type = numpy.float64

    # Copy the ground truth into a shared (project npctypes) array so the
    # worker process can read it.
    # NOTE(review): "F" presumably requests Fortran (column-major)
    # layout, matching SPAMS expectations — confirm in npctypes docs.
    g3_array = npctypes.shared.ndarray(self.g3.shape, float_type, "F")
    with npctypes.shared.as_ndarray(g3_array) as g3_array_numpy:
        g3_array_numpy[...] = self.g3
    del g3_array_numpy

    # Shared output buffer the worker writes the learned dictionary into.
    result_array = npctypes.shared.ndarray((self.g3.shape[0], self.g3.shape[1]), float_type, "F")

    nanshe.box.spams_sandbox.run_multiprocessing_array_spams_trainDL(
        type(result_array),
        result_array,
        type(g3_array),
        g3_array,
        False,
        type(g3_array),
        g3_array,
        **{
            "gamma2" : 0,
            "gamma1" : 0,
            "numThreads" : 1,
            "iter" : 10,
            "modeD" : 0,
            "posAlpha" : True,
            "clean" : True,
            "posD" : True,
            "batchsize" : 256,
            "lambda1" : 0.2,
            "lambda2" : 0,
            "mode" : 2
        }
    )

    # Binarize the result: only the non-zero support is compared below.
    d3 = None
    with npctypes.shared.as_ndarray(result_array) as d3:
        d3 = (d3 != 0)

    self.g3 = self.g3.transpose()
    d3 = d3.transpose()

    assert (self.g3.shape == d3.shape)

    assert (self.g3.astype(bool).max(axis=0) == d3.astype(bool).max(axis=0)).all()

    # Greedily pair each learned atom with an identical ground-truth atom;
    # every ground-truth atom must be consumed by the end.
    unmatched_g3 = range(len(self.g3))
    matched = dict()

    for i in irange(len(d3)):
        new_unmatched_g3 = []
        for j in unmatched_g3:
            if not (d3[i] == self.g3[j]).all():
                new_unmatched_g3.append(j)
            else:
                matched[i] = j

        unmatched_g3 = new_unmatched_g3

    print(unmatched_g3)

    assert (len(unmatched_g3) == 0)

    assert (self.g3.astype(bool) == d3.astype(bool)).all()