def test_beautiful_drange_dates(self):
    dates = [
        27 / Mar / 1994, 28 / Mar / 1994, 29 / Mar / 1994, 30 / Mar / 1994,
        31 / Mar / 1994, 1 / Apr / 1994, 2 / Apr / 1994, 3 / Apr / 1994,
        4 / Apr / 1994, 5 / Apr / 1994,
    ]
    for d, e in zipl(drange(27 / Mar / 1994, 5 / Apr / 1994), dates[:-1]):
        self.assertEqual(d, e)
    for d, e in zipl(drange(27 / Mar / 1994, 5 / Apr / 1994, 2 * days), dates[:-1:2]):
        self.assertEqual(d, e)
    for d, e in zipl(drange(5 / Apr / 1994, 27 / Mar / 1994, -2 * days), dates[:0:-2]):
        self.assertEqual(d, e)

    today = date.today()
    dates = [today, today + 2 * days, today + 4 * days]
    for d, e in zipl(drange(today + 5 * days, step=2 * days), dates):
        self.assertEqual(d, e)
def _parse(cls, color, data=None, alpha=util.DEF_ALPHA, *, variables=None, **kwargs):
    """Parse the color."""
    obj = None
    if isinstance(color, str):
        # Parse a color space name and coordinates
        if data is not None:
            s = color
            space_class = cls.CS_MAP.get(s)
            if not space_class:
                raise ValueError("'{}' is not a registered color space".format(s))
            num_channels = len(space_class.CHANNELS)
            if len(data) < num_channels:
                data = list(data) + [alg.NaN] * (num_channels - len(data))
            coords = [
                alg.clamp(float(v), *c.limit)
                for c, v in zipl(space_class.CHANNELS, data)
            ]
            coords.append(alg.clamp(float(alpha), *space_class.channels[-1].limit))
            obj = space_class, coords
        # Parse a CSS string
        else:
            m = cls._match(color, fullmatch=True, variables=variables)
            if m is None:
                raise ValueError("'{}' is not a valid color".format(color))
            coords = [
                alg.clamp(float(v), *c.limit)
                for c, v in zipl(m[0].CHANNELS, m[1])
            ]
            coords.append(alg.clamp(float(m[2]), *m[0].channels[-1].limit))
            obj = m[0], coords
    elif isinstance(color, Base):
        # Handle a color instance
        space_class = cls.CS_MAP.get(color.space())
        if not space_class:
            raise ValueError("'{}' is not a registered color space".format(color.space()))
        obj = space_class, color[:]
    elif isinstance(color, Mapping):
        # Handle a color dictionary
        space = color['space']
        coords = color['coords']
        alpha = color.get('alpha', 1.0)
        obj = cls._parse(space, coords, alpha)
    else:
        raise TypeError("'{}' is an unrecognized type".format(type(color)))
    if obj is None:
        raise ValueError("Could not process the provided color")
    return obj
def _parse(cls, color: ColorInput, data: Optional[VectorLike] = None,
           alpha: float = util.DEF_ALPHA, **kwargs: Any) -> Tuple[Space, List[float]]:
    """Parse the color."""
    if isinstance(color, str):
        # Parse a color space name and coordinates
        if data is not None:
            s = color
            space_class = cls.CS_MAP.get(s)
            if not space_class:
                raise ValueError("'{}' is not a registered color space".format(s))
            num_channels = len(space_class.CHANNELS)
            if len(data) < num_channels:
                data = list(data) + [alg.NaN] * (num_channels - len(data))
            coords = [
                alg.clamp(float(v), *c.limit)
                for c, v in zipl(space_class.CHANNELS, data)
            ]
            coords.append(alg.clamp(float(alpha), *space_class.channels[-1].limit))
            obj = space_class, coords
        # Parse a CSS string
        else:
            m = cls._match(color, fullmatch=True)
            if m is None:
                raise ValueError("'{}' is not a valid color".format(color))
            coords = [
                alg.clamp(float(v), *c.limit)
                for c, v in zipl(m[0].CHANNELS, m[1])
            ]
            coords.append(alg.clamp(float(m[2]), *m[0].channels[-1].limit))
            obj = m[0], coords
    elif isinstance(color, Color):
        # Handle a color instance
        space_class = cls.CS_MAP.get(color.space())
        if not space_class:
            raise ValueError("'{}' is not a registered color space".format(color.space()))
        obj = space_class, color[:]
    elif isinstance(color, Mapping):
        # Handle a color dictionary
        space = color['space']
        coords = color['coords']
        alpha = color.get('alpha', 1.0)
        obj = cls._parse(space, coords, alpha)
    else:
        raise TypeError("'{}' is an unrecognized type".format(type(color)))
    return obj
def grouper(iterable, n, fillvalue=None):
    """
    Group iterable into n sized chunks.

    See: http://stackoverflow.com/a/312644/758157
    """
    args = [iter(iterable)] * n
    return zipl(*args, fillvalue=fillvalue)
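# A minimal usage sketch for grouper, assuming zipl above is bound to
# itertools.zip_longest (consistent with the fillvalue keyword it forwards):
from itertools import zip_longest as zipl

assert list(grouper("ABCDEFG", 3, fillvalue="x")) == [
    ("A", "B", "C"), ("D", "E", "F"), ("G", "x", "x"),
]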
def sum2(a, b):
    """Add two decimal digit strings, schoolbook style."""
    res, c = [], 0
    for d1, d2 in zipl(reversed(a), reversed(b), fillvalue=0):
        s = int(d1) + int(d2) + c
        res.append(s % 10)
        c = s // 10
    if c:
        res.append(c)
    return ''.join(map(str, reversed(res)))
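# Quick sanity checks for sum2: zip_longest's fillvalue=0 pads the shorter
# operand, so carries propagate past its last digit.
assert sum2("999", "1") == "1000"
assert sum2("123", "877") == "1000"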
def writeloops(self, loopnum=None, dir=None, append='--Corr', log=True):
    '''Write H, M arrays for loops to a file.'''
    if loopnum is None:
        # If loopnum is not given, make a choice based on the file name.
        loopnummap = {'easy': 2, 'hard': 2, 'minor': 'all'}
        lfilename = os.path.split(self.filepath)[1].lower()
        for k in loopnummap:
            if k in lfilename:
                loopnum = loopnummap[k]
        if loopnum is None:
            # If none of the words in loopnummap are found, default to 'all'.
            loopnum = 'all'
    loopind = self._loopind(loopnum)
    indir, fn = os.path.split(self.filepath)
    outdir = indir if dir is None else dir
    loopfn = os.path.splitext(fn)[0] + append + '.csv'
    looppath = os.path.join(outdir, loopfn)
    # If the file already exists, start appending numbers.
    if os.path.isfile(looppath):
        matches = fnmatch.filter(os.listdir(outdir), '??'.join(os.path.splitext(loopfn)))
        if not any(matches):
            looppath = '_2'.join(os.path.splitext(looppath))
        else:
            n = np.max([int(p[-5]) for p in matches])
            looppath = ('_' + str(n + 1)).join(os.path.splitext(looppath))
    # Output will be alternating H, M, H, M, ...
    # Not straightforward because loops may have different lengths.
    # Filter out unwanted loops, convert to kOe.
    H, M = [], []
    for i, [h, m] in enumerate(zip(self.H, self.M)):
        if i in loopind:
            H.append(h / 1000)
            M.append(m)
    # Append the interpolated minor loop at the end if it exists.
    if hasattr(self, 'H_zminor'):
        H.append(self.H_zminor)
        M.append(self.M_zminor)
    # Interleave loops, padding empty spaces with None.
    # Don't ask...
    raggedlooparray = zipl(*[x for t in zip(H, M) for x in t])
    # Text mode with newline='' so the csv module controls line endings.
    with open(looppath, "w", newline='') as f:
        # Lines terminate with \r\n by default; change to \n.
        excelmod = csv.excel()
        excelmod.lineterminator = '\n'
        writer = csv.writer(f, dialect=excelmod)
        writer.writerows(raggedlooparray)
    print('Loop(s) {} written to {}'.format(loopnum, looppath))
    self.log += '{}: Wrote loop(s) {} to disk: {}\n'.format(_now(), loopnum, looppath)
    if log:
        self.writelog(dir=dir)
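# The interleave-and-transpose idiom above in isolation, assuming zipl is
# itertools.zip_longest: ragged columns are padded with None, which the csv
# writer emits as empty cells.
from itertools import zip_longest as zipl
H, M = [[1, 2, 3], [4]], [[10, 20, 30], [40]]
rows = list(zipl(*[x for t in zip(H, M) for x in t]))
assert rows == [(1, 10, 4, 40), (2, 20, None, None), (3, 30, None, None)]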
def hash_round(lens, elems, pos=0, skip=0,
               accumulator=lambda x, y: (y[0], reduce(sum, x))):
    # accumulate() threads each (skip, length) pair together with the running
    # position; reduce(sum, x) exploits sum(iterable, start) to turn the
    # previous state x = ((skip, s), pos) into the next position skip + s + pos.
    for (skip, s), pos in accumulate(zipl(enumerate(lens, skip), [pos]), accumulator):
        reverse_sublist(elems, pos % len(elems), (pos + s) % len(elems))
    return elems, skip + s + pos, skip + 1
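# The reduce(sum, x) trick above in isolation: reduce over a two-element
# sequence makes a single call sum(first, second), and sum(iterable, start)
# adds the start value, so ((skip, s), pos) collapses to skip + s + pos.
from functools import reduce
assert reduce(sum, ((3, 4), 10)) == 17  # sum((3, 4), 10) == 3 + 4 + 10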
def match(self, pystachio_object):
    _, refs = pystachio_object.interpolate()
    for ref in refs:
        args = []
        zips = list(zipl(self._components, ref.components()))
        for pattern, component in zips[:len(self._components)]:
            if pattern.__class__ != component.__class__ or not pattern.value.match(component.value):
                break
            args.append(component.value)
        else:
            yield tuple(args)
def neighbors(text):
    '''Return iterables n, s, w and e of neighbors of text.

    For a given text, return four character-yielding iterables that are
    respectively the neighbors above, below, to the left and to the right
    of the corresponding character in the text. Nonexistent neighbors are
    represented by None.
    '''
    lines = text.splitlines(True)
    n = [None for _ in lines[0]] + flatten([
        map(itemgetter(0), islice(zipl(prev_line, line), len(line)))
        for prev_line, line in zip(lines, lines[1:])
    ])
    s = flatten([
        map(itemgetter(1), islice(zipl(line, next_line), len(line)))
        for line, next_line in zip(lines, lines[1:])
    ]) + [None for _ in lines[-1]]
    w = flatten([[None] + list(line[:-1]) for line in lines])
    e = flatten([list(line[1:]) + [None] for line in lines])
    return n, s, w, e
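# The shifted-pairing idiom from neighbors() in isolation, assuming zipl is
# itertools.zip_longest: pairing a line with its successor and truncating to
# the line's length yields each character's neighbor below, or None once the
# next line runs out.
from itertools import islice, zip_longest as zipl
from operator import itemgetter
line, next_line = "abcd", "ef"
below = list(map(itemgetter(1), islice(zipl(line, next_line), len(line))))
assert below == ["e", "f", None, None]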
def hist(plug, range, fmt="svg", out="img.svg", time_unit="us"):
    stages = yaml.safe_load(range)
    db_init("m0play.db")
    db_connect()
    plt.figure(figsize=(12, 4))
    nr_stages = len(stages)
    for nr, s in enumerate(stages, 1):
        r = dict(zipl(["from", "to", "end"], s, fillvalue=None))
        plt.subplot(1, nr_stages, nr)
        plt.grid(True)
        query(r["from"], r["to"], r["end"], plug, time_unit)
    db_close()
    plt.savefig(fname=out, format=fmt)
def test_beautiful_delta_dates(self):
    dates = [
        27 / Mar / 1994, 28 / Mar / 1994, 29 / Mar / 1994, 30 / Mar / 1994,
        31 / Mar / 1994, 1 / Apr / 1994, 2 / Apr / 1994, 3 / Apr / 1994,
        4 / Apr / 1994, 5 / Apr / 1994,
    ]
    for d, e in zipl(drange(27 / Mar / 1994, 5 / Apr / 1994), dates[:-1]):
        self.assertEqual(d, e)
    for d, e in zipl(drange(27 / Mar / 1994, 5 / Apr / 1994, 2 * days), dates[:-1:2]):
        self.assertEqual(d, e)
    for d, e in zipl(drange(5 / Apr / 1994, 27 / Mar / 1994, -2 * days), dates[:0:-2]):
        self.assertEqual(d, e)
def hist(db_name, plug, range, fmt="svg", out="img.svg", time_unit="us", rows=1, size=(12, 4)):
    stages = yaml.safe_load(range)
    db_init(db_name)
    db_connect()
    plt.figure(figsize=size)
    nr_stages = len(stages)
    columns = nr_stages // rows + (1 if nr_stages % rows > 0 else 0)
    for nr, s in enumerate(stages, 1):
        r = dict(zipl(["from", "to", "end"], s, fillvalue=None))
        plt.subplot(rows, columns, nr)
        plt.grid(True)
        query(r["from"], r["to"], r["end"], plug, time_unit)
    db_close()
    plt.savefig(fname=out, format=fmt)
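# The dict(zipl(...)) idiom used by both hist() variants, in isolation:
# a short YAML stage entry is padded to the full ("from", "to", "end") key
# set with None (assuming zipl is itertools.zip_longest).
from itertools import zip_longest as zipl
assert dict(zipl(["from", "to", "end"], ["t0", "t1"], fillvalue=None)) == {
    "from": "t0", "to": "t1", "end": None,
}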
def transpose_ct(ct: bytes, keysize: int) -> List[bytes]:
    """Break repeating-key-XOR ciphertext into single-byte XOR parts."""
    keysize_parts = (ct[i:i + keysize] for i in range(0, len(ct), keysize))
    # zipl pads the final short block with None; filter the padding out
    # (None.__ne__ keeps every real byte, including zero bytes).
    return [bytes(filter(None.__ne__, part)) for part in zipl(*keysize_parts)]
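# A small sketch of the transpose: column i collects every ciphertext byte
# that was XORed with key byte i; the last column(s) are simply shorter.
ct = bytes([1, 2, 3, 4, 5, 6, 7])
assert transpose_ct(ct, 3) == [bytes([1, 4, 7]), bytes([2, 5]), bytes([3, 6])]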
def solve(self, root0, root1):
    # zip_longest, not zip: trees with different leaf counts yield a
    # (leaf, None) pair and compare unequal instead of being truncated.
    return all(x == y for x, y in zipl(leaves(root0), leaves(root1)))
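# Hypothetical illustration only: with a leaves() generator along these lines
# (nested tuples as internal nodes, anything else as a leaf — an assumed
# encoding, not the original's), zip_longest makes trees with equal leaf
# sequences but different shapes compare equal, while a missing leaf shows up
# as a (leaf, None) mismatch.
def leaves(node):
    if isinstance(node, tuple):
        for child in node:
            yield from leaves(child)
    else:
        yield node

from itertools import zip_longest as zipl
assert all(x == y for x, y in zipl(leaves(((1, 2), 3)), leaves((1, (2, 3)))))
assert not all(x == y for x, y in zipl(leaves((1, 2)), leaves((1, 2, 3))))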
def _build_threads(self, ax, style):
    styles = {} if not style else style
    return [
        self._new_thread(ax, style)
        for style, _ in zipl(styles, self.func(), fillvalue={})
    ]
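# The fillvalue={} pairing in isolation, assuming zipl is
# itertools.zip_longest: every result from func gets a style dict, falling
# back to an empty one once the explicit styles run out.
from itertools import zip_longest as zipl
styles, results = [{"color": "r"}], ["a", "b", "c"]
assert [s for s, _ in zipl(styles, results, fillvalue={})] == [{"color": "r"}, {}, {}]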
def solve(self, root):
    return all(x == y for x, y in zipl(it(root), it(root, False)))
def buildAndTrainNewModelGroup():
    global LOG

    # GET PARAMETERS #
    # Identification name for the new group. All '_' characters are removed
    # from the final name.
    group_name = request.get_json().get('group_name').replace('_', '')
    # d2v or w2v models
    models_type = request.get_json().get('models_type')
    # Path to the training files. Allows:
    # - Path to a file with a list of training files. Each line of this file
    #   must be a training file path.
    # - Path to a folder with the training files.
    # Absolute path (if it starts with '/') or relative path from the Corpus folder.
    training_docs_path = request.get_json().get('training_docs_path')
    # percentage_training_corpus. Default 100%.
    percentage_training_corpus = request.get_json().get('percentage_training_corpus', 100)
    # Folder path where the new group will be saved after the training.
    # Absolute path (if it starts with '/') or relative path from the Corpus folder.
    models_folder = request.get_json().get('models_folder')
    # Training hyperparameter lists used to create all models. This input must
    # be a dict with:
    #   key = parameter name
    #   value = list of values for this parameter
    # The models are created with all possible combinations of every value.
    params = request.get_json().get('params')

    # FIX PATHS #
    # Actual absolute path of 'models_folder'.
    abs_models_folder = korpus_dir + "/" + models_folder if not models_folder.startswith("/") else models_folder
    # Remove the trailing '/' if present.
    abs_models_folder = abs_models_folder[:-1] if abs_models_folder.endswith("/") else abs_models_folder
    # Actual absolute path of 'training_docs_path'.
    abs_training_docs_path = korpus_dir + "/" + training_docs_path if not training_docs_path.startswith("/") else training_docs_path
    # Remove the trailing '/' if present.
    abs_training_docs_path = abs_training_docs_path[:-1] if abs_training_docs_path.endswith("/") else abs_training_docs_path

    # Clamp percentage_training_corpus to the range [0, 100].
    if int(percentage_training_corpus) < 0:
        percentage_training_corpus = 0
    elif int(percentage_training_corpus) > 100:
        percentage_training_corpus = 100
    else:
        percentage_training_corpus = int(percentage_training_corpus)

    # Append a new server log message.
    LOG.append(
        "Build new '%s' models group '%s' in folder '%s', with files in '%s'"
        % (models_type, group_name, abs_models_folder, abs_training_docs_path))

    # GET TRAINING TEXT FILES #
    # Open the file/folder with all training files and store them in 'abs_training_files'.
    abs_training_files = []
    # A file with the path of one training file per line.
    if os.path.isfile(abs_training_docs_path):
        with open(abs_training_docs_path) as df:
            for file_path in df:
                file_path = file_path.strip()
                # Each line may be a relative path or an absolute path (if it starts with '/').
                abs_file_path = korpus_dir + "/" + file_path if not file_path.startswith("/") else file_path
                abs_training_files.append(abs_file_path)
    # A directory where all training files are located.
    elif os.path.isdir(abs_training_docs_path):
        abs_training_files = [
            abs_training_docs_path + "/" + file
            for file in os.listdir(abs_training_docs_path)
        ]
    else:
        LOG.append(
            "ERROR: '%s' path does not exist. Invalid training_docs_path argument."
            % abs_training_docs_path)
        return jsonify({
            'reason': "invalid argument",
            'msg': "'%s' path does not exist. Invalid training_docs_path argument."
                   % abs_training_docs_path
        }), 400

    # CALCULATE HYPERPARAMETER LISTS #
    parameters_list = []
    # Store all hyperparameter names.
    nlist = [name for name in params.keys() if len(params[name]) > 0]
    # Store all value lists.
    vlist = [params[name] for name in nlist]
    # Add the default value of all other parameters (not received in the request).
    # These values are extracted from the file params.json.
    with open('params.json') as params_file:
        all_hparams_json = json.load(params_file)
    all_hparams_json = all_hparams_json["word2vec"] if models_type == "w2v" else all_hparams_json["doc2vec"]
    not_defined_hparams = [hp for hp in all_hparams_json if hp["name"] not in nlist]
    nlist.extend([ndhp["name"] for ndhp in not_defined_hparams])
    vlist.extend([[ndhp["default"]] for ndhp in not_defined_hparams])
    # Compute the cartesian product of the value lists.
    # The result is a list of lists with all combinations of all values.
    values_prod = prod(*vlist)
    # Fill 'parameters_list' with a list of dicts holding each combination of parameters:
    # [{vector_size: 10, window: 10}, {vector_size: 10, window: 11}, ...]
    for values in values_prod:
        new_json = dict()
        for name, value in zipl(nlist, values, fillvalue=None):
            new_json[name] = value
        parameters_list.append(new_json)

    # CREATE CORPUS AND TRAIN MODELS #
    # Call _trainD2VGroupFromTxtFilePaths (or the W2V variant) to create the
    # new group of d2v or w2v models and train them with the received
    # hyperparameters ('parameters_list'). This function also applies the
    # percentage to the training corpus ('percentage_training_corpus' to
    # 'abs_training_files').
    # NOTE: 'flag_remove_stopWords' is assumed to be defined at module scope.
    if models_type == "d2v":
        abs_models_folder = abs_models_folder + "/Doc2Vec"
        new_group = _trainD2VGroupFromTxtFilePaths(
            training_files_paths=abs_training_files,
            models_folder=abs_models_folder,
            group_name=group_name,
            parameters_list=parameters_list,
            percentage_training_corpus=percentage_training_corpus,
            flag_remove_stopWords=flag_remove_stopWords,
            LOG=LOG)
    else:
        abs_models_folder = abs_models_folder + "/Word2Vec"
        new_group = _trainW2VGroupFromTxtFilePaths(
            training_files_paths=abs_training_files,
            models_folder=abs_models_folder,
            group_name=group_name,
            parameters_list=parameters_list,
            percentage_training_corpus=percentage_training_corpus,
            flag_remove_stopWords=flag_remove_stopWords,
            LOG=LOG)

    # RETURN POST RESPONSE #
    # Return the new group as a dict with the group name and a list with all
    # models in the group.
    group_models = []
    for i, model in enumerate(new_group):
        group_models.append({
            'model': i,
            'total_training_time': model.total_train_time
        })
    return jsonify({'group': group_name, 'models': group_models}), 200
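# The hyperparameter-grid idiom above in isolation, assuming prod is
# itertools.product and zipl is itertools.zip_longest:
from itertools import product as prod, zip_longest as zipl
nlist, vlist = ["vector_size", "window"], [[10, 20], [5]]
grids = [dict(zipl(nlist, values, fillvalue=None)) for values in prod(*vlist)]
assert grids == [{"vector_size": 10, "window": 5}, {"vector_size": 20, "window": 5}]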