def get_dataset(overlap=5, window_size=10, time_steps=20, language=None, max_len=80):
    """Build windowed genuine/forged samples from the module-level `processed` data.

    For each entry in `processed`, the first 20 sequences in `val['values']`
    are treated as genuine and the rest as forged (presumably signatures —
    TODO confirm against the data producer). Sequences shorter than `max_len`
    are dropped; the rest are trimmed to `max_len`, flattened, and cut into
    overlapping windows via the project helper `window`.

    Args:
        overlap: window overlap, in time-step samples (multiplied by
            `num_features` before windowing).
        window_size: window length, in time-step samples.
        time_steps: NOTE(review): unused in this function body.
        language: if truthy, only entries whose 'language' field matches.
        max_len: minimum/trim length for sequences; if falsy, no data is
            collected at all (the loop is skipped).

    Returns:
        dict with keys 'genuine' and 'forged' mapping to numpy arrays of
        shape (num_samples, num_time_steps, window_size * num_features).
        If nothing was collected, `genuine` stays None and the unconditional
        `.shape` prints below raise AttributeError.
    """
    if language:
        dataset = _.filter(processed, lambda x: x['language'] == language)
    else:
        dataset = processed
    num_features = 7  # number of features on each time step
    # x,y,meta = [],[],[]
    genuine = forged = None
    if not max_len:
        pass  # no max_len -> nothing collected; see AttributeError note above
    else:
        for val in dataset:
            update = False
            g = val['values'][:20]  # first 20 sequences: genuine
            f = val['values'][20:]  # remainder: forged
            # Keep only sequences with len >= max_len, trim to max_len,
            # flatten, then window with the configured size/overlap
            # (both scaled by num_features because the data is flattened).
            g = np.array(
                _.map(
                    _.filter(g, lambda i: len(i) >= max_len),
                    lambda x: window(
                        trim(x, max_len).reshape(-1),
                        window_size * num_features,
                        overlap * num_features, True)))
            f = np.array(
                _.map(
                    _.filter(f, lambda i: len(i) >= max_len),
                    lambda x: window(
                        trim(x, max_len).reshape(-1),
                        window_size * num_features,
                        overlap * num_features, True)))
            # First non-empty batches seed the accumulators directly.
            if genuine is None and g.size > 0:
                genuine = g
                update = True
            if forged is None and f.size > 0:
                forged = f
                update = True
                # NOTE(review): this `continue` also discards `g` whenever
                # `forged` is seeded on an iteration where `genuine` was
                # already populated — looks unintentional; confirm.
                continue
            # Subsequent non-empty batches are appended along axis 0.
            if (g.size > 0 or f.size > 0) and not update:
                genuine = genuine if g.size == 0 else np.append(
                    genuine, g, axis=0)
                forged = forged if f.size == 0 else np.append(
                    forged, f, axis=0)
    print(genuine.shape)  # (num_samples, num_time_steps, window_size*num_features)
    print(
        f'Options\nWindow size --> {window_size}\nOverlap (in samples) --> {overlap}\nMax length 21,796\n{"— "*20}'
    )
    print(
        f'Number of time steps --> {genuine.shape[1]}\nNumber of features at each step --> {genuine.shape[2]}'
    )
    print(f'Number of genuine samples --> {genuine.shape[0]}')
    print(f'Number of forged samples --> {forged.shape[0]}')
    x = {"genuine": genuine, 'forged': forged}
    return x
def init(self):
    """Verify the environment: require root and all external tools.

    Exits the process with status 1 (after reporting via the app spinner)
    when not running as root, or when any required program is missing
    (``self.__has_program`` records misses in ``self.missing_programs``).
    """
    spinner = self.app.spinner
    if os.geteuid() != 0:
        self.app.spinner.fail('please run as root')
        exit(1)
    # Single source of truth for the required tools. The original code
    # repeated one self.__has_program(...) call per program and listed
    # 'apt-ftparchive' and 'chown' twice.
    required_programs = (
        'apt-ftparchive', 'apt-get', 'bash', 'cat', 'chmod', 'chown',
        'curl', 'cut', 'dpkg-buildpackage', 'du', 'fakeroot', 'find',
        'gpg', 'gzip', 'mkisofs', 'mksquashfs', 'mount', 'rngd', 'sudo',
        'umount', 'unsquashfs',
    )
    for program in required_programs:
        self.__has_program(program)
    if len(self.missing_programs) > 0:
        spinner.fail('missing the following programs')
        sys.stdout.writelines(name + '\n' for name in self.missing_programs)
        exit(1)
def depart_entry(self, node):
    """Pad the current table cell to the width of its column.

    The column width is the longest rendered text of any cell in the same
    column (the column index is the number of entries emitted so far).
    Appends the padding plus a single trailing space via ``self.add``.
    """
    length = 0
    i = len(self.table_entries) - 1  # column index of the current entry
    for row in self.table_rows:
        if len(row.children) > i:
            entry_length = len(row.children[i].astext())
            if entry_length > length:
                length = entry_length
    # str * n is the idiomatic (and negative-safe) form of the original
    # ''.join(_.map(range(n), lambda: ' ')) pydash construction.
    padding = ' ' * (length - len(node.astext()))
    self.add(padding + ' ')
def __search_deep(self, signal: S):
    """Search for `signal` in this store, then recursively in all children.

    Returns the value from the current store when present; otherwise a
    flattened list of every non-None result from the children. Implicitly
    returns None when neither this store nor any child has a value.
    """
    value = self.__search_current(signal)
    if value is not None:
        return value
    # gather all values from children if it cannot be found in current store
    # (fixed: compare against None with `is not`, not `!=` — PEP 8 E711)
    values = _.filter(
        _.map(self.children, lambda store: store.__search_deep(signal)),
        lambda x: x is not None)
    if values:
        # children may return lists; flatten one level so the caller sees
        # a single flat list of values
        return _.flatten(values)
def release_note_blocks(self):
    """Render all release-note objects as one blank-line-separated string.

    Logs the rendered text and returns it; returns '' when there are no
    release-note objects.
    """
    rendered = [note.to_block_str() for note in self.release_note_objs]
    release_notes_str = '\n\n'.join(rendered) if rendered else ''
    logger.info('Release note blocks:\n{rn}'.format(rn=release_notes_str))
    return release_notes_str
def depart_thead(self, node):
    """Emit the markdown header separator row (``| --- | --- |``).

    Each column's dash run is as wide as the longest rendered cell in that
    column. Resets the per-row entry list and pops the thead stack.
    """
    for i in range(len(self.table_entries)):
        # column width = widest cell text in column i across all rows
        length = 0
        for row in self.table_rows:
            if len(row.children) > i:
                entry_length = len(row.children[i].astext())
                if entry_length > length:
                    length = entry_length
        # '-' * n is the idiomatic form of the original
        # ''.join(_.map(range(length), lambda: '-')) pydash construction.
        self.add('| ' + '-' * length + ' ')
    self.add('|\n')
    self.table_entries = []
    self.theads.pop()
def list_to_dict(input_list):
    """Creates dictionary with keys from list values

    This function is primarily useful for converting passed data
    from Angular checkboxes, since angular ng-model can't return list
    of checked group of checkboxes, instead it returns something like
    {'a': True, 'b': True} for each checkbox

    Example:
        >>> list_to_dict(['a', 'b'])
        {'a': True, 'b': True}

    Args:
        input_list (list): List of any type

    Returns:
        dict: Dict with 'True' values
    """
    # dict.fromkeys does exactly what the pydash zip_object/constant
    # combination did, using only the standard library.
    return dict.fromkeys(input_list, True)
def list_to_dict(input_list):
    """Creates dictionary with keys from list values

    This function is primarily useful for converting passed data
    from Angular checkboxes, since angular ng-model can't return list
    of checked group of checkboxes, instead it returns something like
    {'a': True, 'b': True} for each checkbox

    Example:
        >>> list_to_dict(['a', 'b'])
        {'a': True, 'b': True}

    Args:
        input_list (list): List of any type

    Returns:
        dict: Dict with 'True' values
    """
    # dict.fromkeys does exactly what the pydash zip_object/constant
    # combination did, using only the standard library.
    return dict.fromkeys(input_list, True)
def reachable_release_tags_from_commit(
        self, repo: git.Repo,
        commit: git.objects.Commit) -> typing.List[str]:
    '''Returns a list of release-tags whose tagged commits are ancestors of the given commit.

    The returned list is sorted in descending order, putting the greatest
    reachable tag first. When no release tag is reachable, falls back to the
    repository root commit's hexsha (failing when the repository has multiple
    root commits).
    '''
    tags = self.release_tags()
    # Breadth-first search through the ancestry of `commit`, collecting
    # every release tag that points at a visited commit.
    visited = {commit.hexsha}
    queue = [commit]
    reachable_tags = []
    while queue:
        commit = queue.pop(0)
        if commit.hexsha in tags:
            reachable_tags.append(tags[commit.hexsha])
        # plain comprehensions replace the pydash filter/map — same
        # semantics, no extra dependency
        unvisited_parents = [
            parent for parent in commit.parents
            if parent.hexsha not in visited
        ]
        if unvisited_parents:
            queue.extend(unvisited_parents)
            visited.update(parent.hexsha for parent in unvisited_parents)
    # key can be the function itself; the lambda wrapper was redundant
    reachable_tags.sort(key=version.parse_to_semver, reverse=True)
    if not reachable_tags:
        logger.warning('no release tag found, falling back to root commit')
        # NOTE: `commit` here is the last commit dequeued by the BFS (a
        # traversal root), not necessarily the function's original argument —
        # preserved from the original implementation.
        root_commits = repo.iter_commits(rev=commit, max_parents=0)
        root_commit = next(root_commits, None)
        if not root_commit:
            fail(
                f'could not determine root commit from rev {commit.hexsha}'
            )
        if next(root_commits, None):
            fail(
                'cannot determine range for release notes. Repository has multiple root '
                'commits. Specify range via commit_range parameter.')
        reachable_tags.append(root_commit.hexsha)
    return reachable_tags
def load_conf(conf):
    """Merge `conf` with the config.yml found in the working directory.

    The working directory defaults to os.getcwd() and can be overridden with
    the --source/--src/-s CLI flag. Exits with status 1 when config.yml is
    missing or fails to parse. Returns a munchified config whose `paths`
    entries are made absolute relative to `paths.cwd`.
    """
    # -h/--help: skip all file loading, the caller only prints usage
    if _.includes(sys.argv, '-h') or _.includes(sys.argv, '--help'):
        return conf
    cwd_path = os.getcwd()
    flag = None
    if _.includes(sys.argv, '--source'):
        flag = '--source'
    elif _.includes(sys.argv, '--src'):
        flag = '--src'
    elif _.includes(sys.argv, '-s'):
        flag = '-s'
    if flag:
        # the flag's value (next argv element) overrides the working dir
        flag_index = _.index_of(sys.argv, flag)
        if len(sys.argv) > flag_index + 1:
            cwd_path = path.abspath(sys.argv[flag_index + 1])
    config_path = path.join(cwd_path, 'config.yml')
    if not path.exists(config_path):
        Halo(text='config not found: ' + config_path).fail()
        exit(1)
    with open(config_path, 'r') as f:
        try:
            # NOTE(review): yaml.load without an explicit Loader can execute
            # arbitrary constructors on untrusted input — consider
            # yaml.safe_load if config.yml is not fully trusted.
            conf = munchify(_.merge({}, conf, yaml.load(f)))
        except yaml.YAMLError as err:
            print(err)
            exit(1)
    conf.paths.cwd = cwd_path
    if 'version' in conf:
        # force string so e.g. `version: 1.0` doesn't stay a float
        conf.version = str(conf.version)
    # prefer the 'casper' layout; fall back to 'install' when the squashfs
    # is not where casper would put it
    conf.paths.install = path.join(conf.paths.mount, 'casper')
    if not path.exists(path.join(conf.paths.install, 'filesystem.squashfs')):
        conf.paths.install = path.join(conf.paths.mount, 'install')
    output_path = conf.paths.output
    # absolutize every configured path relative to paths.cwd
    # (presumably _.map over the dict maps its values — pydash behavior)
    conf = munchify(
        _.merge({}, conf, {
            'paths':
            _.zip_object(
                _.keys(conf.paths),
                _.map(
                    conf.paths,
                    lambda x: path.abspath(path.join(conf.paths.cwd, x))))
        }))
    # output is kept relative to the original cwd, not paths.cwd
    conf.paths.output = path.abspath(output_path)
    return conf
def reachable_release_tags_from_commit(github_helper: GitHubRepositoryHelper,
                                       repo: git.Repo,
                                       commit: git.objects.Commit) -> [str]:
    """Return release-tags whose tagged commits are ancestors of `commit`.

    The result is sorted descending by version (greatest tag first). When no
    release tag is reachable, falls back to the repository's single root
    commit hexsha, failing on multiple root commits.
    """
    tags = release_tags(github_helper, repo)
    # breadth-first search through the commit's ancestry
    visited = set()
    queue = list()
    queue.append(commit)
    visited.add(commit.hexsha)
    reachable_tags = list()
    while queue:
        # NOTE: `commit` is rebound here; after the loop it refers to the
        # last dequeued commit, not the original argument.
        commit = queue.pop(0)
        if commit.hexsha in tags:
            reachable_tags.append(tags[commit.hexsha])
        not_visited_parents = _.filter(
            commit.parents,
            lambda parent_commit: parent_commit.hexsha not in visited)
        if not_visited_parents:
            queue.extend(not_visited_parents)
            visited |= set(
                _.map(not_visited_parents, lambda commit: commit.hexsha))
    reachable_tags.sort(key=lambda t: parse_version_info(t), reverse=True)
    if not reachable_tags:
        warning('no release tag found, falling back to root commit')
        root_commits = repo.iter_commits(rev=commit, max_parents=0)
        root_commit = next(root_commits, None)
        if not root_commit:
            fail('could not determine root commit from rev {rev}'.format(
                rev=commit.hexsha))
        # a second root commit makes the range ambiguous
        if next(root_commits, None):
            fail(
                'cannot determine range for release notes. Repository has multiple root commits. '
                'Specify range via commit_range parameter.')
        reachable_tags.append(root_commit.hexsha)
    return reachable_tags
def test_list_format_transformation(self):
    """Formatting a list template applies the FormatTrans post-transform to every element."""

    def prefix_names(profile):
        # mutate in place and return, as FormatTrans expects the mapped value
        profile['first_name'] = 'First Name is ' + profile['first_name']
        profile['last_name'] = 'Last Name is ' + profile['last_name']
        return profile

    item_template = [{
        'first_name': S('first_name'),
        'last_name': S('last_name'),
    }]
    template = {
        'profiles': FormatTrans(item_template,
                                lambda x: _.map(x, prefix_names)),
    }

    actual = self.matched.format(template)

    self.assertEqual(
        actual, {
            'profiles': [
                {
                    'first_name': 'First Name is Marc',
                    'last_name': 'Last Name is Simon',
                },
                {
                    'first_name': 'First Name is Bryan',
                    'last_name': 'Last Name is Coloma',
                },
            ]
        })
def get_key(self):
    """Select the gpg key to use for signing.

    With exactly one key available it is used directly; with several, the
    user is prompted (spinner paused around the prompt) and the answer is
    matched back to a key by its short key id.

    Raises:
        Exception: when no key could be determined (zero keys available, or
            the prompt answer did not match any key).
    """
    c = self.app.conf  # NOTE(review): unused local — candidate for removal
    spinner = self.app.spinner
    gpg_keys = self.app.gpg_keys
    gpg_key = None
    if len(gpg_keys) == 1:
        gpg_key = gpg_keys[0]
    elif len(gpg_keys) > 1:
        # pause the spinner so the interactive prompt renders cleanly
        spinner.stop()
        answer = munchify(
            inquirer.prompt([
                inquirer.List('gpg_key',
                              message='choose a gpg key',
                              choices=_.map(
                                  gpg_keys, lambda x: x.pub.key.short + ': ' +
                                  x.name + ' <' + x.email + '>'))
            ])).gpg_key
        spinner.start()
        # the choice string starts with the short key id up to the first ':'
        gpg_key = _.find(
            gpg_keys,
            lambda x: x.pub.key.short == answer[:answer.index(':')])
    if not gpg_key:
        raise Exception('failed to find gpg key')
    return gpg_key
checkCoeffList(coeffs_list) + 1)  # NOTE(review): tail of a definition that starts above this excerpt — left untouched

# Pydash-style functional helpers for polynomial coefficient lists.
prepareCoeefs = _.flow(_.reverse, sliceZeroCoeffs)
size = lambda list: _.size(list)
reverse = lambda list: _.reverse(list)
# append `appends` (a list) to the end of `list`
append = lambda appends, list: list + appends
curryLeftAppend = _.curry(append)
curryRightAppend = _.curry_right(append)
isNumerical = lambda x: isinstance(x, (int, float))
isListEmpty = lambda list: size(list) == 0
isListNumerical = _().every(isNumerical)
# add `value` to the first element, keeping the rest of the list
addToFirstElem = _.curry(lambda value, list: [list[0] + value] + list[1:])
# element-wise sum; assumes len(b_list) >= len(a_list)
sumIdenticalSizeList = lambda a_list, b_list: _.map(
    a_list, lambda x, index: b_list[index] + x)
# size b gt size a
sumGtList = lambda a, b: _.flow(
    sumIdenticalSizeList,
    curryLeftAppend(_.slice(b, size(a), size(b))))(a, b)
# order-insensitive list sum: dispatch so the longer list is `b`
sumList = lambda a, b: sumGtList(a, b) if size(b) > size(a) else sumGtList(
    b, a)
valueList = _.curry(lambda item, length: map(item, range(length)))
# NOTE(review): the lambda parameter `_` shadows the pydash module here
zeroList = valueList(lambda _: 0)
listMulOnValue = lambda num: _().map(lambda x: num * x)
# multiply a polynomial (coefficient list) by num * x**degree
polyMulMonome = lambda degree, num, list: _.flow(
    listMulOnValue(num), curryRightAppend(zeroList(degree)))(list)


def list_insert(lst, item):
    """Append `item` to `lst` in place."""
    lst.append(item)