def get(cls, cli, sp=None, port_id=None):
    """Return SP ports, optionally narrowed by SP and/or port id.

    :param cli: CLI handle used to populate the port list.
    :param sp: optional SP to filter on.
    :param port_id: optional port id to filter on.
    :return: the (possibly filtered) port list; when both ``sp`` and
        ``port_id`` are given and exactly one port matches, that single
        port object is returned instead of a list.
    """
    ret = VNXSPPortList(cli)
    # List comprehensions instead of filter(): len()/indexing of a
    # filter() result only works on py2 where filter returns a list.
    if sp is not None:
        ret = [p for p in ret if p.sp == sp]
    if port_id is not None:
        ret = [p for p in ret if p.port_id == port_id]
    if sp is not None and port_id is not None and len(ret) == 1:
        # Fully-qualified lookup with a unique hit: unwrap to the port.
        ret = ret[0]
    return ret
def get(cls, cli, sp=None, port_id=None, vport_id=None):
    """Return connection ports, narrowed by SP / port id / virtual port id.

    :param cli: CLI handle used to construct the port (list).
    :param sp: optional SP to filter on.
    :param port_id: optional physical port id to filter on.
    :param vport_id: optional virtual port id to filter on.
    :return: a single ``VNXConnectionPort`` when all three selectors are
        supplied, otherwise a (possibly filtered) port list.
    """
    if sp is not None and port_id is not None and vport_id is not None:
        # Fully qualified: address the port directly.
        ret = VNXConnectionPort(sp, port_id, vport_id, cli)
    else:
        ret = VNXConnectionPortList(cli)
        # List comprehensions instead of filter(): py3's filter object
        # cannot be filtered repeatedly the way the old code assumed.
        if sp is not None:
            ret = [p for p in ret if p.sp == sp]
        if port_id is not None:
            ret = [p for p in ret if p.port_id == port_id]
        # BUG FIX: this guard previously re-checked `port_id`, so with a
        # port_id and no vport_id the list was wrongly narrowed to ports
        # whose virtual_port_id == None.
        if vport_id is not None:
            ret = [p for p in ret if p.virtual_port_id == vport_id]
    return ret
def do_run(self, left, right, arg, scope):
    """Keep the rows of ``left`` whose (indexed) key appears in ``right``.

    When the optional ``index`` argument names a secondary index, rows
    are matched on the value produced by that index function; otherwise
    matching falls back to the primary key 'id'.
    # NOTE(review): return type differs between branches -- the index
    # path returns a list, the fallback returns filter() (a list on py2,
    # a lazy iterator on py3); presumably callers accept either.
    """
    if 'index' in self.optargs and self.optargs['index'] != 'id':
        # Resolve the index to a callable; is_multi marks a multi-index
        # (one row can emit several key values).
        index_func, is_multi = self.find_index_func_for_scope(
            self.optargs['index'], arg)
        if isinstance(index_func, RFunc):
            # Reql function: evaluate it against each document.
            map_fn = lambda d: index_func.run([d], scope)
        else:
            map_fn = index_func
        result = []
        left = list(left)
        if is_multi:
            # De-duplicate by 'id': a row matching several values in
            # `right` must still be emitted only once.
            seen_ids = set([])
            for elem in left:
                indexed = map_fn(elem)
                # Normalize a scalar key to a one-element collection.
                if not isinstance(indexed, (tuple, list)):
                    indexed = [indexed]
                indexed = set(indexed)
                for match_item in right:
                    if match_item in indexed:
                        if elem['id'] not in seen_ids:
                            seen_ids.add(elem['id'])
                            result.append(elem)
                            break
        else:
            # Single-valued index: plain membership test.
            for elem in left:
                if map_fn(elem) in right:
                    result.append(elem)
        return result
    else:
        # Default: match on the primary key via the shared util helper.
        return filter(util.match_attr_multi('id', right), left)
def do_run(self, left, right, arg, scope):
    """Keep the rows of ``left`` whose (indexed) key appears in ``right``.

    With an ``index`` optarg other than 'id', rows are matched on the
    value(s) produced by that secondary index; otherwise matching uses
    the primary key 'id'.
    """
    if 'index' not in self.optargs or self.optargs['index'] == 'id':
        # Primary-key fast path, delegated to the shared util helper.
        return filter(util.match_attr_multi('id', right), left)

    # Resolve the secondary index to a callable; `multi` marks an index
    # where one row may emit several key values.
    index_fn, multi = self.find_index_func_for_scope(
        self.optargs['index'], arg)
    if isinstance(index_fn, RFunc):
        # Reql function: evaluate it against each document.
        extract = lambda doc: index_fn.run([doc], scope)
    else:
        extract = index_fn

    rows = list(left)
    if not multi:
        # Single-valued index: simple membership test.
        return [row for row in rows if extract(row) in right]

    matched = []
    emitted = set([])
    for row in rows:
        keys = extract(row)
        # Normalize a scalar key into a one-element collection.
        if not isinstance(keys, (tuple, list)):
            keys = [keys]
        keys = set(keys)
        for wanted in right:
            # Emit each row at most once, even if several values match.
            if wanted in keys and row['id'] not in emitted:
                emitted.add(row['id'])
                matched.append(row)
                break
    return matched
def test_get_seqs_by_ctg(self):
    """Check a2f.get_seqs_by_ctg groups shuffled fastq records by contig.

    Builds matching RED/fastq fixtures, shuffles the fastq input, and
    verifies the grouped result equals slicing the original records with
    each contig's iid list.
    # NOTE(review): this test relies on py2 semantics -- filter()/map()
    # returning lists and Seq.tostring() -- and on the deprecated
    # assertEquals alias; it would need rework to run under py3.
    """
    # iids (1-based record indices) grouped per contig.
    iids_by_ctg = [[1, 2, 4], [1, 2], [3, 5], [4], [6], [2, 4]]
    # Records 2 and 4 share the same sequence to exercise non-unique joins.
    get_seq = lambda i: 'ACGT' if i in [2, 4] else 'A'*i
    reds = [make_mock_red(i, i, get_seq(i), 'D'*len(get_seq(i)))
            for i in range(1, len(iids_by_ctg)+ 1)]
    fastq_records = [make_seq_record(get_seq(i))
                     for i in range(1, len(iids_by_ctg)+1)]
    # Keep the original ordering; shuffle the input to prove the function
    # does not depend on record order.
    saved_records = fastq_records[:]
    random.shuffle(fastq_records)
    result = filter(bool, map(tuple, a2f.get_seqs_by_ctg(fastq_records, reds, iids_by_ctg)))
    # Build the expected groups by indexing saved_records with each
    # contig's (0-based) iids.
    getters_by_ctg = map(lambda A: itemgetter(*[i - 1 for i in A]), iids_by_ctg)
    raw_expected = [get(saved_records) for get in getters_by_ctg]
    # itemgetter with a single index yields a bare object; wrap it so
    # every entry is a tuple before comparing.
    expected = filter(bool, map(lambda a: (a,) if type(a) is not tuple else a, raw_expected))
    # Bio.Seq objects can't be compared directly need to get __dict__ attribute
    dicter = lambda tup: [obj.seq.tostring() for obj in tup]
    self.assertEquals(map(dicter, expected), map(dicter, result))
    pass
def has_snap(self):
    """
    This method won't count the snaps in "destroying" state!

    :return: false if no snaps or all snaps are destroying.
    """
    # any() short-circuits at the first live snap and, unlike
    # len(filter(...)), also works on py3 where filter() is an iterator.
    return any(s.state != SnapStateEnum.DESTROYING for s in self.snapshots)
def to_int_arr(inputs):
    """Convert *inputs* to a list of ints.

    A string is first split on commas/spaces; elements that fail
    conversion (``to_int`` yields None) are dropped.  ``None`` input
    produces an empty list.
    """
    if inputs is None:
        return []
    if isinstance(inputs, six.string_types):
        inputs = re.split(',| ', inputs)
    converted = map(to_int, inputs)
    # Keep only successful conversions.
    return [value for value in converted if value is not None]
def _get_member(self, index):
    """Return the first member of ``self.list`` whose SP name equals *index*.

    :param index: SP enum value to look up.
    :return: the matching member, or None when no member's name resolves
        to *index*.
    """
    # next() with a default stops at the first hit instead of filtering
    # the whole list, and avoids py2-only len()/indexing of a filter()
    # result.
    return next(
        (member for member in self.list
         if VNXSPEnum.from_str(member.name) == index),
        None)
def get_candidate_sequences(self, under_index): ''' :param int under_index: the current position in the matrix which is under-covered. Candidates have not been used already, overlap the current index, and are not "orphan" or "anomalous" reads. orphaned/anomalous reads don't get spotted by default mpileup command. see https://github.com/VDBWRAIR/ngs_mapper/issues/112 ''' #TODO: have this only look backwards until reach under-covered index start = max(under_index - self.max_seq_length, 0) sub_matrix = self.seq_matrix[start:under_index+1] '''flatten the matrix''' prev_seqs = [seq for row in sub_matrix for seq in row] return filter(lambda seq: seq.overlap >= under_index and self.allow(seq) and not seq.picked, prev_seqs)
def get_startup(cls, node):
    """
    Generate the appropriate command-line based on node interfaces.

    :param node: node whose non-control interfaces are appended with -i.
    :return: one-element tuple holding the command line.
    """
    cmd = cls.startup[0]
    # Skip control interfaces; a list comprehension (not filter()) so
    # truth-testing below also works on py3.
    netifs = [ifc for ifc in node.netifs()
              if not getattr(ifc, "control", False)]
    if netifs:
        cmd += " -i "
        cmd += " -i ".join(ifc.name for ifc in netifs)
    return (cmd, )
def get_candidate_sequences(self, under_index):
    '''
    Return the sequences that could add coverage at *under_index*.

    :param int under_index: the current position in the matrix which is
        under-covered.
    Candidates have not been used already, overlap the current index, and
    are not "orphan" or "anomalous" reads.  orphaned/anomalous reads
    don't get spotted by default mpileup command.
    see https://github.com/VDBWRAIR/ngs_mapper/issues/112
    '''
    #TODO: have this only look backwards until reach under-covered index
    # Only sequences starting within max_seq_length of the position can
    # possibly overlap it, so restrict the window accordingly.
    window_start = max(0, under_index - self.max_seq_length)
    recent_rows = self.seq_matrix[window_start:under_index + 1]
    # Flatten the windowed rows into one list of sequences.
    candidates = [entry for row in recent_rows for entry in row]

    def _eligible(seq):
        # Reaches the position, passes the allow() policy, not yet used.
        return (seq.overlap >= under_index
                and self.allow(seq)
                and not seq.picked)

    return filter(_eligible, candidates)
def test_noniterators_produce_lists(self):
    """Assert the py2-style builtins return concrete lists.

    # NOTE(review): these assertions only hold when range/zip/map/filter
    # are the list-returning py2 versions (presumably re-bound by a
    # compatibility layer imported at module level -- verify); the
    # native py3 builtins return lazy iterators and would fail here.
    """
    l = range(10)
    self.assertTrue(isinstance(l, list))
    l2 = zip(l, list('ABCDE')*2)
    self.assertTrue(isinstance(l2, list))
    double = lambda x: x*2
    l3 = map(double, l)
    self.assertTrue(isinstance(l3, list))
    is_odd = lambda x: x % 2 == 1
    l4 = filter(is_odd, range(10))
    self.assertEqual(l4, [1, 3, 5, 7, 9])
    self.assertTrue(isinstance(l4, list))
def _get_correct_checkpoint(root, checkpoint): ckptPath = root.joinpath("checkpoints") if ckptPath.is_dir(): if checkpoint != 'latest': allCheckPoints = filter(lambda x: x.name == checkpoint, ckptPath.iterdir()) relevant_checkpoint = None if len( allCheckPoints) == 0 else allCheckPoints[0] else: allCheckPoints = list(ckptPath.iterdir()) relevant_checkpoint = None if len(allCheckPoints) == 0 else sorted( allCheckPoints, key=lambda x: int(x.name), reverse=True)[0] return relevant_checkpoint else: raise IOError("Checkpoint path does not exist!!")
def get_seqs_by_ctg(fastq_records, reds, iids_by_ctg):
    ''' Transforms the fastq records and reds into pandas.DataFrame objects and joins them
    on the sequence string column. Then, this DataFrame is sliced according to ``iids_by_ctg``
    and returned. The result is a 2D list of SeqRecord objects organized by the contigs they mapped to.
    :param list fastq_records: a collection of Bio.SeqRecord objects
    :param list reds: a collection of amos.RED objects
    :param list iids_by_ctg: a 2D list of iids (organized by contig)
    :return A 2D list of bio.SeqRecord objects organized by the contig they map to.
    '''
    fastq_df = bio_records_as_df(fastq_records)
    reds_df = amos_reds_as_df(collection=reds)
    # Join on the sequence string itself.
    fastq_df, reds_df = fastq_df.set_index('seq'), reds_df.set_index('seq')
    assert reds_df.shape == fastq_df.shape, "should have the same number of columns (seqs, seq_obj/iid) and rows (fastq reads / AMOS REDs."
    reds_with_seqs_df = join_non_unique_dataframes(reds_df, fastq_df)
    dfs_by_ctg = extract_dfs_by_iids(reds_with_seqs_df, iids_by_ctg)
    seqs_by_ctg = [df['seq_obj'] for df in dfs_by_ctg]
    # BUG FIX: `assert not filter(...)` is broken on py3 -- a filter
    # object is always truthy, so the assert would fire unconditionally.
    # any() expresses the intended "no series contains NaN" check on
    # both py2 and py3.
    assert not any(series_contains_nan(s) for s in seqs_by_ctg), "NaN value found in resulting dataframe, something went wrong."
    return seqs_by_ctg
def get_seqs_by_ctg(fastq_records, reds, iids_by_ctg):
    ''' Transforms the fastq records and reds into pandas.DataFrame objects and joins them
    on the sequence string column. Then, this DataFrame is sliced according to ``iids_by_ctg``
    and returned. The result is a 2D list of SeqRecord objects organized by the contigs they mapped to.
    :param list fastq_records: a collection of Bio.SeqRecord objects
    :param list reds: a collection of amos.RED objects
    :param list iids_by_ctg: a 2D list of iids (organized by contig)
    :return A 2D list of bio.SeqRecord objects organized by the contig they map to.
    '''
    fastq_df = bio_records_as_df(fastq_records)
    reds_df = amos_reds_as_df(collection=reds)
    # Join on the sequence string itself.
    fastq_df, reds_df = fastq_df.set_index('seq'), reds_df.set_index('seq')
    assert reds_df.shape == fastq_df.shape, "should have the same number of columns (seqs, seq_obj/iid) and rows (fastq reads / AMOS REDs."
    reds_with_seqs_df = join_non_unique_dataframes(reds_df, fastq_df)
    dfs_by_ctg = extract_dfs_by_iids(reds_with_seqs_df, iids_by_ctg)
    seqs_by_ctg = [df['seq_obj'] for df in dfs_by_ctg]
    # BUG FIX: `assert not filter(...)` is broken on py3 -- a filter
    # object is always truthy, so the assert would fire unconditionally.
    # any() expresses the intended "no series contains NaN" check on
    # both py2 and py3.
    assert not any(
        series_contains_nan(s) for s in seqs_by_ctg
    ), "NaN value found in resulting dataframe, something went wrong."
    return seqs_by_ctg
def get_startup(cls, node):
    """
    Generate the appropriate command-line based on node interfaces.

    Adds SMF flooding options when the node runs the SMF service and one
    -i flag per non-control interface.
    :return: one-element tuple holding the command line.
    """
    cmd = cls.startup[0]
    cmd += " -l /var/log/nrlnhdp.log"
    cmd += " -rpipe %s_nhdp" % node.name
    # Materialize as a list: a py3 map object would be consumed by the
    # membership test below.
    servicenames = [svc.name for svc in node.services]
    if "SMF" in servicenames:
        cmd += " -flooding ecds"
        cmd += " -smfClient %s_smf" % node.name
    # Skip control interfaces; list comprehension instead of filter() so
    # the truth test works on py3 too.
    netifs = [ifc for ifc in node.netifs()
              if not getattr(ifc, "control", False)]
    if netifs:
        cmd += " -i "
        cmd += " -i ".join(ifc.name for ifc in netifs)
    return (cmd, )
def generate_config(cls, node, filename):
    """
    Generate a startup script for SMF.  Because nrlsmf does not
    daemonize, it can cause problems in some situations when launched
    directly using vcmd.

    :param node: node providing name, services and interfaces.
    :param filename: unused; kept for the generate_config() interface.
    :return: shell-script text, or "" when the node has no usable
        (non-control) interfaces.
    """
    cfg = "#!/bin/sh\n"
    cfg += "# auto-generated by nrl.py:NrlSmf.generateconfig()\n"
    comments = ""
    cmd = "nrlsmf instance %s_smf" % node.name
    # Materialize as lists: on py3 a map object would be consumed by the
    # first membership test, and netifs needs len()/indexing.
    servicenames = [svc.name for svc in node.services]
    netifs = [ifc for ifc in node.netifs()
              if not getattr(ifc, "control", False)]
    if not netifs:
        return ""
    if "arouted" in servicenames:
        comments += "# arouted service is enabled\n"
        cmd += " tap %s_tap" % (node.name, )
        cmd += " unicast %s" % cls.firstipv4prefix(node, 24)
        cmd += " push lo,%s resequence on" % netifs[0].name
    # Pick the forwarding mode based on which routing service runs.
    if "NHDP" in servicenames:
        comments += "# NHDP service is enabled\n"
        cmd += " ecds "
    elif "OLSR" in servicenames:
        comments += "# OLSR service is enabled\n"
        cmd += " smpr "
    else:
        cmd += " cf "
    cmd += ",".join(ifc.name for ifc in netifs)
    cmd += " hash MD5"
    cmd += " log /var/log/nrlsmf.log"
    cfg += comments + cmd + " < /dev/null > /dev/null 2>&1 &\n\n"
    return cfg
def saveGissue():
    """Persist the issue buffer back to GitHub.

    Parses the current vim buffer (a rendered issue), rebuilds the issue
    dict from the "## ..." header lines, then either creates a new issue
    (buffer number "new") or PATCHes the existing one, and finally posts
    any comment typed under "## Add a comment".
    """
    token = vim.eval("g:github_access_token")
    if not token:
        print("github-issues.vim: In order to save an issue or add a comment, you need to define a GitHub token. See: https://github.com/jaxbot/github-issues.vim#configuration")
        return
    # parens = (owner, repo, issue-number) derived from the buffer name.
    parens = getFilenameParens()
    number = parens[2]
    encoding = "utf-8"  # TODO: Get this from vim
    issue = {
        'title': '',
        'body': '',
        'assignees': '',
        'labels': '',
        'milestone': ''
    }
    # commentmode state machine: 0 = in issue body, 1 = inside the
    # "## Comments" section, 2 = saw "## Add a comment" and waiting for
    # the first non-empty line, 3 = collecting new comment text.
    commentmode = 0
    comment = ""
    for line in vim.current.buffer:
        if commentmode == 1:
            if line == "## Add a comment":
                commentmode = 2
            continue
        if commentmode == 2:
            if line != "":
                commentmode = 3
                comment += line + "\n"
            continue
        if commentmode == 3:
            comment += line + "\n"
            continue
        if line == "## Comments":
            commentmode = 1
            continue
        # Read-only metadata line; never written back.
        if len(line.split("## Reported By:")) > 1:
            continue
        title = line.split("## Title:")
        if len(title) > 1:
            # Strip the trailing " (<number>)" suffix shown in the UI.
            issue['title'] = title[1].strip().split(" (" + number + ")")[0]
            continue
        state = line.split("## State:")
        if len(state) > 1:
            if state[1].strip().lower() == "closed":
                issue['state'] = "closed"
            else:
                issue['state'] = "open"
            continue
        milestone = line.split("## Milestone:")
        if len(milestone) > 1:
            # Map the milestone title back to its numeric id via the API.
            milestones = getMilestoneList(parens[0] + "/" + parens[1], "")
            milestone = milestone[1].strip()
            for mstone in milestones:
                if mstone["title"] == milestone:
                    issue['milestone'] = str(mstone["number"])
                    break
            continue
        labels = line.split("## Labels:")
        if len(labels) > 1:
            issue['labels'] = labels[1].lstrip().split(', ')
            continue
        assignees = line.split("## Assignees:")
        if len(assignees) > 1:
            issue['assignees'] = assignees[1].lstrip().split(' ')
            continue
        if line == SHOW_COMMITS or line == SHOW_FILES_CHANGED or "## Branch Name:" in line:
            continue
        # Anything else is part of the issue body.
        if issue['body'] != '':
            issue['body'] += '\n'
        issue['body'] += line
    # remove blank entries
    # NOTE(review): on py3 this leaves a filter iterator in the dict,
    # which json.dumps below cannot serialize -- verify against the
    # supported Python version.
    issue['labels'] = filter(bool, issue['labels'])
    if number == "new":
        # Creating: drop empty optional fields so the API applies its
        # defaults instead of rejecting empty strings.
        if issue['assignees'] == '':
            del issue['assignees']
        if issue['milestone'] == '':
            del issue['milestone']
        if issue['body'] == '':
            del issue['body']
        data = ""
        try:
            repourl = getUpstreamRepoURI()
            url = ghUrl("/issues", repourl)
            data = json.dumps(issue).encode(encoding)
            request = urllib2.Request(url, data)
            data = json.loads(urllib2.urlopen(request, timeout=2).read())
            # Rename the buffer to the newly assigned issue number.
            parens[2] = str(data['number'])
            vim.current.buffer.name = "gissues/" + \
                parens[0] + "/" + parens[1] + "/" + parens[2]
        except urllib2.HTTPError as e:
            # NOTE(review): `a and b or c` parses as `(a and b) or c`,
            # so a 404 matches even when "code" is absent -- confirm
            # this precedence is intended.
            if "code" in e and e.code == 410 or e.code == 404:
                print(
                    "github-issues.vim: Error creating issue. Do you have a github_access_token defined?")
            else:
                print("github-issues.vim: Unknown HTTP error:")
                print(e)
                print(data)
                print(url)
                print(issue)
    else:
        # Updating an existing issue via PATCH.
        repourl = vim.eval("b:ghissue_repourl")
        url = ghUrl("/issues/" + number, repourl)
        data = json.dumps(issue).encode(encoding)
        # TODO: Fix POST data should be bytes.
        request = urllib2.Request(url, data)
        request.get_method = lambda: 'PATCH'
        try:
            urllib2.urlopen(request, timeout=2)
        except urllib2.HTTPError as e:
            if "code" in e and e.code == 410 or e.code == 404:
                print("Could not update the issue as it does not belong to you!")
    if commentmode == 3:
        # A new comment was typed; post it separately.
        try:
            url = ghUrl("/issues/" + parens[2] + "/comments", repourl)
            data = json.dumps({'body': comment}).encode(encoding)
            request = urllib2.Request(url, data)
            urllib2.urlopen(request, timeout=2)
        except urllib2.HTTPError as e:
            if "code" in e and e.code == 410 or e.code == 404:
                print(
                    "Could not post comment. Do you have a github_access_token defined?")
    if commentmode == 3 or number == "new":
        # Re-render so the buffer reflects server-assigned state.
        showIssue()
    # mark it as "saved"
    vim.command("setlocal nomodified")
def do_run(self, sequence, filt_fn, arg, scope):
    # Apply the predicate `filt_fn` to every element of `sequence`.
    # `arg` and `scope` are unused here but are part of the shared
    # do_run() signature used by the other term implementations.
    # NOTE(review): on py2 filter() returns a list, on py3 a lazy
    # iterator -- presumably callers accept either; verify.
    return filter(filt_fn, sequence)
def do_run(self, sequence, filter_fn, arg, scope):
    """Count the elements of *sequence* accepted by *filter_fn*.

    `arg` and `scope` are unused here but are part of the shared
    do_run() signature used by the other term implementations.
    :return: int count of matching elements.
    """
    # sum() over a generator counts matches without materializing the
    # filtered list; the old len(filter(...)) also breaks on py3 where
    # filter() returns an iterator with no len().
    return sum(1 for elem in sequence if filter_fn(elem))
def do_run(self, sequence, to_match, arg, scope):
    # Keep the elements of `sequence` whose attributes satisfy the
    # matcher built by util.match_attrs(to_match).  `arg` and `scope`
    # are unused here but are part of the shared do_run() signature.
    # NOTE(review): on py2 filter() returns a list, on py3 a lazy
    # iterator -- presumably callers accept either; verify.
    return filter(util.match_attrs(to_match), sequence)