def check_illumina_format(inputs, targets):
    '''
    Raise an error if any of the input filenames are not in the desired format.

    inputs : filename or filehandle, or iterable of strings/filehandles
        files to be checked
    targets : string, or iterable of strings
        'illumina13', 'illumina18', or 'ambiguous', or some combination
    '''
    
    # convert to lists if required
    inputs = util.listify(inputs)
    targets = util.listify(targets)
    
    # check that input targets are in acceptable list
    acceptable_targets = set(['illumina13', 'illumina18', 'ambiguous'])
    if not set(targets).issubset(acceptable_targets):
        bad_targets = set(targets) - acceptable_targets
        raise ArgumentError("unrecognized format type(s): %s" % bad_targets)
    
    # check all the formats
    formats = [file_format(i) for i in inputs]
    tests = [form in targets for form in formats]
    bad_files = [i for i, test in zip(inputs, tests) if not test]
    bad_forms = [form for form, test in zip(formats, tests) if not test]
    
    # complain if something went wrong
    if False in tests:
        bad_info = "\n".join([" ".join([i, form]) for i, form in zip(bad_files, bad_forms)])
        raise RuntimeError("files do not appear to be in %s format: \n%s" % (targets, bad_info))
    else:
        return True
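Every snippet on this page leans on a small listify helper that the page itself never shows. As a rough reference, here is a minimal sketch of what such a helper typically looks like; this is an assumption inferred from the call sites, not the actual util module of any of these projects:

def listify(x):
    '''Wrap a single value in a list; pass lists through unchanged.

    Minimal assumed reconstruction -- the projects quoted here each ship
    their own util.listify, and details (handling of None, tuples, or
    broadcasting against a second argument) differ between them.
    '''
    if isinstance(x, list):
        return x
    return [x]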
 def on_train_begin(self, session):
     super(LRFindScheduler, self).on_train_begin(session)
     if self.start_lr is not None: self.base_lrs = util.listify(self.start_lr, session.optimizer.param_groups)
     self.end_lr = util.listify(self.end_lr, session.optimizer.param_groups)
     self.multipliers = [(end_lr/base_lr)**(1/self.num_examples) for base_lr, end_lr in zip(self.base_lrs, self.end_lr)]
     self.lrs = []
     self.best = 40
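on_train_begin above sets up a geometric learning-rate sweep: every step multiplies the lr by a constant factor so that it moves from base_lr to end_lr over num_examples steps. A small numeric sketch of that arithmetic, with made-up values:

# Made-up values: sweep the lr from 1e-5 up to 1.0 over 100 steps.
base_lr, end_lr, num_examples = 1e-5, 1.0, 100

# Same formula as in on_train_begin above: one constant per-step factor.
multiplier = (end_lr / base_lr) ** (1 / num_examples)   # about 1.122

lr = base_lr
for _ in range(num_examples):
    lr *= multiplier

print(round(lr, 6))   # ~1.0: after num_examples steps the lr has reached end_lr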
Example #3
    def search(self, query=None, col=None, col_id=None, doc_id=None,
               advanced=False, format=None, exact=None, exclusions=None,
               tophit=0, maxhits=10, sort_by=None,
               filenameq=None):
        """Search document collections.

        :Parameters:
            - `query`: the search query
            - `col`: the (list of) collection(s) to be searched.
            - `doc_id`: A document to generate a search query.
            - `advanced`: the style of search form.

        One of `query` or (`col_id` and `doc_id`) should be provided

        If `col` and `query` are provided then use `query` to search
        all the document collections named by `col` and return the
        results. (In this case the value of `advanced` is ignored.)

        Otherwise render a page containing a form to initiate a new
        search. If `advanced` tests true then the form will have more
        structure.

        """
        assert not (query and doc_id)
        template = self._advanced_template if advanced else self._template

        if col:
            cols = util.listify(col)
        else:
            cols = []

        if format:
            formats = util.listify(format)
        else:
            formats = []

        tophit = int(tophit)
        maxhits = int(maxhits)
        if ((query or exact or exclusions or format or filenameq)
            or (col_id and doc_id)):
            if 'html' in formats:
                formats.append('htm')
            results = self._flax_data.collections.search(
                query, col_id=col_id, doc_id=doc_id, cols=cols, format=format,
                exact=exact, exclusions=exclusions, tophit=tophit,
                maxhits=maxhits, sort_by=sort_by, filenameq=filenameq)
            return template(self._flax_data.collections, results, cols,
                            self._flax_data.formats, formats, sort_by,
                            filenameq)
        else:
            return template(self._flax_data.collections, None, cols,
                            self._flax_data.formats, formats, sort_by,
                            filenameq)
 def _update_mappings(self, path=None, mapping=None, **kwargs):
     # This relies on the ordering of the form elements, I'm not
     # sure if it's safe to do so, although it seems to work fine.
     if path is None or mapping is None:
         return
     self.paths = util.listify(path)
     # Normalise all path separators to system specific versions.
     self.paths = [path.replace('/', os.path.sep) for path in self.paths]
     mappings = util.listify(mapping)
     # be careful with the order here
     pairs = zip(self.paths, mappings)
     self.mappings = dict(filter(lambda (x, y): x != '', pairs))
     self.paths = filter(None, self.paths)
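_update_mappings pairs each submitted path with its mapping and drops entries whose path is blank. A small sketch of the resulting state with made-up form values; it mirrors the logic above (using a dict comprehension instead of the filter/lambda form) rather than the real Flax form handling:

import os

# Hypothetical form input: the third path was left blank.
path = ['docs/a', 'docs/b', '']
mapping = ['map-a', 'map-b', 'map-c']

paths = [p.replace('/', os.path.sep) for p in path]
mappings = {x: y for x, y in zip(paths, mapping) if x != ''}
paths = [p for p in paths if p]

print(mappings)   # {'docs/a': 'map-a', 'docs/b': 'map-b'} (with OS-specific separators)
print(paths)      # the two non-empty, separator-normalised paths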
Example #5
def genCharacterReport(mainFrame, sp):
    report = characterreport.CharacterReport(sp)

    if not report.cinfo:
        wx.MessageBox("No Speaking Characters Found", "Error", wx.OK,
                      mainFrame)

        return

    charNames = []
    for s in util.listify(report.cinfo, "name"):
        charNames.append(misc.CheckBoxItem(s))

    dlg = misc.CheckBoxDlg(mainFrame, "Report Type", report.inf,
                           "Information to include", False, charNames,
                           "Characters to include", True)

    ok = False
    if dlg.ShowModal() == wx.ID_OK:
        ok = True

        for i in range(len(report.cinfo)):
            report.cinfo[i].include = charNames[i].selected

    dlg.Destroy()

    if not ok:
        return

    data = report.generate()

    gutil.showTempPDF(data, sp.cfgGl, mainFrame)
Example #6
def genCharacterReport(mainFrame, sp):
    report = CharacterReport(sp)

    if not report.cinfo:
        wx.MessageBox("No characters speaking found.",
                      "Error", wx.OK, mainFrame)

        return

    charNames = []
    for s in util.listify(report.cinfo, "name"):
        charNames.append(misc.CheckBoxItem(s))

    dlg = misc.CheckBoxDlg(mainFrame, "Report type", report.inf,
        "Information to include:", False, charNames,
        "Characters to include:", True)

    ok = False
    if dlg.ShowModal() == wx.ID_OK:
        ok = True

        for i in range(len(report.cinfo)):
            report.cinfo[i].include = charNames[i].selected

    dlg.Destroy()

    if not ok:
        return

    data = report.generate()

    gutil.showTempPDF(data, sp.cfgGl, mainFrame)
Example #7
def build_exam():
    inv_pcts = {s: 1.0 / scoring[s][1] / scoring[s][0] for s in subjects}
    rel_pcts = {s: inv_pcts[s] / sum(inv_pcts.values()) for s in subjects}
    num_qs = {s: int(round(exam_length * rel_pcts[s])) for s in rel_pcts}
    exam = {s: heuristic(s, num_qs[s]) for s in scoring}

    return listify(exam)
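build_exam weights each subject inversely to scoring[s][0] * scoring[s][1] and sizes the exam accordingly. A worked numeric sketch of just that allocation, with invented scoring values and without the (not shown) heuristic step:

# Invented inputs purely to illustrate the allocation arithmetic.
exam_length = 50
subjects = ['math', 'reading']
scoring = {'math': (2.0, 10), 'reading': (1.0, 5)}   # meaning of the tuple is assumed

inv_pcts = {s: 1.0 / scoring[s][1] / scoring[s][0] for s in subjects}
# math: 1/10/2.0 = 0.05, reading: 1/5/1.0 = 0.2
rel_pcts = {s: inv_pcts[s] / sum(inv_pcts.values()) for s in subjects}
# math: 0.2, reading: 0.8
num_qs = {s: int(round(exam_length * rel_pcts[s])) for s in rel_pcts}

print(num_qs)   # {'math': 10, 'reading': 40}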
def monitor_project_composition(conf):
    """
    issuetype and status composition
    """
    statuses = conf['planning_workflow_statuses'] + conf[
        'wip_workflow_statuses']
    status_ordinal_map = dict(
        zip(statuses, [
            chr(i) for i in reversed(
                range(ord('z') - len(statuses) + 1,
                      ord('z') + 1))
        ]))
    issues = jira.search_issues('project = "%s" and status in (%s)' %
                                (conf['project_id'], listify(statuses)),
                                maxResults=500)
    issue_type_status_counts = Counter([
        (i.fields.issuetype.name, i.fields.status.name, is_parent_issue(i))
        for i in issues if i.fields.status.name in statuses
    ])
    for (issue_type, status,
         is_parent), count in issue_type_status_counts.iteritems():
        PROJECT_UNRESOLVED_ISSUES.labels(
            conf['project_name'], issue_type, status,
            status_ordinal_map[status] + '_' +
            status.replace(' ', '_').lower(),
            str(status in conf['wip_workflow_statuses']), is_parent,
            conf['product_granularity'] == issue_type).set(count)
        logger.info(
            'Observed for %s (%s), number of %s %s issues in status "%s": %d' %
            (conf['project_name'], conf['project_id'],
             'parent' if is_parent else 'child', issue_type, status, count))
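The status_ordinal_map above gives each workflow status a single letter counting backwards from 'z', so the earliest status gets 'z' and later statuses get earlier letters; prefixing the metric label with that letter presumably makes an alphabetical sort list later statuses first. A sketch with a hypothetical three-status workflow:

statuses = ['To Do', 'In Progress', 'Done']   # made-up workflow
status_ordinal_map = dict(
    zip(statuses, [
        chr(i) for i in reversed(
            range(ord('z') - len(statuses) + 1,
                  ord('z') + 1))
    ]))

print(status_ordinal_map)
# {'To Do': 'z', 'In Progress': 'y', 'Done': 'x'}

labels = sorted(status_ordinal_map[s] + '_' + s.replace(' ', '_').lower()
                for s in statuses)
print(labels)
# ['x_done', 'y_in_progress', 'z_to_do'] -- later statuses sort first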
    def set_lr(self, lrs):
        lrs = util.listify(lrs, self.optimizer.param_groups)
        if len(lrs) != len(self.optimizer.param_groups):
            raise ValueError("Size Mismatch: Expected lrs of length {} but got {}".format(len(self.optimizer.param_groups), len(lrs)))

        for param_group, lr in zip(self.optimizer.param_groups, lrs):
            param_group['lr'] = lr
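set_lr relies on a two-argument form of util.listify that apparently broadcasts a scalar to the length of its second argument (one lr per optimizer param group). A minimal sketch of that assumed behaviour:

def listify(value, match=None):
    '''Assumed two-argument variant: wrap value and broadcast it to len(match).'''
    if not isinstance(value, (list, tuple)):
        value = [value]
    value = list(value)
    if match is not None and len(value) == 1:
        value = value * len(match)
    return value

# A single lr is repeated for every param group; an explicit list passes through.
param_groups = [{'lr': 0.1}, {'lr': 0.1}, {'lr': 0.1}]   # stand-in for optimizer.param_groups
print(listify(0.01, param_groups))                   # [0.01, 0.01, 0.01]
print(listify([0.01, 0.001, 0.0001], param_groups))  # [0.01, 0.001, 0.0001]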
Example #10
    def process_object(self, items):
        '''do the process on object'''
        type_name = self.args.get("type", None)

        if type_name is None:
            raise ValueError("type parameter required")
        else:
            defs = self.get_default_args()
            type_name = util.listify(type_name)

            if len(type_name) == 1:
                name = str(type_name[0])

                if name in util.TYPE_CHECKS:
                    return util.TYPE_CHECKS[name](defs)
                else:
                    raise ValueError("unknown type " + name)
            elif (len(type_name) == 3 and
                    type_name[0] == "list" and
                    type_name[1] == "of"):

                if isinstance(defs, list):
                    name = type_name[2]

                    if name in util.TYPE_CHECKS:
                        check = util.TYPE_CHECKS[name]
                        return all(check(item) for item in defs)
                    else:
                        raise ValueError("unknown type " + name)
                else:
                    return False
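process_object above dispatches on a util.TYPE_CHECKS table keyed by type name, with a ["list", "of", X] form for element-wise checks. A sketch of how the two branches behave against an invented table (the real TYPE_CHECKS is not shown on this page):

# Invented stand-in for util.TYPE_CHECKS.
TYPE_CHECKS = {
    "int": lambda v: isinstance(v, int),
    "string": lambda v: isinstance(v, str),
}

defs = [1, 2, 3]   # stand-in for get_default_args()

# "type": "int" -> one check against the whole default value
print(TYPE_CHECKS["int"](defs))                          # False: defs is a list

# "type": ["list", "of", "int"] -> element-wise check
print(all(TYPE_CHECKS["int"](item) for item in defs))    # True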
Example #11
 def update(self, description="", **kwargs):
     self.description = description
     if 'formats' in kwargs and kwargs['formats'] and 'html' in kwargs[
             'formats']:
         kwargs['formats'] = util.listify(kwargs['formats'])
         kwargs['formats'].append('htm')
     filespec.FileSpec.update(self, **kwargs)
     dbspec.DBSpec.update(self, **kwargs)
     schedulespec.ScheduleSpec.update(self, **kwargs)
     self._update_mappings(**kwargs)
Example #12
 def update(self, paths=None, oldest=None, formats=[], **kwargs):
     if paths is None:
         paths = []
     self.paths = [paths] if isinstance(paths, str) else paths
     self.oldest = oldest
     # each file format can have several extensions, make sure we have them all
     self.fileextensions = []
     self.formats = util.listify(formats)
     for f, v in flax.options.fileexts.iteritems():
         if f in self.formats:
             self.fileextensions.extend(v)
Example #13
 def check_for_nonempty(self, fns):
     '''assert that each file exists and is nonempty'''
     fns = util.listify(fns)
 
     if self.method == 'dry_run':
         message("dry run: test that files are non-empty: " + " ".join(fns), indent=4)
     else:
         self.check_for_existence(fns)
         tests = [os.stat(fn).st_size > 0 for fn in fns]
         if False in tests:
             bad_names = " ".join([fn for fn, t in zip(fns, tests) if t == False])
             raise RuntimeError("file(s) empty: " + bad_names)
Example #14
    def process_object(self, items):
        '''do the process on object'''

        current = self.get_args_list(True)

        to_append = items.get("items", None)

        if to_append is None:
            raise ValueError("items parameter required")

        current.extend(util.listify(to_append))
        return current
Example #15
    def process_object(self, items):
        '''do the process on items'''

        defs, single = self.get_args_list(True, True)

        args = util.listify(self.args.get("args", []))

        result = self.process_list(defs, args)

        if single:
            return result[0]
        else:
            return result
def transition_stories_with_wip_subtasks_to_wip(conf):
    stories = jira.search_issues(
        """
        project = "%s" and
        issuetype = "Story" and
        status in (%s)
        """ %
        (conf['project_id'], listify(conf['planning_workflow_statuses'])))
    for story in stories:
        subtask_statuses = set(
            [sub.fields.status.name for sub in story.fields.subtasks])
        if subtask_statuses & set(conf['wip_workflow_statuses']):
            transition_to(conf, story, u'In Progress')
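Here (and in the two epic-transition helpers below) listify is interpolated straight into a JQL status in (...) clause, so in this codebase it evidently renders a Python list as a comma-separated, quoted string rather than returning a list. A sketch of that assumed behaviour:

def listify(values):
    '''Assumed JQL-flavoured helper: render a list as '"A", "B", "C"'.'''
    return ", ".join('"%s"' % v for v in values)

planning_workflow_statuses = ['To Do', 'Ready']   # made-up statuses
print('status in (%s)' % listify(planning_workflow_statuses))
# status in ("To Do", "Ready")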
Example #17
    def check_for_existence(self, fns):
        '''assert that each of filenames does exist'''

        fns = util.listify(fns)

        if self.method == 'dry_run':
            message("dry run: test for existence of files: " + " ".join(fns), indent=4)
        else:
            # running ls first seems to prevent spurious empties
            subprocess.check_output(['ls', '-lah'])
            tests = [os.path.isfile(fn) for fn in fns]
            if False in tests:
                bad_names = " ".join([fn for fn, test in zip(fns, tests) if test == False])
                raise RuntimeError("file(s) missing: %s" % bad_names)
Example #18
    def check_for_nonempty(self, fns):
        '''assert that each file exists and is nonempty'''
        fns = util.listify(fns)

        if self.method == 'dry_run':
            message("dry run: test that files are non-empty: " + " ".join(fns),
                    indent=4)
        else:
            self.check_for_existence(fns)
            tests = [os.stat(fn).st_size > 0 for fn in fns]
            if False in tests:
                bad_names = " ".join(
                    [fn for fn, t in zip(fns, tests) if t == False])
                raise RuntimeError("file(s) empty: " + bad_names)
Example #19
def _tuplify_lists(*args):
    """
    Similar to zip(), but use None if lists aren't long enough, and
    don't skip any None list entry
    """
    args = [util.listify(l) for l in args]
    maxlen = max([len(l) for l in args])

    ret = []
    for idx in range(maxlen):
        tup = tuple()
        for l in args:
            tup += (None, ) if idx >= len(l) else (l[idx], )
        ret.append(tup)
    return ret
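A quick usage sketch of _tuplify_lists, assuming util.listify wraps a non-list argument in a one-element list and passes lists through:

print(_tuplify_lists([1, 2, 3], ['a']))
# [(1, 'a'), (2, None), (3, None)]

print(_tuplify_lists([1, 2], 'x', [7, 8, 9]))
# [(1, 'x', 7), (2, None, 8), (None, None, 9)]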
Example #20
    def check_for_existence(self, fns):
        '''assert that each of filenames does exist'''

        fns = util.listify(fns)

        if self.method == 'dry_run':
            message("dry run: test for existence of files: " + " ".join(fns),
                    indent=4)
        else:
            # running ls first seems to prevent spurious empties
            subprocess.check_output(['ls', '-lah'])
            tests = [os.path.isfile(fn) for fn in fns]
            if False in tests:
                bad_names = " ".join(
                    [fn for fn, test in zip(fns, tests) if test == False])
                raise RuntimeError("file(s) missing: %s" % bad_names)
Example #21
    def new_setter(self, val, *args, **kwargs):
        # Do this regardless, for validation purposes
        fset(self, val, *args, **kwargs)

        if not self._xml_node:
            return

        # Convert from API value to XML value
        val = fget(self)
        if set_converter:
            val = set_converter(self, val)
        elif default_converter and val == "default":
            val = default_converter(self)

        nodexpath = xpath
        if xml_set_xpath:
            nodexpath = xml_set_xpath(self)

        if nodexpath is None:
            return

        nodes = util.listify(
            _get_xpath_node(self._xml_ctx, nodexpath, is_multi))

        xpath_list = nodexpath
        if xml_set_list:
            xpath_list = xml_set_list(self)

        node_map = _tuplify_lists(nodes, val, xpath_list)
        for node, val, usexpath in node_map:
            if node:
                usexpath = node.nodePath()

            if val not in [None, False]:
                if not node:
                    node = _build_xpath_node(self._xml_node, usexpath)

                if val is True:
                    # Boolean property, creating the node is enough
                    pass
                else:
                    node.setContent(util.xml_escape(str(val)))
            else:
                _remove_xpath_node(self._xml_node, usexpath)
def transition_epics_with_wip_issues_to_wip(conf):
    epics = jira.search_issues(
        """
        project = "%s" and
        issuetype = "Epic" and
        status in (%s) and
        labels not in ("container")
        """ %
        (conf['project_id'], listify(conf['planning_workflow_statuses'])))
    for epic in epics:
        subissues = jira.search_issues(
            'project = "%s" and "Epic Link" = "%s"' %
            (conf['project_id'], epic.key))
        subissue_statuses = set([
            sub.fields.status.name
            for sub in (subissues + epic.fields.subtasks)
        ])
        if subissue_statuses and \
           subissue_statuses & set(conf['wip_workflow_statuses']):
            transition_to(conf, epic, u'In Progress')
def transition_epics_with_resolved_issues_to_resolved(conf):
    epics = jira.search_issues("""
        project = "%s" and
        issuetype = "Epic" and
        status in (%s)
        """ % (conf['project_id'],
               listify(conf['planning_workflow_statuses'] +
                       conf['wip_workflow_statuses'])))
    for epic in epics:
        subissues = jira.search_issues('"Epic Link" = "%s"' % epic.key)
        subissue_statuses = set([
            sub.fields.status.name
            for sub in (subissues + epic.fields.subtasks)
        ])
        if subissue_statuses and \
           len(subissue_statuses & set(
                conf['planning_workflow_statuses'] +
                conf['wip_workflow_statuses']
           )) == 0:
            transition_to(conf, epic, conf['completed_status'])
Example #24
    def new_getter(self, *args, **kwargs):
        val = None
        getval = fget(self, *args, **kwargs)
        if not self._xml_node:
            return getval

        if default_converter and getval == "default":
            return getval

        usexpath = xpath
        if xml_get_xpath:
            usexpath = xml_get_xpath(self)

        if usexpath is None:
            return getval

        nodes = util.listify(_get_xpath_node(self._xml_ctx, usexpath,
                                             is_multi))
        if nodes:
            ret = []
            for node in nodes:
                val = node.content
                if get_converter:
                    val = get_converter(self, val)
                elif is_bool:
                    val = True

                if not is_multi:
                    return val
                # If user is querying multiple nodes, return a list of results
                ret.append(val)
            return ret

        elif is_bool:
            return False
        elif get_converter:
            getval = get_converter(self, None)

        return getval
Example #25
    def process_object(self, items):
        '''do the process on object'''

        template = util.listify(self.args.get("template", None))

        if len(template) > 0 and template[0] is None:
            raise ValueError("template parameter required")
        elif len(template) == 1 and isinstance(template[0], basestring):
            template = template[0]
        else:
            raise ValueError("template parameter should be a string, got:" +
                    str(template))

        # if just the template parameter is defined get the rest from defaults
        # or stdin
        if len(self.args) == 1:
            ctx = self.get_default_args()
        else:
            ctx = self.args

        if not isinstance(ctx, dict):
            ctx = dict(value=ctx)

        return pystache.render(template, ctx)
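The final step hands the template and context to pystache. A minimal illustration of that render call, with an invented template and context:

import pystache

template = "Hello {{name}}, you have {{count}} new messages."   # invented template
ctx = {"name": "world", "count": 3}

print(pystache.render(template, ctx))
# Hello world, you have 3 new messages.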
Example #26
def render_formats(node, formats):
    if formats:
        for format in util.listify(formats):
            if format != 'htm':
                getattr(node, format).atts['checked'] = 'on'
Example #27
def unit_test(bld, sources, incs=(), uses=(), csources=()):
    '''Register unit tests.

    Example usage

    >>> def build(bld):
    >>>     tsrcs=bld.path.ant_glob("test/test*.cpp")
    >>>     bld.unit_test(tsrcs, "inc src")

    The sources should list all unit test main source files.
    
    The incs may give any special include directories such as the
    source directory if tests require access to private headers.

    The uses can specify additional dependencies beyond what the
    package requires.

    The csources are like sources but are used to build "check"
    programs, which are not executed as unit tests and not installed,
    but are available under the build directory for local checking.

    '''
    sources = util.listify(sources)
    incs = util.listify(incs)

    me = getattr(Context.g_module, 'APPNAME', None)
    uses = util.listify(uses) + util.uses(bld)
    if me:
        uses.insert(0, me)

    if bld.options.no_tests:
        return
    if not sources:
        return

    features = 'test cxx'
    if bld.options.quell_tests:
        features = 'cxx'

    rpath = util.rpath(bld) + [bld.path.find_or_declare(bld.out_dir)]

    for tmain in sources:
        bld.program(features=features,
                    source=[tmain],
                    target=tmain.name.replace('.cpp', ''),
                    ut_cwd=bld.path,
                    install_path=None,
                    includes=incs,
                    rpath=rpath,
                    use=uses)

    for cmain in csources:
        bld.program(features='cxx',
                    source=[cmain],
                    target=cmain.name.replace('.cpp', ''),
                    ut_cwd=bld.path,
                    install_path=None,
                    includes=incs,
                    rpath=rpath,
                    use=uses)

    bld.add_post_fun(waf_unit_test.summary)