Code Example #1
def check_file_list(any_format, ignore_warnings=True, warnings=None):
    """
    Get the folder(s) for the job or a batch job.

    A batch job can be either a readable text file name (file_name.any_extension)
    whose contents are a list of folders, one per line, or a glob-like search
    pattern for folders ending in "*".

    The folders may or may not contain valid tif data with well-named frames.
    Invalid folders are appended to `warnings`; the valid ones are returned in a
    list, so the caller can decide what to do about the listed warnings
    (ignore/quit). The intended report looks like:

        the following folders can be processed:
            folder  range  #gaps
        the following cannot be processed:
            folder  issue
    """
    import os
    from glob import glob

    if warnings is None:
        warnings = []

    folders = [any_format]
    if os.path.isfile(any_format):
        with open(any_format) as f:
            folders = [line.strip() for line in f if line.strip()]
    elif any_format.endswith("*"):
        folders = list(glob(any_format))

    valid_folders = []
    for folder in folders:
        c = context.folder_context(folder)  # `context` is provided by the surrounding module
        if not c.meta_key["valid"]:
            warnings.append(folder)
        else:
            valid_folders.append(c.meta_key)

    # List the good and the bad folders by listing the info for each folder,
    # then report the warnings (tabulated) if required.
    if not ignore_warnings and len(warnings) > 0:
        # Some folders cannot be processed. Do you want to continue (y) or quit (n)?
        pass

    return valid_folders
Code Example #2
    def _get_all_warning_lines(self, data):
        warnings = []
        for row in data:
            splitrow = row.split()
            if len(splitrow) > 0:
                if splitrow[0] == 'WARNING:':
                    warnings.append(" ".join(splitrow[1:]))

        return warnings
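
A minimal standalone sketch of how this helper behaves; the function below is a module-level stand-in (the enclosing class is not shown above) and the sample log lines are made up.

def get_all_warning_lines(data):
    # Standalone version of the method above, for illustration only.
    warnings = []
    for row in data:
        splitrow = row.split()
        if splitrow and splitrow[0] == 'WARNING:':
            warnings.append(" ".join(splitrow[1:]))
    return warnings

print(get_all_warning_lines([
    "INFO: starting run",
    "WARNING: disk space low",
    "WARNING:   retrying   connection",
]))
# -> ['disk space low', 'retrying connection']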
Code Example #3
def load_notebook(resources=None, verbose=False, hide_banner=False):
    ''' Prepare the IPython notebook for displaying Bokeh plots.

    Args:
        resources (Resource, optional) :
            how and where to load BokehJS from

        verbose (bool, optional) :
            whether to report detailed settings (default: False)

        hide_banner (bool, optional):
            whether to hide the Bokeh banner (default: False)

    Returns:
        None

    '''
    global _notebook_loaded

    from .resources import INLINE
    from .templates import NOTEBOOK_LOAD, RESOURCES

    if resources is None:
        resources = INLINE

    plot_resources = RESOURCES.render(
        js_raw = resources.js_raw,
        css_raw = resources.css_raw,
        js_files = resources.js_files,
        css_files = resources.css_files,
    )

    if resources.mode == 'inline':
        js_info = 'inline'
        css_info = 'inline'
    else:
        js_info = resources.js_files[0] if len(resources.js_files) == 1 else resources.js_files
        css_info = resources.css_files[0] if len(resources.css_files) == 1 else resources.css_files

    warnings = ["Warning: " + msg['text'] for msg in resources.messages if msg['type'] == 'warn']

    if _notebook_loaded:
        warnings.append('Warning: BokehJS previously loaded')

    _notebook_loaded = resources

    html = NOTEBOOK_LOAD.render(
        plot_resources = plot_resources,
        logo_url = resources.logo_url,
        verbose = verbose,
        js_info = js_info,
        css_info = css_info,
        bokeh_version = __version__,
        warnings = warnings,
        hide_banner = hide_banner,
    )
    utils.publish_display_data({'text/html': html})
Code Example #4
File: nbextensions.py Project: ChunHungLiu/notebook
def validate_nbextension_python(spec, full_dest, logger=None):
    """Assess the health of an installed nbextension

    Returns a list of warnings.

    Parameters
    ----------

    spec : dict
        A single entry of _jupyter_nbextension_paths():
            [{
                'section': 'notebook',
                'src': 'mockextension',
                'dest': '_mockdestination',
                'require': '_mockdestination/index'
            }]
    full_dest : str
        The on-disk location of the installed nbextension: this should end
        with `nbextensions/<dest>`
    logger : Jupyter logger [optional]
        Logger instance to use
    """
    infos = []
    warnings = []

    section = spec.get("section", None)
    if section in NBCONFIG_SECTIONS:
        infos.append(u"  {} section: {}".format(GREEN_OK, section))
    else:
        warnings.append(u"  {}  section: {}".format(RED_X, section))

    require = spec.get("require", None)
    if require is not None:
        require_path = os.path.join(
            full_dest[0:-len(spec["dest"])],
            u"{}.js".format(require))
        if os.path.exists(require_path):
            infos.append(u"  {} require: {}".format(GREEN_OK, require_path))
        else:
            warnings.append(u"  {}  require: {}".format(RED_X, require_path))

    if logger:
        if warnings:
            logger.warning("- Validating: problems found:")
            for msg in warnings:
                logger.warning(msg)
            for msg in infos:
                logger.info(msg)
            logger.warning(u"Full spec: {}".format(spec))
        else:
            logger.info(u"- Validating: {}".format(GREEN_OK))

    return warnings
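
A hedged usage sketch for the validator above: the spec mirrors the docstring example, the install path is hypothetical, and the call assumes the module-level names used above (GREEN_OK, RED_X, NBCONFIG_SECTIONS, os) are in scope.

spec = {
    'section': 'notebook',
    'src': 'mockextension',
    'dest': '_mockdestination',
    'require': '_mockdestination/index',
}
# full_dest should end with "nbextensions/<dest>"; this path is made up.
problems = validate_nbextension_python(
    spec, '/tmp/share/jupyter/nbextensions/_mockdestination')
if problems:
    print("\n".join(problems))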
Code Example #6
File: autosummary.py Project: Yefei100/eden
def autosummary_directive(dirname, arguments, options, content, lineno,
                          content_offset, block_text, state, state_machine):
    """
    Pretty table containing short signatures and summaries of functions etc.

    autosummary also generates a (hidden) toctree:: node.

    """

    names = []
    names += [x.strip().split()[0] for x in content
              if x.strip() and re.search(r'^[a-zA-Z_]', x.strip()[0])]

    table, warnings, real_names = get_autosummary(names, state,
                                                  'nosignatures' in options)
    node = table

    env = state.document.settings.env
    suffix = env.config.source_suffix
    all_docnames = env.found_docs.copy()
    dirname = posixpath.dirname(env.docname)

    if 'toctree' in options:
        tree_prefix = options['toctree'].strip()
        docnames = []
        for name in names:
            name = real_names.get(name, name)

            docname = tree_prefix + name
            if docname.endswith(suffix):
                docname = docname[:-len(suffix)]
            docname = posixpath.normpath(posixpath.join(dirname, docname))
            if docname not in env.found_docs:
                warnings.append(
                    state.document.reporter.warning(
                        'toctree references unknown document %r' % docname,
                        line=lineno))
            docnames.append(docname)

        tocnode = sphinx.addnodes.toctree()
        tocnode['includefiles'] = docnames
        tocnode['maxdepth'] = -1
        tocnode['glob'] = None
        tocnode['entries'] = [(None, docname) for docname in docnames]

        tocnode = autosummary_toc('', '', tocnode)
        return warnings + [node] + [tocnode]
    else:
        return warnings + [node]
Code Example #7
File: works.py Project: ArmindoFlores/ao3_api
    def warnings(self):
        """Returns all the work's warnings

        Returns:
            list: List of warnings
        """

        html = self._soup.find("dd", {"class": "warning tags"})
        warnings = []
        if html is not None:
            for warning in html.find_all("li"):
                warnings.append(warning.a.string)
        return warnings
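
The same parsing pattern in isolation, as a minimal sketch: it requires beautifulsoup4, and the markup below is a made-up fragment in AO3's style rather than a real page.

from bs4 import BeautifulSoup

html = """
<dd class="warning tags">
  <ul>
    <li><a href="#">Creator Chose Not To Use Archive Warnings</a></li>
    <li><a href="#">Graphic Depictions Of Violence</a></li>
  </ul>
</dd>
"""

soup = BeautifulSoup(html, "html.parser")
tag = soup.find("dd", {"class": "warning tags"})
warnings = [li.a.string for li in tag.find_all("li")] if tag is not None else []
print(warnings)
# -> ['Creator Chose Not To Use Archive Warnings', 'Graphic Depictions Of Violence']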
Code Example #8
File: __init__.py Project: ssssam/calliope
def add_location(tracker, item):
    if 'track' not in item:
        # We can only add location for 'track' items.
        return item

    tracker_item = tracker.track(artist_name=item['artist'], track_name=item['track'])

    if tracker_item:
        item.update(tracker_item)
    else:
        warnings = item.get('warnings', [])
        warnings.append('tracker: Unknown track')
        item['warnings'] = warnings

    return item
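
A small, hypothetical illustration of the pattern above: the 'warnings' list is fetched from the item (or created if absent), appended to, and written back.

item = {'artist': 'Some Artist', 'track': 'Some Track'}

warnings = item.get('warnings', [])
warnings.append('tracker: Unknown track')
item['warnings'] = warnings

print(item['warnings'])
# -> ['tracker: Unknown track']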
Code Example #9
File: autosummary.py Project: B-Rich/nibabel
def autosummary_directive(dirname, arguments, options, content, lineno,
                          content_offset, block_text, state, state_machine):
    """
    Pretty table containing short signatures and summaries of functions etc.

    autosummary also generates a (hidden) toctree:: node.

    """

    names = []
    names += [x.strip().split()[0] for x in content
              if x.strip() and re.search(r'^[a-zA-Z_]', x.strip()[0])]

    table, warnings, real_names = get_autosummary(names, state,
                                                  'nosignatures' in options)
    node = table

    env = state.document.settings.env
    suffix = env.config.source_suffix
    all_docnames = env.found_docs.copy()
    dirname = posixpath.dirname(env.docname)

    if 'toctree' in options:
        tree_prefix = options['toctree'].strip()
        docnames = []
        for name in names:
            name = real_names.get(name, name)

            docname = tree_prefix + name
            if docname.endswith(suffix):
                docname = docname[:-len(suffix)]
            docname = posixpath.normpath(posixpath.join(dirname, docname))
            if docname not in env.found_docs:
                warnings.append(state.document.reporter.warning(
                    'toctree references unknown document %r' % docname,
                    line=lineno))
            docnames.append(docname)

        tocnode = sphinx.addnodes.toctree()
        tocnode['includefiles'] = docnames
        tocnode['maxdepth'] = -1
        tocnode['glob'] = None
        tocnode['entries'] = [(None, docname) for docname in docnames]

        tocnode = autosummary_toc('', '', tocnode)
        return warnings + [node] + [tocnode]
    else:
        return warnings + [node]
Code Example #10
File: parser.py Project: Deltares/HYDROLIB-core
    def _get_empty_line_warnings(self):
        if len(self.empty_lines) == 0:
            return []

        warnings = []
        empty_line = (self.empty_lines[0], self.empty_lines[0])

        for line in self.empty_lines[1:]:
            if line == empty_line[1] + 1:
                empty_line = (empty_line[0], line)
            else:
                warnings.append(Block._get_empty_line_msg(empty_line))
                empty_line = (line, line)
        warnings.append(Block._get_empty_line_msg(empty_line))

        return warnings
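
A standalone sketch of the grouping logic above: consecutive empty-line numbers are merged into (first, last) ranges, with one warning per range. The function name here is a hypothetical stand-in that returns the ranges instead of formatted messages.

def empty_line_ranges(empty_lines):
    if not empty_lines:
        return []
    ranges = []
    current = (empty_lines[0], empty_lines[0])
    for line in empty_lines[1:]:
        if line == current[1] + 1:
            current = (current[0], line)
        else:
            ranges.append(current)
            current = (line, line)
    ranges.append(current)
    return ranges

print(empty_line_ranges([3, 4, 5, 9, 12, 13]))
# -> [(3, 5), (9, 9), (12, 13)]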
Code Example #11
File: __init__.py Project: ssssam/calliope
def add_location(tracker, item):
    if 'track' not in item:
        # We can only add location for 'track' items.
        return item

    tracker_item = tracker.track(artist_name=item['artist'],
                                 track_name=item['track'])

    if tracker_item:
        item.update(tracker_item)
    else:
        warnings = item.get('warnings', [])
        warnings.append('tracker: Unknown track')
        item['warnings'] = warnings

    return item
Code Example #12
File: nbextensions.py Project: ChunHungLiu/notebook
def validate_nbextension(require, logger=None):
    """Validate a named nbextension.

    Looks across all of the nbextension directories.

    Returns a list of warnings.

    require : str
        require.js path used to load the extension
    logger : Jupyter logger [optional]
        Logger instance to use
    """
    warnings = []
    infos = []

    js_exists = False
    for exts in _nbextension_dirs():
        # Does the Javascript entrypoint actually exist on disk?
        js = u"{}.js".format(os.path.join(exts, *require.split("/")))
        js_exists = os.path.exists(js)
        if js_exists:
            break

    require_tmpl = u"        - require? {} {}"
    if js_exists:
        infos.append(require_tmpl.format(GREEN_OK, require))
    else:
        warnings.append(require_tmpl.format(RED_X, require))
    
    if logger:
        if warnings:
            logger.warning(u"      - Validating: problems found:")
            for msg in warnings:
                logger.warning(msg)
            for msg in infos:
                logger.info(msg)
        else:
            logger.info(u"      - Validating: {}".format(GREEN_OK))
    
    return warnings
Code Example #13
def validate_nbextension(require, logger=None):
    """Validate a named nbextension.

    Looks across all of the nbextension directories.

    Returns a list of warnings.

    require : str
        require.js path used to load the extension
    logger : Jupyter logger [optional]
        Logger instance to use
    """
    warnings = []
    infos = []

    js_exists = False
    for exts in jupyter_path('nbextensions'):
        # Does the Javascript entrypoint actually exist on disk?
        js = u"{}.js".format(os.path.join(exts, *require.split("/")))
        js_exists = os.path.exists(js)
        if js_exists:
            break

    require_tmpl = u"        - require? {} {}"
    if js_exists:
        infos.append(require_tmpl.format(GREEN_OK, require))
    else:
        warnings.append(require_tmpl.format(RED_X, require))

    if logger:
        if warnings:
            logger.warning(u"      - Validating: problems found:")
            for msg in warnings:
                logger.warning(msg)
            for msg in infos:
                logger.info(msg)
        else:
            logger.info(u"      - Validating: {}".format(GREEN_OK))

    return warnings
Code Example #14
    def _read_warnings(self):
        """Poll all the warning loggers and extract any new warnings that have
        been logged. If the warnings belong to a category that is currently
        disabled, this method will discard them and they will no longer be
        retrievable.

        Returns a list of (timestamp, message) tuples, where timestamp is an
        integer epoch timestamp."""
        warnings = []
        while True:
            # pull in a line of output from every logger that has
            # output ready to be read
            loggers, _, _ = select.select(self.warning_loggers, [], [], 0)
            closed_loggers = set()
            for logger in loggers:
                line = logger.readline()
                # record any broken pipes (aka line == empty)
                if len(line) == 0:
                    closed_loggers.add(logger)
                    continue
                # parse out the warning
                timestamp, msgtype, msg = line.split('\t', 2)
                timestamp = int(timestamp)
                # if the warning is valid, add it to the results
                if self.warning_manager.is_valid(timestamp, msgtype):
                    warnings.append((timestamp, msg.strip()))

            # stop listening to loggers that are closed
            self.warning_loggers -= closed_loggers

            # stop if none of the loggers have any output left
            if not loggers:
                break

        # sort into timestamp order
        warnings.sort()
        return warnings
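
A minimal sketch of the per-line parsing step above; the tab-separated log line and epoch timestamp are made up.

line = "1700000000\tWARN\tdisk almost full\n"
timestamp, msgtype, msg = line.split('\t', 2)
print((int(timestamp), msg.strip()))
# -> (1700000000, 'disk almost full')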
Code Example #15
def process_errors_and_warnings(messages):
    errors, warnings = [], []

    for message in messages:
        # By default, script templates include an
        # 'upstream = None' line in the parameters cell but users may delete
        # it. If that happens and they reference 'upstream' in the body of the
        # notebook but the task does not have upstream dependencies, the
        # injected cell won't add the upstream variable, causing an undefined
        # name error. To provide more context, we modify the original error and
        # add a hint. Note that this only happens when using the Python API
        # directly, since the Spec API requires an upstream = None placeholder
        if (isinstance(message, UndefinedName)
                and message.message_args == ('upstream', )):
            message.message = (
                message.message +
                '. Did you forget to declare upstream dependencies?')

        if isinstance(message, _ERRORS):
            errors.append(message)
        else:
            warnings.append(message)

    return _process_messages(errors), _process_messages(warnings)
Code Example #16
def load_filenames(args):
    global gl

    warnings = []
    errors = []

    for arg in args:
        #{
        for sampfname in glob.glob(arg):

            if True:
                try:
                    sampf = file(sampfname, "rb")
                except IOError, msg:
                    errors.append(msg)

                if sampf:
                    sampf.close()

            samp = Samp()

            # strip directory
            basename = sampfname.replace("\\", "/")
            basename = sampfname.split("/")[-1]

            # strip ".wav" extension
            basename = basename.split(".")
            basename = ".".join(basename[0:-1])
            basename = jtrans.tr(basename, DELIMS, " ")

            # get layer name

            parts = basename.split(" ")
            if len(parts) <= abs(LAYER_LOC):
                loc = LAYER_LOC
                if loc >= 0:
                    loc += 1
                print >> sys.stderr, (
                    "After splitting filename '%s' delimiters," % (basename))
                print >> sys.stderr, (
                    "there aren't enough parts to find part number %d." % loc)
                sys.exit(1)
            layername = parts[LAYER_LOC]

            # get note: might be MIDI number or note name

            if len(parts) <= abs(NOTE_LOC):
                loc = NOTE_LOC
                if loc >= 0:
                    loc += 1
                print >> sys.stderr, (
                    "After splitting filename '%s' at delimiters, there aren't"
                    % (basename))
                print >> sys.stderr, (
                    "there aren't enough parts to find part number %d." % loc)
                sys.exit(1)
            notespec = parts[NOTE_LOC]

            mnote = jmidi.notenum(notespec)
            if mnote == None:
                print >> sys.stderr, (
                    "Invalid MIDI note designation '%s' in '%s'" %
                    (notespec, basename))
                sys.exit(1)

            # print sampfname, mnote, layername, jmidi.mnote_name(mnote)[0]
            samp.fname = sampfname
            samp.mnote = mnote
            samp.notename = jmidi.mnote_name(mnote, pad=None)
            samp.layername = layername
            if layername not in gl.layernum:
                warnings.append("Sample for unconfigured layer '%s': %s" %
                                (samp.layername, samp.fname))
                continue
            samp.layer = gl.layernum[layername]

            if samp.layer == None:
                warnings.append("Sample for missing layer '%s': %s" %
                                (samp.layername, samp.fname))
                continue

            x = LO_KEY - MAX_NOTE_SHIFT
            if (samp.mnote < max(0, LO_KEY - MAX_NOTE_SHIFT)
                    or samp.mnote > HI_KEY + MAX_NOTE_SHIFT):

                warnings.append("Sample outside useful note range (%s): %s" %
                                (samp.notename, samp.fname))
                continue

            samp.char = None
            gl.samps[sampfname] = samp
            gl.grid[samp.layer][mnote] = samp
Code Example #17
def DoPresubmitChecks(change, committing, verbose, output_stream, input_stream, default_presubmit, may_prompt):
    """Runs all presubmit checks that apply to the files in the change.

  This finds all PRESUBMIT.py files in directories enclosing the files in the
  change (up to the repository root) and calls the relevant entrypoint function
  depending on whether the change is being committed or uploaded.

  Prints errors, warnings and notifications.  Prompts the user for warnings
  when needed.

  Args:
    change: The Change object.
    committing: True if 'gcl commit' is running, False if 'gcl upload' is.
    verbose: Prints debug info.
    output_stream: A stream to write output from presubmit tests to.
    input_stream: A stream to read input from the user.
    default_presubmit: A default presubmit script to execute in any case.
    may_prompt: Enable (y/n) questions on warning or error.

  Warning:
    If may_prompt is true, output_stream SHOULD be sys.stdout and input_stream
    SHOULD be sys.stdin.

  Return:
    True if execution can continue, False if not.
  """
    start_time = time.time()
    presubmit_files = ListRelevantPresubmitFiles(change.AbsoluteLocalPaths(True), change.RepositoryRoot())
    if not presubmit_files and verbose:
        output_stream.write("Warning, no presubmit.py found.\n")
    results = []
    executer = PresubmitExecuter(change, committing)
    if default_presubmit:
        if verbose:
            output_stream.write("Running default presubmit script.\n")
        fake_path = os.path.join(change.RepositoryRoot(), "PRESUBMIT.py")
        results += executer.ExecPresubmitScript(default_presubmit, fake_path)
    for filename in presubmit_files:
        filename = os.path.abspath(filename)
        if verbose:
            output_stream.write("Running %s\n" % filename)
        # Accept CRLF presubmit script.
        presubmit_script = gclient_utils.FileRead(filename, "rU")
        results += executer.ExecPresubmitScript(presubmit_script, filename)

    errors = []
    notifications = []
    warnings = []
    for result in results:
        if not result.IsFatal() and not result.ShouldPrompt():
            notifications.append(result)
        elif result.ShouldPrompt():
            warnings.append(result)
        else:
            errors.append(result)

    error_count = 0
    for name, items in (("Messages", notifications), ("Warnings", warnings), ("ERRORS", errors)):
        if items:
            output_stream.write("** Presubmit %s **\n" % name)
            for item in items:
                if not item._Handle(output_stream, input_stream, may_prompt=False):
                    error_count += 1
                output_stream.write("\n")

    total_time = time.time() - start_time
    if total_time > 1.0:
        print "Presubmit checks took %.1fs to calculate." % total_time

    if not errors and warnings and may_prompt:
        if not PromptYesNo(
            input_stream, output_stream, "There were presubmit warnings. " "Are you sure you wish to continue? (y/N): "
        ):
            error_count += 1

    global _ASKED_FOR_FEEDBACK
    # Ask for feedback one time out of 5.
    if len(results) and random.randint(0, 4) == 0 and not _ASKED_FOR_FEEDBACK:
        output_stream.write(
            "Was the presubmit check useful? Please send feedback " "& hate mail to [email protected]!\n"
        )
        _ASKED_FOR_FEEDBACK = True
    return error_count == 0
Code Example #18
File: partitioncds.py Project: DRL/gIMble
def infer_degeneracy(parameterObj, transcriptObjs, zstore):
    #degeneracy_arrays_by_sample = collections.defaultdict(list)
    warnings = []
    beds_rejected = []
    # needs to know how many sites in output
    total_sites = 0
    transcriptObjs_by_sequence_id = collections.defaultdict(list)
    transcriptObjs_valid = 0
    length_by_sequence_id = collections.Counter()

    for transcriptObj in tqdm(transcriptObjs,
                              total=len(transcriptObjs),
                              desc="[%] Checking for ORFs... ",
                              ncols=150,
                              position=0,
                              leave=True):
        if not transcriptObj.is_orf():
            warnings.append(
                "[-] Transcript %s has no ORF: START=%s, STOP=%s, DIVISIBLE_BY_3=%s (will be skipped)"
                % (transcriptObj.transcript_id, transcriptObj.has_start(),
                   transcriptObj.has_stop(),
                   transcriptObj.is_divisible_by_three()))
            beds_rejected.append(transcriptObj.bed)
        else:
            total_sites += transcriptObj.positions.shape[0]
            transcriptObjs_by_sequence_id[transcriptObj.sequence_id].append(
                transcriptObj)
            length_by_sequence_id[
                transcriptObj.sequence_id] += transcriptObj.positions.shape[0]
            transcriptObjs_valid += 1
    samples = zstore.attrs['samples']
    degeneracy_chars = "U%s" % (len(samples) * 6
                                )  # could be improved with ploidy?
    #data = np.zeros(total_sites, dtype={'names':('sequence_id', 'start', 'end', 'degeneracy', 'codon_pos', 'orientation'),'formats':('U16', 'i8', 'i8', degeneracy_chars, 'i1', 'U1')})
    if warnings:
        with open("%s.cds.rejected_transcripts.bed" % parameterObj.outprefix,
                  'w') as fh:
            fh.write("\n".join(beds_rejected) + "\n")
        print("\n".join(warnings))
    dfs = []
    with tqdm(total=transcriptObjs_valid,
              ncols=150,
              desc="[%] Inferring degeneracy... ",
              position=0,
              leave=True) as pbar:
        for sequence_id, transcriptObjs in transcriptObjs_by_sequence_id.items():
            offset = 0
            data = np.zeros(length_by_sequence_id[sequence_id],
                            dtype={
                                'names':
                                ('sequence_id', 'start', 'end', 'degeneracy',
                                 'codon_pos', 'orientation'),
                                'formats': ('U16', 'i8', 'i8',
                                            degeneracy_chars, 'i1', 'U1')
                            })
            if sequence_id in zstore['seqs']:  # sequence has variants
                pos = np.array(zstore["seqs/%s/variants/pos" % sequence_id])
                gts = np.array(zstore["seqs/%s/variants/gts" % sequence_id])
                alt = np.array(zstore["seqs/%s/variants/alt" % sequence_id])
                ref = np.array(zstore["seqs/%s/variants/ref" % sequence_id])
                alleles_raw = np.column_stack([ref, alt])
                # initiate boolean mask with False
                mask = np.zeros(alleles_raw.shape, dtype=bool)
                acs = allel.GenotypeArray(gts).count_alleles()
                # overwrite with True those alleles_raw that occur in gts
                mask[:, 0:acs.shape[1]] = acs
                alleles = np.where(mask, alleles_raw, '')
            for transcriptObj in transcriptObjs:
                start, end = offset, offset + transcriptObj.positions.shape[0]
                data[start:end]['sequence_id'] = transcriptObj.sequence_id
                data[start:end]['start'] = transcriptObj.positions
                data[start:end]['end'] = transcriptObj.positions + 1
                data[start:end]['codon_pos'][0::3] = 1
                data[start:end]['codon_pos'][1::3] = 2
                data[start:end]['codon_pos'][2::3] = 3
                data[start:end]['orientation'] = transcriptObj.orientation
                if not sequence_id in zstore['seqs']:
                    #print("transcriptObj.degeneracy", type(transcriptObj.degeneracy), transcriptObj.degeneracy.shape)
                    data[start:end]['degeneracy'] = transcriptObj.degeneracy
                else:
                    #pos_in_cds_mask = np.isin(pos, transcriptObj.positions, assume_unique=True) # will crash if non-unique pos
                    cds_in_pos_mask = np.isin(
                        transcriptObj.positions, pos,
                        assume_unique=True)  # will crash if non-unique pos
                    for i in range(0, len(transcriptObj.sequence), 3):
                        codon_start = start + i
                        if not np.any(cds_in_pos_mask[codon_start:codon_start + 3]):
                            data[codon_start:codon_start + 3]['degeneracy'] = (
                                transcriptObj.degeneracy[i:i + 3])
                        else:
                            codons = ["".join(x) for x in itertools.product(
                                *alleles[codon_start:codon_start + 3])]
                            codon_list = [codon for codon in codons if len(codon) == 3]
                            data[codon_start:codon_start + 3]['degeneracy'] = (
                                degeneracy(codon_list) if codon_list else 3 * ['NA'])
                offset = end
                pbar.update()
            dfs.append(
                pd.DataFrame(data=data,
                             columns=[
                                 'sequence_id', 'start', 'end', 'degeneracy',
                                 'codon_pos', 'orientation'
                             ]))
    shutil.rmtree(parameterObj.tmp_dir)
    #df = pd.DataFrame(data=data, columns=['sequence_id', 'start', 'end', 'degeneracy', 'codon_pos', 'orientation'])
    #write_df(df.sort_values(['sequence_id', 'start'], ascending=[True, True]), out_f="%s.cds.bed" % (parameterObj.outprefix), sep='\t', header=False, status=False)
    write_df(pd.concat(dfs),
             out_f="%s.cds.bed" % (parameterObj.outprefix),
             sep='\t',
             header=False,
             status=False)
Code Example #19
def cog_validate(  # noqa: C901
        src_path: Union[str, pathlib.PurePath],
        strict: bool = False,
        quiet: bool = False) -> Tuple[bool, List[str], List[str]]:
    """
    Validate Cloud Optimized Geotiff.

    This script is the rasterio equivalent of
    https://svn.osgeo.org/gdal/trunk/gdal/swig/python/samples/validate_cloud_optimized_geotiff.py

    Parameters
    ----------
    src_path: str or PathLike object
        A dataset path or URL. Will be opened in "r" mode.
    strict: bool
        Treat warnings as errors
    quiet: bool
        Remove standard outputs

    Returns
    -------
    is_valid: bool
        True if src_path is a valid COG.
    errors: list
        List of validation errors.
    warnings: list
        List of validation warnings.

    """
    if isinstance(src_path, str):
        src_path = pathlib.Path(src_path)

    errors = []
    warnings = []
    details: Dict[str, Any] = {}

    if not GDALVersion.runtime().at_least("2.2"):
        raise Exception("GDAL 2.2 or above required")

    config = dict(GDAL_DISABLE_READDIR_ON_OPEN="FALSE")
    with rasterio.Env(**config):
        with rasterio.open(src_path) as src:
            if not src.driver == "GTiff":
                raise Exception("The file is not a GeoTIFF")

            filelist = [pathlib.Path(f).name for f in src.files]
            if len(filelist) > 1 and f"{src_path.name}.ovr" in filelist:
                errors.append(
                    "Overviews found in external .ovr file. They should be internal"
                )

            overviews = src.overviews(1)
            if src.width > 512 or src.height > 512:
                if not src.is_tiled:
                    errors.append(
                        "The file is greater than 512xH or 512xW, but is not tiled"
                    )

                if not overviews:
                    warnings.append(
                        "The file is greater than 512xH or 512xW, it is recommended "
                        "to include internal overviews")

            ifd_offset = int(src.get_tag_item("IFD_OFFSET", "TIFF", bidx=1))
            # Starting from GDAL 3.1, GeoTIFF and COG have ghost headers
            # e.g:
            # """
            # GDAL_STRUCTURAL_METADATA_SIZE=000140 bytes
            # LAYOUT=IFDS_BEFORE_DATA
            # BLOCK_ORDER=ROW_MAJOR
            # BLOCK_LEADER=SIZE_AS_UINT4
            # BLOCK_TRAILER=LAST_4_BYTES_REPEATED
            # KNOWN_INCOMPATIBLE_EDITION=NO
            # """
            #
            # This header should be < 200bytes
            if ifd_offset > 300:
                errors.append(
                    f"The offset of the main IFD should be < 300. It is {ifd_offset} instead"
                )

            ifd_offsets = [ifd_offset]
            details["ifd_offsets"] = {}
            details["ifd_offsets"]["main"] = ifd_offset

            if overviews and overviews != sorted(overviews):
                errors.append("Overviews should be sorted")

            for ix, dec in enumerate(overviews):

                # NOTE: Size check is handled in rasterio `src.overviews` methods
                # https://github.com/mapbox/rasterio/blob/4ebdaa08cdcc65b141ed3fe95cf8bbdd9117bc0b/rasterio/_base.pyx
                # We just need to make sure the decimation level is > 1
                if not dec > 1:
                    errors.append(
                        "Invalid Decimation {} for overview level {}".format(
                            dec, ix))

                # Check that the IFD of descending overviews are sorted by increasing
                # offsets
                ifd_offset = int(
                    src.get_tag_item("IFD_OFFSET", "TIFF", bidx=1, ovr=ix))
                ifd_offsets.append(ifd_offset)

                details["ifd_offsets"]["overview_{}".format(ix)] = ifd_offset
                if ifd_offsets[-1] < ifd_offsets[-2]:
                    if ix == 0:
                        errors.append(
                            "The offset of the IFD for overview of index {} is {}, "
                            "whereas it should be greater than the one of the main "
                            "image, which is at byte {}".format(
                                ix, ifd_offsets[-1], ifd_offsets[-2]))
                    else:
                        errors.append(
                            "The offset of the IFD for overview of index {} is {}, "
                            "whereas it should be greater than the one of index {}, "
                            "which is at byte {}".format(
                                ix, ifd_offsets[-1], ix - 1, ifd_offsets[-2]))

            block_offset = int(
                src.get_tag_item("BLOCK_OFFSET_0_0", "TIFF", bidx=1))
            if not block_offset:
                errors.append("Missing BLOCK_OFFSET_0_0")

            data_offset = int(block_offset) if block_offset else None
            data_offsets = [data_offset]
            details["data_offsets"] = {}
            details["data_offsets"]["main"] = data_offset

            for ix, dec in enumerate(overviews):
                data_offset = int(
                    src.get_tag_item("BLOCK_OFFSET_0_0",
                                     "TIFF",
                                     bidx=1,
                                     ovr=ix))
                data_offsets.append(data_offset)
                details["data_offsets"]["overview_{}".format(ix)] = data_offset

            if data_offsets[-1] < ifd_offsets[-1]:
                if len(overviews) > 0:
                    errors.append(
                        "The offset of the first block of the smallest overview "
                        "should be after its IFD")
                else:
                    errors.append(
                        "The offset of the first block of the image should "
                        "be after its IFD")

            for i in range(len(data_offsets) - 2, 0, -1):
                if data_offsets[i] < data_offsets[i + 1]:
                    errors.append(
                        "The offset of the first block of overview of index {} should "
                        "be after the one of the overview of index {}".format(
                            i - 1, i))

            if len(data_offsets) >= 2 and data_offsets[0] < data_offsets[1]:
                errors.append(
                    "The offset of the first block of the main resolution image "
                    "should be after the one of the overview of index {}".
                    format(len(overviews) - 1))

        for ix, dec in enumerate(overviews):
            with rasterio.open(src_path, OVERVIEW_LEVEL=ix) as ovr_dst:
                if ovr_dst.width >= 512 or ovr_dst.height >= 512:
                    if not ovr_dst.is_tiled:
                        errors.append(
                            "Overview of index {} is not tiled".format(ix))

    if warnings and not quiet:
        click.secho("The following warnings were found:",
                    fg="yellow",
                    err=True)
        for w in warnings:
            click.echo("- " + w, err=True)
        click.echo(err=True)

    if errors and not quiet:
        click.secho("The following errors were found:", fg="red", err=True)
        for e in errors:
            click.echo("- " + e, err=True)

    is_valid = False if errors or (warnings and strict) else True

    return is_valid, errors, warnings
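
A hedged usage sketch for cog_validate as defined above; the file path is hypothetical.

is_valid, errors, warnings = cog_validate("example.tif", strict=True, quiet=True)
if not is_valid:
    print("Not a valid COG:")
    for msg in errors + warnings:
        print("- " + msg)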
Code Example #20
    def process_item(self, item):
        docs, material_dict = item
        grouped = group_by_material_id(material_dict, docs, 'input_structure')
        formula = docs[0]['pretty_formula']
        if not grouped:
            formula = Structure.from_dict(list(
                material_dict.values())[0]).composition.reduced_formula
            logger.debug("No material match for {}".format(formula))

        # For now just do the most recent one that's not failed
        # TODO: better sorting of docs
        all_docs = []
        for task_id, elastic_docs in grouped.items():
            elastic_docs = sorted(elastic_docs,
                                  key=lambda x:
                                  (x['order'], x['state'], x['completed_at']))
            grouped_by_order = {
                k: list(v)
                for k, v in groupby(elastic_docs, key=lambda x: x['order'])
            }
            soec_docs = grouped_by_order.get(2)
            toec_docs = grouped_by_order.get(3)
            if soec_docs:
                final_doc = soec_docs[-1]
            else:
                final_doc = toec_docs[-1]
            structure = Structure.from_dict(final_doc['optimized_structure'])
            formula = structure.composition.reduced_formula
            elements = [s.symbol for s in structure.composition.elements]
            chemsys = '-'.join(elements)

            # Issue warning if relaxed structure differs
            warnings = final_doc.get('warnings') or []
            opt = Structure.from_dict(final_doc['optimized_structure'])
            init = Structure.from_dict(final_doc['input_structure'])
            # TODO: are these the right params?
            if not StructureMatcher().fit(init, opt):
                warnings.append("Inequivalent optimization structure")
            material_mag = CollinearMagneticStructureAnalyzer(
                opt).ordering.value
            material_mag = mag_types[material_mag]
            if final_doc['magnetic_type'] != material_mag:
                warnings.append("Elastic magnetic phase is {}".format(
                    final_doc['magnetic_type']))
            warnings = warnings or None

            # Filter for failure and warnings
            k_vrh = final_doc['k_vrh']
            if k_vrh < 0 or k_vrh > 600:
                state = 'failed'
            elif warnings is not None:
                state = 'warning'
            else:
                state = 'successful'
            final_doc.update({"warnings": warnings})
            elastic_summary = {
                'task_id': task_id,
                'all_elastic_fits': elastic_docs,
                'elasticity': final_doc,
                'spacegroup': init.get_space_group_info()[0],
                'magnetic_type': final_doc['magnetic_type'],
                'pretty_formula': formula,
                'chemsys': chemsys,
                'elements': elements,
                'last_updated': self.elasticity.lu_field,
                'state': state
            }
            if toec_docs:
                # TODO: this should be a bit more refined
                final_toec_doc = deepcopy(toec_docs[-1])
                et_exp = ElasticTensorExpansion.from_voigt(
                    final_toec_doc['elastic_tensor_expansion'])
                symbol_dict = et_exp[1].zeroed(1e-2).get_symbol_dict()
                final_toec_doc.update({"symbol_dict": symbol_dict})
                set_(elastic_summary, "elasticity.third_order", final_toec_doc)
            all_docs.append(jsanitize(elastic_summary))
            # elastic_summary.update(final_doc)
        return all_docs
Code Example #21
def DoReviewChecks(change, committing, verbose, output_stream, input_stream,
                   default_review, may_prompt):
    """Runs all review checks that apply to the files in the change.

  This finds all .review.py files in directories enclosing the files in the
  change (up to the repository root) and calls the relevant entrypoint function
  depending on whether the change is being committed or uploaded.

  Prints errors, warnings and notifications.  Prompts the user for warnings
  when needed.

  Args:
    change: The Change object.
    committing: True if 'gcl commit' is running, False if 'gcl upload' is.
    verbose: Prints debug info.
    output_stream: A stream to write output from review tests to.
    input_stream: A stream to read input from the user.
    default_review: A default review script to execute in any case.
    may_prompt: Enable (y/n) questions on warning or error.

  Warning:
    If may_prompt is true, output_stream SHOULD be sys.stdout and input_stream
    SHOULD be sys.stdin.

  Return:
    True if execution can continue, False if not.
  """
    start_time = time.time()
    review_files = ListRelevantReviewFiles(change.AbsoluteLocalPaths(True),
                                           change.RepositoryRoot())
    if not review_files and verbose:
        output_stream.write("Warning, no .review.py found.\n")
    results = []
    executer = ReviewExecuter(change, committing)
    if default_review:
        if verbose:
            output_stream.write("Running default review script.\n")
        fake_path = os.path.join(change.RepositoryRoot(), '.review.py')
        results += executer.ExecReviewScript(default_review, fake_path)
    for filename in review_files:
        filename = os.path.abspath(filename)
        if verbose:
            output_stream.write("Running %s\n" % filename)
        # Accept CRLF review script.
        review_script = read_file(filename, 'rU')
        results += executer.ExecReviewScript(review_script, filename)

    errors = []
    notifications = []
    warnings = []
    for result in results:
        if not result.IsFatal() and not result.ShouldPrompt():
            notifications.append(result)
        elif result.ShouldPrompt():
            warnings.append(result)
        else:
            errors.append(result)

    error_count = 0
    for name, items in (('Messages', notifications), ('Warnings', warnings),
                        ('ERRORS', errors)):
        if items:
            output_stream.write('** Review %s **\n' % name)
            for item in items:
                if not item._Handle(
                        output_stream, input_stream, may_prompt=False):
                    error_count += 1
                output_stream.write('\n')

    total_time = time.time() - start_time
    if total_time > 1.0:
        print "Review checks took %.1fs to calculate." % total_time

    if not errors and warnings and may_prompt:
        if not PromptYesNo(
                input_stream, output_stream, 'There were review warnings. '
                'Are you sure you wish to continue? (y/N): '):
            error_count += 1

    return (error_count == 0)
Code Example #22
        
        #save to json file
        with open(dir + 'results/'+ logs["header"]["exp_name"] +'_raw.json', 'w') as outfile:
            json.dump(avgarray, outfile) 
        #save to dat type file
        with open(dir + 'results/'+ logs["header"]["exp_name"]+"_raw.dat", "w") as data_file:
            for i in range(len(frequencyA2[si:ei])):
                data_file.write("%f %f %f\n" % (frequencyA2[si:ei][i], left[i], right[i]))
    
        #Stops the logging timer
        endtime = time.localtime()

        #Extra information for logs
        warnings = list()
        if(outlier_count != 0):
            warnings.append("Damaged scans detected!")
        if(files != int(logs["header"]["N_scans,N_cal"][0])*8):
            warnings.append("File count does not match predicted!")


        #Logging data json dictionary data
        rez_logs = {}
        rez_logs["exp_name"]= logs["header"]["exp_name"]
        rez_logs["start_time"]= time.strftime("%Y-%m-%dT%H:%M:%S", starttime)
        rez_logs["end_time"] = time.strftime("%Y-%m-%dT%H:%M:%S", endtime)
        rez_logs["cores"] = comm.size
        rez_logs["file_count"] = files
        rez_logs["scan_count"] = logs["header"]["N_scans,N_cal"][0]
        rez_logs["exp_start"] = logs["1s0"]["date"]
        rez_logs["exp_end"] = logs[str(list(logs.keys())[-1])]["date"]
        rez_logs["anomalies"] = outlier_count
Code Example #23
File: reducer.py Project: rdoyle1/astroReducer
	def check_calib(self, frame_type="ALL"):
		warnings = []
		if frame_type=="BIAS":
			if "data" not in self.cal_data["BIAS"]:
				warnings.append("Warning: No bias image found - can't do bias subtraction. Continue anyway?")
		elif frame_type=="DARK":
			if len(self.cal_data["DARK"])==0:
				warnings.append("Warning: No dark images found - can't account for dark current. Continue anyway?")
			else:
				for tag in self.cal_data["DARK"]:
					if not self.cal_data["DARK"][tag]["master"]:
						warnings.append("Warning: No bias frame applied to dark frame with exposure {}. Continue anyway?".format(tag))
		elif frame_type=="Flat Field":
			if len(self.cal_data["Flat Field"])==0:
				warnings.append("Warning: No flat images found - can't account for detector sensitivity. Continue anyway?")
			else:
				for tag in self.cal_data["Flat Field"]:
					if not self.cal_data["Flat Field"][tag]["master"]:
						warnings.append("Warning: No bias frame applied to flat frame with filter \"{}\". Continue anyway?".format(tag))
		elif frame_type=="ALL":
			warnings.extend(self.check_calib("BIAS"))
			warnings.extend(self.check_calib("DARK"))
			warnings.extend(self.check_calib("Flat Field"))
		else:
			warnings.append("Warning: Unknown frame type. Continue anyway?")
		return warnings
Code Example #24
File: jMap.py Project: konsumer/hydro2sf2
def load_filenames(args):
    global gl

    warnings = []
    errors = []

    for arg in args: 
    #{
	for sampfname in glob.glob(arg):

	    if True:
		try:
		    sampf = file(sampfname, "rb")
		except IOError, msg:
		    errors.append(msg)

		if sampf:
		    sampf.close()

	    samp = Samp()

	    # strip directory
	    basename = sampfname.replace("\\", "/")
	    basename = sampfname.split("/")[-1]

	    # strip ".wav" extension
	    basename = basename.split(".")
	    basename = ".".join(basename[0:-1])
	    basename = jtrans.tr(basename, DELIMS, " ")

	    # get layer name

	    parts = basename.split(" ")
	    if len(parts) <= abs(LAYER_LOC):
	        loc = LAYER_LOC
		if loc >= 0:
		    loc += 1
	        print >>sys.stderr, (
		    "After splitting filename '%s' delimiters,"
		    % (basename))
	        print >>sys.stderr, (
		    "there aren't enough parts to find part number %d." % loc)
		sys.exit(1)
	    layername  = parts[LAYER_LOC]

	    # get note: might be MIDI number or note name

	    if len(parts) <= abs(NOTE_LOC):
	        loc = NOTE_LOC
		if loc >= 0:
		    loc += 1
	        print >>sys.stderr, (
		    "After splitting filename '%s' at delimiters, there aren't"
		    % (basename))
	        print >>sys.stderr, (
		    "there aren't enough parts to find part number %d." % loc)
		sys.exit(1)
	    notespec   = parts[NOTE_LOC]

	    mnote = jmidi.notenum(notespec)
	    if mnote == None:
	        print >>sys.stderr, (
		    "Invalid MIDI note designation '%s' in '%s'"
		    % (notespec, basename))
		sys.exit(1)

	    # print sampfname, mnote, layername, jmidi.mnote_name(mnote)[0]
	    samp.fname = sampfname
	    samp.mnote = mnote
	    samp.notename = jmidi.mnote_name(mnote, pad=None)
	    samp.layername = layername
	    if layername not in gl.layernum:
	        warnings.append("Sample for unconfigured layer '%s': %s"
		    % (samp.layername, samp.fname))
		continue
	    samp.layer = gl.layernum[layername]

	    if samp.layer == None:
	        warnings.append("Sample for missing layer '%s': %s"
		    % (samp.layername, samp.fname))
		continue

	    x = LO_KEY - MAX_NOTE_SHIFT
	    if (samp.mnote < max(0, LO_KEY - MAX_NOTE_SHIFT)
		or samp.mnote > HI_KEY + MAX_NOTE_SHIFT):

		warnings.append("Sample outside useful note range (%s): %s" 
		    % (samp.notename, samp.fname))
		continue

	    samp.char = None
	    gl.samps[sampfname] = samp
	    gl.grid[samp.layer][mnote] = samp
Code Example #25
File: client.py Project: amplify-nation/pysimplesoap
    def wsdl_validate_params(self, struct, value):
        """Validate the arguments (actual values) for the parameters structure.
           Fail for any invalid arguments or type mismatches."""
        errors = []
        warnings = []
        valid = True

        # Determine parameter type
        if type(struct) == type(value):
            typematch = True
        elif not isinstance(struct, dict) and isinstance(value, dict):
            typematch = True    # struct can be a dict or derived (Struct)
        else:
            typematch = False

        if struct == str:
            struct = unicode        # fix for py2 vs py3 string handling

        if not isinstance(struct, (list, dict, tuple)) and struct in TYPE_MAP.keys():
            if not type(value) == struct:
                try:
                    struct(value)       # attempt to cast input to parameter type
                except:
                    valid = False
                    errors.append('Type mismatch for argument value. parameter(%s): %s, value(%s): %s' % (type(struct), struct, type(value), value))

        elif isinstance(struct, list) and len(struct) == 1 and not isinstance(value, list):
            # parameter can have a dict in a list: [{}] indicating a list is allowed, but not needed if only one argument.
            next_valid, next_errors, next_warnings = self.wsdl_validate_params(struct[0], value)
            if not next_valid:
                valid = False
            errors.extend(next_errors)
            warnings.extend(next_warnings)

        # traverse tree
        elif isinstance(struct, dict):
            if struct and value:
                for key in value:
                    if key not in struct:
                        valid = False
                        errors.append('Argument key %s not in parameter. parameter: %s, args: %s' % (key, struct, value))
                    else:
                        next_valid, next_errors, next_warnings = self.wsdl_validate_params(struct[key], value[key])
                        if not next_valid:
                            valid = False
                        errors.extend(next_errors)
                        warnings.extend(next_warnings)
                for key in struct:
                    if key not in value:
                        warnings.append('Parameter key %s not in args. parameter: %s, value: %s' % (key, struct, value))
            elif struct and not value:
                warnings.append('parameter keys not in args. parameter: %s, args: %s' % (struct, value))
            elif not struct and value:
                valid = False
                errors.append('Args keys not in parameter. parameter: %s, args: %s' % (struct, value))
            else:
                pass
        elif isinstance(struct, list):
            struct_list_value = struct[0]
            for item in value:
                next_valid, next_errors, next_warnings = self.wsdl_validate_params(struct_list_value, item)
                if not next_valid:
                    valid = False
                errors.extend(next_errors)
                warnings.extend(next_warnings)
        elif not typematch:
            valid = False
            errors.append('Type mismatch. parameter(%s): %s, value(%s): %s' % (type(struct), struct, type(value), value))

        return (valid, errors, warnings)
Code Example #26
File: qespresso.py Project: chrisjsewell/ejplugins
def first_parse(lines):
    """ first parse of file to fin start/end of sections and get metadata

    Parameters
    ----------
    lines: List

    Returns
    -------

    """
    scf_start_first = None
    scf_end_last = None
    opt_start = None
    opt_end = None
    steps_num = None
    steps = []
    warnings = []
    non_terminating_errors = []
    found_job_done = False
    start_time, end_time, elapsed_time, nprocs = None, None, None, None
    for i, line in enumerate(lines):
        line = line.strip()
        if line.startswith("JOB DONE."):
            found_job_done = True
        # convergence NOT achieved after *** iterations: stopping
        if "convergence NOT achieved" in line:
            non_terminating_errors.append(line)
        if "Error in routine cdiaghg" in line:
            non_terminating_errors.append(line)
        if "DEPRECATED" in line:
            warnings.append(line)
        if "Geometry Optimization" in line:
            if "End of" in line:
                if not opt_start:
                    raise IOError(
                        "found end of geometry optimization before start in line #{0}: {1}"
                        .format(i, line))
                if opt_end:
                    raise IOError(
                        "found multiple geometry optimization ends in line #{0} and #{1}"
                        .format(opt_end, i))
                opt_end = i
            else:
                if opt_start:
                    raise IOError(
                        "found multiple geometry optimization starts in "
                        "line #{0} and #{1}".format(opt_start, i))
                opt_start = i
        if fnmatch(line,
                   "number of * steps *=*") and opt_start and not opt_end:
            new_step = split_numbers(line)[0]
            if not steps_num is None:
                if new_step != steps_num + 1:
                    pass
                    # NOTE: this isn't strictly true since the step history can be reset
                    # TODO maybe use the line above "number of scf cycles = "
                    # raise IOError("expecting geometry optimization step {0} at "
                    #               "line #{1}: {2}".format(steps_num+1, i, line))
                steps[-1] = (steps[-1][0], i)
                steps_num += 1
            else:
                steps_num = 0
            # steps_num = new_step
            steps.append((i, None))

            # Can get:
            # lsda relaxation :  a final configuration with zero
            #                    absolute magnetization has been found
            #
            # the program is checking if it is really the minimum energy structure
            # by performing a new scf iteration without any "electronic" history
            #
        # TODO should this be a separate step?
        if fnmatch(
                line,
                '*performing a new scf iteration without any "electronic" history'
        ):
            steps[-1] = (steps[-1][0], i)
            steps_num += 1
            steps.append((i, None))

        if "Self-consistent Calculation" in line and not scf_start_first:
            scf_start_first = i
        if "End of self-consistent calculation" in line:
            scf_end_last = i

        if fnmatch(line, "Parallel version *, running on * processors"):
            if nprocs is not None:
                raise IOError("found second nprocs on line {0}: {1}".format(
                    i, line))
            nprocs = int(line.split()[-2])
        elif fnmatch(line, "Parallel version *, running on * processor cores"):
            if nprocs is not None:
                raise IOError("found second nprocs on line {0}: {1}".format(
                    i, line))
            nprocs = int(line.split()[-3])

        # NB: time uses spaces instead of 0, e.g. This run was terminated on:  12:43: 3   6Sep2017
        if fnmatch(line, "Program*starts on * at *"):
            if start_time is not None:
                raise IOError(
                    "found second start time on line {0}: {1}".format(i, line))
            line_segs = line.split("at")
            atime = line_segs[-1].replace(" ", "")
            adate = line_segs[-2].split()[-1]
            start_time = datetime.strptime(atime + ' ' + adate,
                                           '%H:%M:%S %d%b%Y')
        if line.startswith("This run was terminated on:"):
            if end_time is not None:
                raise IOError("found second end time on line {0}: {1}".format(
                    i, line))
            line_segs = line.split()
            adate = line_segs[-1]
            atime = ":".join(" ".join(line_segs[:-1]).split(":")[-3:]).replace(
                " ", "")
            end_time = datetime.strptime(atime + ' ' + adate,
                                         '%H:%M:%S %d%b%Y')

    if not found_job_done:
        non_terminating_errors.append("Did not find indicator: JOB DONE.")

    if opt_end and steps:
        steps[-1] = (steps[-1][0], opt_end)

    if opt_start and not opt_end:
        raise IOError("found start of geometry optimization but no end")

    if scf_start_first and not scf_end_last:
        raise IOError("found start of SCF but no end")

    if start_time and end_time:
        delta_time = end_time - start_time
        m, s = divmod(delta_time.total_seconds(), 60)
        h, m = divmod(m, 60)
        elapsed_time = "%d:%02d:%02d" % (h, m, s)

    meta = {"elapsed_time": elapsed_time, "nprocs": nprocs}

    return opt_start, opt_end, steps, scf_start_first, scf_end_last, warnings, non_terminating_errors, meta
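
The tuple returned above only carries line indices and metadata; slicing the raw lines is left to the caller. A hedged usage sketch follows (the file name and the per-step handling are assumptions for illustration, not part of the original plugin):

# Hedged usage sketch: "pw.out" is an illustrative Quantum ESPRESSO output path.
with open("pw.out") as handle:
    lines = handle.read().splitlines()

(opt_start, opt_end, steps, scf_start, scf_end,
 warnings, non_terminating_errors, meta) = first_parse(lines)

print("nprocs=%s elapsed=%s" % (meta["nprocs"], meta["elapsed_time"]))
for warning in warnings:
    print("WARNING: " + warning)
for start, end in steps:
    # end may still be None for an unfinished step; slicing handles that case too.
    step_lines = lines[start:end]
    # ... hand step_lines to a per-step parser here
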
コード例 #27
0
def DoPresubmitChecks(change, committing, verbose, output_stream, input_stream,
                      default_presubmit, may_prompt):
    """Runs all presubmit checks that apply to the files in the change.

  This finds all PRESUBMIT.py files in directories enclosing the files in the
  change (up to the repository root) and calls the relevant entrypoint function
  depending on whether the change is being committed or uploaded.

  Prints errors, warnings and notifications.  Prompts the user for warnings
  when needed.

  Args:
    change: The Change object.
    committing: True if 'gcl commit' is running, False if 'gcl upload' is.
    verbose: Prints debug info.
    output_stream: A stream to write output from presubmit tests to.
    input_stream: A stream to read input from the user.
    default_presubmit: A default presubmit script to execute in any case.
    may_prompt: Enable (y/n) questions on warning or error.

  Warning:
    If may_prompt is true, output_stream SHOULD be sys.stdout and input_stream
    SHOULD be sys.stdin.

  Return:
    True if execution can continue, False if not.
  """
    start_time = time.time()
    presubmit_files = ListRelevantPresubmitFiles(
        change.AbsoluteLocalPaths(True), change.RepositoryRoot())
    if not presubmit_files and verbose:
        output_stream.write("Warning, no presubmit.py found.\n")
    results = []
    executer = PresubmitExecuter(change, committing)
    if default_presubmit:
        if verbose:
            output_stream.write("Running default presubmit script.\n")
        fake_path = os.path.join(change.RepositoryRoot(), 'PRESUBMIT.py')
        results += executer.ExecPresubmitScript(default_presubmit, fake_path)
    for filename in presubmit_files:
        filename = os.path.abspath(filename)
        if verbose:
            output_stream.write("Running %s\n" % filename)
        # Accept CRLF presubmit script.
        presubmit_script = gclient_utils.FileRead(filename, 'rU')
        results += executer.ExecPresubmitScript(presubmit_script, filename)

    errors = []
    notifications = []
    warnings = []
    for result in results:
        if not result.IsFatal() and not result.ShouldPrompt():
            notifications.append(result)
        elif result.ShouldPrompt():
            warnings.append(result)
        else:
            errors.append(result)

    error_count = 0
    for name, items in (('Messages', notifications), ('Warnings', warnings),
                        ('ERRORS', errors)):
        if items:
            output_stream.write('** Presubmit %s **\n' % name)
            for item in items:
                if not item._Handle(
                        output_stream, input_stream, may_prompt=False):
                    error_count += 1
                output_stream.write('\n')

    total_time = time.time() - start_time
    if total_time > 1.0:
        print "Presubmit checks took %.1fs to calculate." % total_time

    if not errors and warnings and may_prompt:
        if not PromptYesNo(
                input_stream, output_stream, 'There were presubmit warnings. '
                'Are you sure you wish to continue? (y/N): '):
            error_count += 1

    global _ASKED_FOR_FEEDBACK
    # Ask for feedback one time out of 5.
    if (len(results) and random.randint(0, 4) == 0
            and not _ASKED_FOR_FEEDBACK):
        output_stream.write(
            "Was the presubmit check useful? Please send feedback "
            "& hate mail to [email protected]!\n")
        _ASKED_FOR_FEEDBACK = True
    return (error_count == 0)
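
The triage loop above only relies on two methods of each result object, IsFatal() and ShouldPrompt(). A small self-contained sketch with stand-in results (FakeResult is hypothetical, not part of the presubmit framework):

# Hedged sketch of the error/warning/notification triage using stand-in result objects.
class FakeResult(object):
    def __init__(self, fatal=False, prompt=False):
        self._fatal = fatal
        self._prompt = prompt
    def IsFatal(self):
        return self._fatal
    def ShouldPrompt(self):
        return self._prompt

results = [FakeResult(), FakeResult(prompt=True), FakeResult(fatal=True)]
errors, warnings, notifications = [], [], []
for result in results:
    if not result.IsFatal() and not result.ShouldPrompt():
        notifications.append(result)
    elif result.ShouldPrompt():
        warnings.append(result)
    else:
        errors.append(result)
print("%d errors, %d warnings, %d notifications"
      % (len(errors), len(warnings), len(notifications)))  # -> 1 errors, 1 warnings, 1 notifications
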
コード例 #28
0
ファイル: test.py プロジェクト: enaeseth/musicstand
 error = None
 unheard = None
 missing_rec = None
 warnings = []
 
 with catch_warnings(record=True) as w:
     try:
         run_test(test, debug=debug)
     except UnheardNoteError, e:
         unheard = e
     except MissingRecordingError, e:
         missing_rec = e
     except Exception:
         error = sys.exc_info()
     
 for warning in w:
     if warning.category is ExtraNoteWarning:
         warnings.append(warning.message)
 
 if unheard is not None or error is not None or missing_rec is not None:
     color_name = 'red!'
     results['errors'] += 1
 elif warnings:
     color_name = 'yellow!'
     results['warnings'] += 1
 else:
     color_name = 'green!'
     results['ok'] += 1
 
 print color(color_name, test.name)
 
 for warning in warnings:
     print '  %2d: Extra notes:' % warning.expectation, \
コード例 #29
0
def load_notebook(resources=None, verbose=False, force=False, skip=False):
    ''' Prepare the IPython notebook for displaying Bokeh plots.

    Args:
        resources (Resource, optional) :
            how and where to load BokehJS from

        verbose (bool, optional) :
            whether to report detailed settings (default: False)

        force (bool, optional) :
            whether to skip IPython notebook check (default: False)

    Returns:
        None

    '''
    global _notebook_loaded

    # It's possible the IPython folks will change things in the future, `force` parameter
    # provides an escape hatch as long as `displaypub` works
    if not force:
        notebook = False
        try:
            notebook = 'notebook' in get_ipython(
            ).config.IPKernelApp.parent_appname
        except Exception:
            pass
        if not notebook:
            raise RuntimeError('load_notebook only works inside an '
                               'IPython notebook, try using force=True.')

    from .resources import INLINE
    from .templates import NOTEBOOK_LOAD, RESOURCES

    if resources is None:
        resources = INLINE

    plot_resources = RESOURCES.render(
        js_raw=resources.js_raw,
        css_raw=resources.css_raw,
        js_files=resources.js_files,
        css_files=resources.css_files,
    )

    if resources.mode == 'inline':
        js_info = 'inline'
        css_info = 'inline'
    else:
        js_info = resources.js_files[0] if len(
            resources.js_files) == 1 else resources.js_files
        css_info = resources.css_files[0] if len(
            resources.css_files) == 1 else resources.css_files

    warnings = [
        "Warning: " + msg['text'] for msg in resources.messages
        if msg['type'] == 'warn'
    ]

    if _notebook_loaded:
        warnings.append('Warning: BokehJS previously loaded')

    _notebook_loaded = resources

    html = NOTEBOOK_LOAD.render(
        plot_resources=plot_resources,
        logo_url=resources.logo_url,
        verbose=verbose,
        js_info=js_info,
        css_info=css_info,
        bokeh_version=__version__,
        warnings=warnings,
        skip=skip,
    )
    utils.publish_display_data({'text/html': html})
コード例 #30
0
def load_notebook(resources=None, verbose=False, force=False, skip=False):
    ''' Prepare the IPython notebook for displaying Bokeh plots.

    Args:
        resources (Resource, optional) : a resource object describing how and where to load BokehJS from
        verbose (bool, optional) : whether to report detailed settings (default: False)
        force (bool, optional) : whether to skip IPython notebook check (default: False)

    Returns:
        None

    '''
    global _notebook_loaded

    # It's possible the IPython folks will change things in the future, `force` parameter
    # provides an escape hatch as long as `displaypub` works
    if not force:
        notebook = False
        try:
            notebook = 'notebook' in get_ipython().config.IPKernelApp.parent_appname
        except Exception:
            pass
        if not notebook:
            raise RuntimeError('load_notebook only works inside an '
                               'IPython notebook, try using force=True.')

    from .resources import INLINE
    from .templates import NOTEBOOK_LOAD, RESOURCES

    if resources is None:
        resources = INLINE

    plot_resources = RESOURCES.render(
        js_raw = resources.js_raw,
        css_raw = resources.css_raw,
        js_files = resources.js_files,
        css_files = resources.css_files,
    )

    if resources.mode == 'inline':
        js_info = 'inline'
        css_info = 'inline'
    else:
        js_info = resources.js_files[0] if len(resources.js_files) == 1 else resources.js_files
        css_info = resources.css_files[0] if len(resources.css_files) == 1 else resources.css_files

    warnings = ["Warning: " + msg['text'] for msg in resources.messages if msg['type'] == 'warn']

    if _notebook_loaded:
        warnings.append('Warning: BokehJS previously loaded')

    _notebook_loaded = resources

    html = NOTEBOOK_LOAD.render(
        plot_resources = plot_resources,
        logo_url = resources.logo_url,
        verbose = verbose,
        js_info = js_info,
        css_info = css_info,
        bokeh_version = __version__,
        warnings = warnings,
        skip = skip,
    )
    utils.publish_display_data({'text/html': html})
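
A hedged call sketch for the loader above; the import path for INLINE is an assumption based on the relative imports, and the function itself must be run inside an IPython notebook unless force=True:

# Hedged usage sketch: the bokeh.resources import path is an assumption.
from bokeh.resources import INLINE

load_notebook(resources=INLINE, verbose=True)   # report detailed settings
load_notebook(force=True)                       # bypass the IPython-notebook check
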
コード例 #31
0
ファイル: autosummary.py プロジェクト: B-Rich/nibabel
def get_autosummary(names, state, no_signatures=False):
    """
    Generate a proper table node for autosummary:: directive.

    Parameters
    ----------
    names : list of str
        Names of Python objects to be imported and added to the table.
    document : document
        Docutils document object
    
    """
    document = state.document
    
    real_names = {}
    warnings = []

    prefixes = ['']
    prefixes.insert(0, document.settings.env.currmodule)

    table = nodes.table('')
    group = nodes.tgroup('', cols=2)
    table.append(group)
    group.append(nodes.colspec('', colwidth=10))
    group.append(nodes.colspec('', colwidth=90))
    body = nodes.tbody('')
    group.append(body)

    def append_row(*column_texts):
        row = nodes.row('')
        for text in column_texts:
            node = nodes.paragraph('')
            vl = ViewList()
            vl.append(text, '<autosummary>')
            state.nested_parse(vl, 0, node)
            try:
                if isinstance(node[0], nodes.paragraph):
                    node = node[0]
            except IndexError:
                pass
            row.append(nodes.entry('', node))
        body.append(row)

    for name in names:
        try:
            obj, real_name = import_by_name(name, prefixes=prefixes)
        except ImportError:
            warnings.append(document.reporter.warning(
                'failed to import %s' % name))
            append_row(":obj:`%s`" % name, "")
            continue

        real_names[name] = real_name

        doc = get_doc_object(obj)

        if doc['Summary']:
            title = " ".join(doc['Summary'])
        else:
            title = ""
        
        col1 = u":obj:`%s <%s>`" % (name, real_name)
        if doc['Signature']:
            sig = re.sub(r'^[^(\[]*', '', doc['Signature'].strip())
            if '=' in sig:
                # abbreviate optional arguments
                sig = re.sub(r', ([a-zA-Z0-9_]+)=', r'[, \1=', sig, count=1)
                sig = re.sub(r'\(([a-zA-Z0-9_]+)=', r'([\1=', sig, count=1)
                sig = re.sub(r'=[^,)]+,', ',', sig)
                sig = re.sub(r'=[^,)]+\)$', '])', sig)
                # shorten long strings
                sig = re.sub(r'(\[.{16,16}[^,]*?),.*?\]\)', r'\1, ...])', sig)
            else:
                sig = re.sub(r'(\(.{16,16}[^,]*?),.*?\)', r'\1, ...)', sig)
            # make signature contain non-breaking spaces
            col1 += u"\\ \u00a0" + unicode(sig).replace(u" ", u"\u00a0")
        col2 = title
        append_row(col1, col2)

    return table, warnings, real_names
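
The chain of re.sub calls above abbreviates optional arguments into the bracketed "[, ...]" form used in the summary table. A small self-contained sketch of those substitutions (the sample signature is an assumption for illustration):

# Hedged sketch of the optional-argument abbreviation performed above.
import re

sig = "(data, axis=0, keepdims=False)"
sig = re.sub(r', ([a-zA-Z0-9_]+)=', r'[, \1=', sig, count=1)  # open the bracket at the first default
sig = re.sub(r'\(([a-zA-Z0-9_]+)=', r'([\1=', sig, count=1)   # same, if the first parameter has a default
sig = re.sub(r'=[^,)]+,', ',', sig)                           # strip default values in the middle
sig = re.sub(r'=[^,)]+\)$', '])', sig)                        # strip the last default and close the bracket
print(sig)  # -> (data[, axis, keepdims])
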
コード例 #32
0
def get_warnings(elastic_tensor, structure):
    """
    Generates all warnings that apply to a fitted elastic tensor

    Args:
        elastic_tensor (ElasticTensor): elastic tensor for which
            to determine warnings
        structure (Structure): structure for which elastic tensor
            is determined

    Returns:
        list of warnings

    """
    warnings = []
    if any([s.is_rare_earth_metal for s in structure.species]):
        warnings.append("Contains a rare earth element")
    eigs, eigvecs = np.linalg.eig(elastic_tensor.voigt)
    if np.any(eigs < 0.0):
        warnings.append("Elastic tensor has a negative eigenvalue")
    c11, c12, c13 = elastic_tensor.voigt[0, 0:3]
    c23 = elastic_tensor.voigt[1, 2]

    # TODO: these should be revisited at some point, are they complete?
    #       I think they might only apply to cubic systems
    if (abs((c11 - c12) / c11) < 0.05 or c11 < c12):
        warnings.append("c11 and c12 are within 5% or c12 is greater than c11")
    if (abs((c11 - c13) / c11) < 0.05 or c11 < c13):
        warnings.append("c11 and c13 are within 5% or c13 is greater than c11")
    if (abs((c11 - c23) / c11) < 0.05 or c11 < c23):
        warnings.append("c11 and c23 are within 5% or c23 is greater than c11")

    moduli = ["k_voigt", "k_reuss", "k_vrh", "g_voigt", "g_reuss", "g_vrh"]
    moduli_array = np.array([getattr(elastic_tensor, m) for m in moduli])
    if np.any(moduli_array < 2):
        warnings.append("One or more K, G below 2 GPa")

    return warnings
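
The eigenvalue test above is the core mechanical-stability check. A minimal self-contained sketch of the same idea on a toy Voigt matrix (the numbers are made up for illustration, not taken from a real material):

import numpy as np

# Toy 6x6 Voigt matrix with one deliberately negative entry (illustrative values only).
voigt = np.diag([250.0, 250.0, 250.0, 80.0, 80.0, -5.0])
eigs, eigvecs = np.linalg.eig(voigt)
warnings = []
if np.any(eigs < 0.0):
    warnings.append("Elastic tensor has a negative eigenvalue")
print(warnings)  # -> ['Elastic tensor has a negative eigenvalue']
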
コード例 #33
0
def get_state_and_warnings(elastic_doc):
    """
    Generates all warnings that apply to a fitted elastic tensor

    Args:
        elastic_tensor (ElasticTensor): elastic tensor for which
            to determine warnings
        structure (Structure): structure for which elastic tensor
            is determined

    Returns:
        list of warnings

    """
    structure = elastic_doc['optimized_structure']
    warnings = []
    if any([s.is_rare_earth_metal for s in structure.species]):
        warnings.append("Structure contains a rare earth element")
    vet = np.array(elastic_doc['elastic_tensor'])
    eigs, eigvecs = np.linalg.eig(vet)
    if np.any(eigs < 0.0):
        warnings.append("Elastic tensor has a negative eigenvalue")
    c11, c12, c13 = vet[0, 0:3]
    c23 = vet[1, 2]

    # TODO: these should be revisited at some point, are they complete?
    #       I think they might only apply to cubic systems
    if (abs((c11 - c12) / c11) < 0.05 or c11 < c12):
        warnings.append("c11 and c12 are within 5% or c12 is greater than c11")
    if (abs((c11 - c13) / c11) < 0.05 or c11 < c13):
        warnings.append("c11 and c13 are within 5% or c13 is greater than c11")
    if (abs((c11 - c23) / c11) < 0.05 or c11 < c23):
        warnings.append("c11 and c23 are within 5% or c23 is greater than c11")

    moduli = ["k_voigt", "k_reuss", "k_vrh", "g_voigt", "g_reuss", "g_vrh"]
    moduli_array = np.array([get(elastic_doc, m) for m in moduli])
    if np.any(moduli_array < 2):
        warnings.append("One or more K, G below 2 GPa")

    if np.any(moduli_array > 1000):
        warnings.append("One or more K, G above 1000 GPa")

    if elastic_doc['order'] == 3:
        if elastic_doc.get("average_linear_thermal_expansion", 0) < -0.1:
            warnings.append("Negative thermal expansion")
        if len(elastic_doc['strains']) < 80:
            warnings.append("Fewer than 80 strains, TOEC may be deficient")

    failure_states = [moduli_array[2] < 0]
    if any(failure_states):
        state = "failed"
    elif warnings:
        state = "warning"
    else:
        state = "successful"
        warnings = None

    return state, warnings
コード例 #34
0
ファイル: partitioncds_bck.py プロジェクト: DRL/gIMble
def infer_degeneracy(transcriptObjs, samples, variant_arrays_by_seq_id):
    sequence_id_arrays = []
    start_arrays = []
    end_arrays = []
    degeneracy_arrays_by_sample = collections.defaultdict(list)
    warnings = []
    for transcriptObj in tqdm(transcriptObjs, total=len(transcriptObjs), desc="[%] Inferring degeneracy... ", ncols=150, position=0, leave=True):
        if not transcriptObj.is_orf():
            warnings.append("[-] Transcript %s has no ORF: START=%s, STOP=%s, DIVISIBLE_BY_3=%s (will be skipped)" % (transcriptObj.transcript_id, transcriptObj.has_start(), transcriptObj.has_stop(), transcriptObj.is_divisible_by_three()))
        else:
            if not transcriptObj.sequence_id in variant_arrays_by_seq_id:
                cds_pos_mask = np.array([])
            else:
                cds_pos_mask = np.isin(transcriptObj.positions, variant_arrays_by_seq_id[transcriptObj.sequence_id]['POS'], assume_unique=True) # will crash if non-unique pos
            if np.any(cds_pos_mask):
                sequence_id_arrays.append(np.full(cds_pos_mask.shape[0], transcriptObj.sequence_id))
                start_arrays.append(transcriptObj.positions)
                end_arrays.append(transcriptObj.positions + 1)
                gt_pos_mask = np.isin(variant_arrays_by_seq_id[transcriptObj.sequence_id]['POS'], transcriptObj.positions, assume_unique=True) # will crash if non-unique pos
                alleles = np.column_stack(
                        [
                        variant_arrays_by_seq_id[transcriptObj.sequence_id]['REF'][gt_pos_mask], 
                        variant_arrays_by_seq_id[transcriptObj.sequence_id]['ALT'][gt_pos_mask],
                        np.full(gt_pos_mask[gt_pos_mask==True].shape, '')
                        ])
                for idx, sample in enumerate(samples):
                    # at least one variant pos in VCF (even if all variants are HOMREF)
                    gt_sample = variant_arrays_by_seq_id[transcriptObj.sequence_id]['GT'][gt_pos_mask, idx]
                    idx0 = np.array([np.arange(alleles.shape[0]), gt_sample[:,0]])
                    idx1 = np.array([np.arange(alleles.shape[0]), gt_sample[:,1]])
                    variants = np.vstack(
                        [
                        alleles[tuple(idx0)], 
                        alleles[tuple(idx1)]
                        ]).T
                    cds_sample_array = np.full((transcriptObj.positions.shape[0], variants.shape[1]), '') # has shape (gt_sample, ploidy)
                    cds_sample_array[cds_pos_mask] = variants
                    cds_sample_array[~cds_pos_mask,0] = transcriptObj.sequence[~cds_pos_mask]
                    degeneracies = []
                    for i in range(0, len(transcriptObj.sequence), 3):
                        codon_list = list(filter(lambda codon: len(codon) == 3, ["".join(x) for x in itertools.product(*cds_sample_array[i:i+3])]))
                        _degeneracy = degeneracy(codon_list) if codon_list else 3 * ['NA']
                        degeneracies.append(_degeneracy)
                    _deg = np.concatenate(degeneracies)
                    #print("#", transcriptObj.transcript_id, transcriptObj.positions.shape, _deg.shape, _deg[0:3])

                    degeneracy_arrays_by_sample[sample].append(_deg)

            else:
                sequence_id_arrays.append(np.full(transcriptObj.positions.shape[0], transcriptObj.sequence_id))
                start_arrays.append(transcriptObj.positions)
                end_arrays.append(transcriptObj.positions + 1)
                for idx, sample in enumerate(samples):
                    degeneracy_arrays_by_sample[sample].append(transcriptObj.degeneracy)
    print("\n".join(warnings))
    #for sample in samples:
    #    print('degeneracy_arrays_by_sample[%s].shape' % sample, degeneracy_arrays_by_sample[sample].shape)
    for sample in tqdm(samples, total=len(samples), desc="[%] Writing output... ", ncols=150):
        #a = np.concatenate(sequence_id_arrays)
        #b = np.concatenate(start_arrays)
        #c = np.concatenate(end_arrays)
        #d = np.concatenate(degeneracy_arrays_by_sample[sample])
        #print('a.shape', a.shape)
        #print('type(b)', type(b), 'b.shape', b.shape)
        #print('c.shape', c.shape)
        #print('d.shape', d.shape, d[0:3])

        data = np.vstack([
            np.concatenate(sequence_id_arrays),
            np.concatenate(start_arrays),
            np.concatenate(end_arrays),
            np.concatenate(degeneracy_arrays_by_sample[sample]),
        ]).T
        df = pd.DataFrame(data=data, columns=['sequence_id', 'start', 'end', 'degeneracy'])
        write_df(df, out_f="%s.tsv" % sample, sep='\t', header=False, status=False)
コード例 #35
0
def run_once2(
    repo: Repo,
    environment: Environment,
    env_vars: Mapping[str, str],
    script: Path,
    limits: Mapping[str, int],
    memoize: bool,
    trace: bool,
    trace_imports: bool,
    tmp_dir: Path,
) -> ExecutionProfile:
    warnings = []
    perf_log = tmp_dir / "perf.log"
    trace_log = tmp_dir / "trace.log"
    coverage_file = tmp_dir / ".coverage"
    command, runexec_stats, stdout, stderr = run_exec_cmd(
        environment,
        [
            *([
                "coverage", "run",
                str(script), "--data-file",
                str(coverage_file), "--rcfile=/dev/null"
            ] if trace else [
                "python",
                str(ROOT / "trace_imports.py"),
                str(script),
                str(trace_log)
            ] if trace_imports else ["python", str(script)]),
        ],
        {
            "CHARMONIUM_CACHE_DISABLE": "0" if memoize else "1",
            "CHARMONIUM_CACHE_PERF_LOG": str(perf_log),
            **env_vars,
        },
        tmp_dir,
        time_limit=limits["time"],
        mem_limit=limits["mem"],
        dir_modes={
            "/": container.DIR_READ_ONLY,
            # TODO: Ideally, I would use `--overlay-dir` with tmp_dir instead of --full-access-dir.
            # See https://github.com/sosy-lab/benchexec/issues/815
            "/home": container.DIR_OVERLAY,
            str(tmp_dir): container.DIR_FULL_ACCESS,
            str(repo.dir): container.DIR_READ_ONLY,
        },
        tmp_dir=tmp_dir,
    )

    trace_data = None
    if trace_imports:
        if runexec_stats.exitcode != 0:
            print(stdout)
            print(stderr)
            print(command)
            raise ValueError
        assert trace_log.exists()
        trace_data = sorted(
            list(
                set([
                    str(unpyc(Path(line)))
                    for line in trace_log.read_text().split("\n")
                    if line.startswith("/")
                    and Path(line).is_relative_to(repo.dir)
                ])))
        trace_log.unlink()

    if trace:
        if runexec_stats.exitcode != 0:
            print(stdout)
            print(stderr)
            print(command)
            raise ValueError
        assert coverage_file.exists()
        coverage_json = tmp_dir / "coverage.json"
        subprocess.run([
            "coverage", "json", "--data-file",
            str(coverage_file), "-o",
            str(coverage_json), "--rcfile=/dev/null"
        ],
                       cwd=tmp_dir)
        coverage_file.unlink()
        coverage_obj = json.loads(coverage_json.read_text())
        coverage_json.unlink()
        trace_data = []
        for file in sorted(coverage_obj["files"]):
            file_text = Path(file).read_text().split("\n")
            for line in coverage_obj["files"][file]["executed_lines"]:
                print(file, line)
                trace_data.append(file_text[line - 1])

    if runexec_stats.termination_reason:
        warnings.append(
            f"Terminated for {runexec_stats.termination_reason} out with {limits['time']=} {limits['mem']=}"
        )
    if perf_log.exists():
        calls, kwargs = parse_memoized_log(perf_log)
    else:
        if memoize:
            warnings.append(
                "No perf log produced, despite enabling memoization.")
        # no profiling information
        calls = []
        kwargs = {}

    return ExecutionProfile(
        output=stdout,
        command=command,
        log=stderr,
        success=runexec_stats.exitcode == 0,
        func_calls=calls,
        internal_stats=kwargs,
        runexec=runexec_stats,
        warnings=warnings,
        trace_data=trace_data,
    )
コード例 #36
0
 def accumulateDeprecations(message, category, stacklevel):
     self.assertEqual(DeprecationWarning, category)
     self.assertEqual(stacklevel, 2)
     warnings.append(message)
コード例 #37
0
def migrate(customer_id, username, password, key):
    warnings = []
    customer = masterdb.Customer.get(customer_id)
    
    h = Pingdom(username, password, key)
    
    
    
    # create the contacts
    contacts = {}
    for cid, contact in h.contacts.iteritems():
    
        try: panopta_contact = masterdb.Contact.selectBy(customer=customer, fullname=contact.name)[0]
        except:
            timezone = "America/Chicago"
            panopta_contact = masterdb.Contact(
                customer=customer, fullname=contact.name,
                timezone=timezone
            )
            warnings.append("contact '%s' created with default timezone of %s" % (
                contact.name, timezone
            ))
            
        channels = []
        contacts[cid] = (panopta_contact, channels)
        
        phone = getattr(contact, "cellphone", None)
        if phone:
            phone = "+"+phone
            sms_type = masterdb.ContactType.selectBy(textkey="sms")[0]
            try: masterdb.ContactChannel.selectBy(contact_info=phone, contact_type=sms_type)[0]
            except:
                channel = masterdb.ContactChannel(
                    contact_info=phone,
                    contact_type=sms_type,
                    contact=panopta_contact,
                    label=""
                )
                channels.append(channel)
            
        email = getattr(contact, "email", None)
        if email:
            email_type = masterdb.ContactType.selectBy(textkey="email.html")[0]
            try: masterdb.ContactChannel.selectBy(contact_info=email, contact_type=email_type)[0]
            except:
                channel = masterdb.ContactChannel(
                    contact_info=email,
                    contact_type=email_type,
                    contact=panopta_contact,
                    label=""
                )
                channels.append(channel)
                
                
    
    
    # migrate the servers
    try: server_group = masterdb.ServerGroup.selectBy(name="Pingdom")[0]
    except: server_group = masterdb.ServerGroup(
        name="Pingdom", server_group=None, customer=customer,
        notification_schedule=None
    )
        
    for server in h.servers.values():
        servers = masterdb.Server.selectBy(customer=customer, fqdn=server.hostname)
        if not servers.count():      
            logging.info("creating server \"%s\"" % server.hostname)
            panopta_server = masterdb.Server(
                name = server.hostname,
                fqdn = server.hostname, 
                last_known_ip = None,
                customer = customer,
                primary_node = masterdb.MonitorNode.get_closest_node(server.ip),
                status = 'active', 
                notification_schedule=None,
                server_group=server_group
            )
        else: panopta_server = servers[0]
        

        # create the different notification schedules from the checks
        schedules = {}
        for check in server.checks:
            if not check.notifications: continue
            
            sk = [contact.id for contact in check.notifications]
            sname = [contact.name for contact in check.notifications]
            
            sname.sort()
            sk.sort()
            sk = [str(i) for i in sk]
            
            if len(sname) > 3:
                extra = len(sname) - 3
                sname = sname[:3]
                sname.append("and %d more" % extra)
            
            sk = ",".join(sk)
            sname = ", ".join(sname)
            
            # create the notification schedule if it doesn't exist
            schedule = schedules.get(sk, None)
            if not schedule:
                logging.info("creating notification schedule \"%s\"" % sname)
                schedule = masterdb.NotificationSchedule(
                    customer = customer,
                    name = sname,
                    description = "",
                )
                schedules[sk] = schedule
                
                action = masterdb.NotificationAction(
                    schedule = schedule,
                    trigger_delay = 0,
                )
                
                for contact in check.notifications:
                    panopta_contact, channels = contacts[contact.id]
                    for channel in channels:
                        action.addContactChannel(channel)
            
            # save the applicable schedule for the next step, when we actually
            # create the monitor points
            check.schedule = schedule
                
                
                
        
        # add the service checks
        for check in server.checks:  
            service_type = masterdb.ServiceType.selectBy(textkey=check.textkey)[0]
            try:
                # does a check like this already exist on the server?
                masterdb.MonitorPoint.selectBy(
                    server=panopta_server,
                    name=check.name,
                    service_type=service_type,
                    frequency=check.frequency,
                    port=check.port,
                    status='active'
                )[0]
                continue
            except:
                logging.info("creating monitor point \"%s\"" % check.name)
                mp = masterdb.MonitorPoint(
                    server=panopta_server,
                    name=check.name,
                    service_type=service_type,
                    frequency=check.frequency,
                    port=check.port,
                    metadata=None,
                    status='active',
                    notification_schedule=check.schedule
                )
            
            mp.storeMetadata(check.metadata)
コード例 #38
0
ファイル: autosummary.py プロジェクト: Yefei100/eden
def get_autosummary(names, state, no_signatures=False):
    """
    Generate a proper table node for autosummary:: directive.

    Parameters
    ----------
    names : list of str
        Names of Python objects to be imported and added to the table.
    document : document
        Docutils document object
    
    """
    document = state.document

    real_names = {}
    warnings = []

    prefixes = ['']
    prefixes.insert(0, document.settings.env.currmodule)

    table = nodes.table('')
    group = nodes.tgroup('', cols=2)
    table.append(group)
    group.append(nodes.colspec('', colwidth=10))
    group.append(nodes.colspec('', colwidth=90))
    body = nodes.tbody('')
    group.append(body)

    def append_row(*column_texts):
        row = nodes.row('')
        for text in column_texts:
            node = nodes.paragraph('')
            vl = ViewList()
            vl.append(text, '<autosummary>')
            state.nested_parse(vl, 0, node)
            try:
                if isinstance(node[0], nodes.paragraph):
                    node = node[0]
            except IndexError:
                pass
            row.append(nodes.entry('', node))
        body.append(row)

    for name in names:
        try:
            obj, real_name = import_by_name(name, prefixes=prefixes)
        except ImportError:
            warnings.append(
                document.reporter.warning('failed to import %s' % name))
            append_row(":obj:`%s`" % name, "")
            continue

        real_names[name] = real_name

        doc = get_doc_object(obj)

        if doc['Summary']:
            title = " ".join(doc['Summary'])
        else:
            title = ""

        col1 = u":obj:`%s <%s>`" % (name, real_name)
        if doc['Signature']:
            sig = re.sub(r'^[^(\[]*', '', doc['Signature'].strip())
            if '=' in sig:
                # abbreviate optional arguments
                sig = re.sub(r', ([a-zA-Z0-9_]+)=', r'[, \1=', sig, count=1)
                sig = re.sub(r'\(([a-zA-Z0-9_]+)=', r'([\1=', sig, count=1)
                sig = re.sub(r'=[^,)]+,', ',', sig)
                sig = re.sub(r'=[^,)]+\)$', '])', sig)
                # shorten long strings
                sig = re.sub(r'(\[.{16,16}[^,]*?),.*?\]\)', r'\1, ...])', sig)
            else:
                sig = re.sub(r'(\(.{16,16}[^,]*?),.*?\)', r'\1, ...)', sig)
            # make signature contain non-breaking spaces
            col1 += u"\\ \u00a0" + unicode(sig).replace(u" ", u"\u00a0")
        col2 = title
        append_row(col1, col2)

    return table, warnings, real_names
コード例 #39
0
    def wsdl_validate_params(self, struct, value):
        """Validate the arguments (actual values) for the parameters structure.
           Fail for any invalid arguments or type mismatches."""
        errors = []
        warnings = []
        valid = True

        # Determine parameter type
        if type(struct) == type(value):
            typematch = True
        elif not isinstance(struct, dict) and isinstance(value, dict):
            typematch = True  # struct can be a dict or derived (Struct)
        else:
            typematch = False

        if struct == str:
            struct = unicode  # fix for py2 vs py3 string handling

        if not isinstance(struct,
                          (list, dict, tuple)) and struct in TYPE_MAP.keys():
            if not type(value) == struct and value is not None:
                try:
                    struct(value)  # attempt to cast input to parameter type
                except:
                    valid = False
                    errors.append(
                        'Type mismatch for argument value. parameter(%s): %s, value(%s): %s'
                        % (type(struct), struct, type(value), value))

        elif isinstance(struct, list) and len(struct) == 1 and not isinstance(
                value, list):
            # parameter can have a dict in a list: [{}] indicating a list is allowed, but not needed if only one argument.
            next_valid, next_errors, next_warnings = self.wsdl_validate_params(
                struct[0], value)
            if not next_valid:
                valid = False
            errors.extend(next_errors)
            warnings.extend(next_warnings)

        # traverse tree
        elif isinstance(struct, dict):
            if struct and value:
                for key in value:
                    if key not in struct:
                        valid = False
                        errors.append(
                            'Argument key %s not in parameter. parameter: %s, args: %s'
                            % (key, struct, value))
                    else:
                        next_valid, next_errors, next_warnings = self.wsdl_validate_params(
                            struct[key], value[key])
                        if not next_valid:
                            valid = False
                        errors.extend(next_errors)
                        warnings.extend(next_warnings)
                for key in struct:
                    if key not in value:
                        warnings.append(
                            'Parameter key %s not in args. parameter: %s, value: %s'
                            % (key, struct, value))
            elif struct and not value:
                warnings.append(
                    'parameter keys not in args. parameter: %s, args: %s' %
                    (struct, value))
            elif not struct and value:
                valid = False
                errors.append(
                    'Args keys not in parameter. parameter: %s, args: %s' %
                    (struct, value))
            else:
                pass
        elif isinstance(struct, list):
            struct_list_value = struct[0]
            for item in value:
                next_valid, next_errors, next_warnings = self.wsdl_validate_params(
                    struct_list_value, item)
                if not next_valid:
                    valid = False
                errors.extend(next_errors)
                warnings.extend(next_warnings)
        elif not typematch:
            valid = False
            errors.append('Type mismatch. parameter(%s): %s, value(%s): %s' %
                          (type(struct), struct, type(value), value))

        return (valid, errors, warnings)
コード例 #40
0
                validate_required(row, 'Closing Quarter', specials, msg))
        else:
            errors.extend(validate_values(row, 'Closing Fiscal Year'))
            errors.extend(validate_values(row, 'Closing Quarter'))

        ###
        # Data validation rules. This should catch any bad data.
        ###

        if (row.get('record validity', '').lower() == 'valid facility'
                and row.get('closing stage', '').lower() != 'closed'
                and row.get('ownership type', '').lower() == 'agency owned'
                and row.get('data center tier', '').lower() not in map(
                    str.lower, VALID_VALUES['Data Center Tier'])):
            warnings.append(
                'Only tiered data centers need to be reported, marked as "{}"'.
                format(row.get('data center tier')))

        # Impossible PUEs

        # PUE = 1.0:
        if (row.get('avg electricity usage')
                and row.get('avg it electricity usage')
                and row.get('avg electricity usage')
                == row.get('avg it electricity usage')):

            warnings.append(
                'Avg Electricity Usage ({}) for a facility should never be equal to Avg IT Electricity Usage ({})'
                .format(row.get('avg electricity usage'),
                        row.get('avg it electricity usage')))
コード例 #41
0
def cog_validate(src_path):
    """
    Validate Cloud Optimized Geotiff.

    Parameters
    ----------
    src_path : str or PathLike object
        A dataset path or URL. Will be opened in "r" mode.

    This script is the rasterio equivalent of
    https://svn.osgeo.org/gdal/trunk/gdal/swig/python/samples/validate_cloud_optimized_geotiff.py

    """
    errors = []
    warnings = []
    details = {}

    if not GDALVersion.runtime().at_least("2.2"):
        raise Exception("GDAL 2.2 or above required")

    config = dict(GDAL_DISABLE_READDIR_ON_OPEN="FALSE")
    with rasterio.Env(**config):
        with rasterio.open(src_path) as src:
            if not src.driver == "GTiff":
                raise Exception("The file is not a GeoTIFF")

            filelist = [os.path.basename(f) for f in src.files]
            src_bname = os.path.basename(src_path)
            if len(filelist) > 1 and src_bname + ".ovr" in filelist:
                errors.append(
                    "Overviews found in external .ovr file. They should be internal"
                )

            overviews = src.overviews(1)
            if src.width > 512 or src.height > 512:
                if not src.is_tiled:
                    errors.append(
                        "The file is greater than 512xH or 512xW, but is not tiled"
                    )

                if not overviews:
                    warnings.append(
                        "The file is greater than 512xH or 512xW, it is recommended "
                        "to include internal overviews")

            ifd_offset = int(src.get_tag_item("IFD_OFFSET", "TIFF", bidx=1))
            ifd_offsets = [ifd_offset]
            if ifd_offset not in (8, 16):
                errors.append(
                    "The offset of the main IFD should be 8 for ClassicTIFF "
                    "or 16 for BigTIFF. It is {} instead".format(ifd_offset))

            details["ifd_offsets"] = {}
            details["ifd_offsets"]["main"] = ifd_offset

            if overviews and overviews != sorted(overviews):
                errors.append("Overviews should be sorted")

            for ix, dec in enumerate(overviews):

                # NOTE: Size check is handled in rasterio `src.overviews` methods
                # https://github.com/mapbox/rasterio/blob/4ebdaa08cdcc65b141ed3fe95cf8bbdd9117bc0b/rasterio/_base.pyx
                # We just need to make sure the decimation level is > 1
                if not dec > 1:
                    errors.append(
                        "Invalid Decimation {} for overview level {}".format(
                            dec, ix))

                # Check that the IFD of descending overviews are sorted by increasing
                # offsets
                ifd_offset = int(
                    src.get_tag_item("IFD_OFFSET", "TIFF", bidx=1, ovr=ix))
                ifd_offsets.append(ifd_offset)

                details["ifd_offsets"]["overview_{}".format(ix)] = ifd_offset
                if ifd_offsets[-1] < ifd_offsets[-2]:
                    if ix == 0:
                        errors.append(
                            "The offset of the IFD for overview of index {} is {}, "
                            "whereas it should be greater than the one of the main "
                            "image, which is at byte {}".format(
                                ix, ifd_offsets[-1], ifd_offsets[-2]))
                    else:
                        errors.append(
                            "The offset of the IFD for overview of index {} is {}, "
                            "whereas it should be greater than the one of index {}, "
                            "which is at byte {}".format(
                                ix, ifd_offsets[-1], ix - 1, ifd_offsets[-2]))

            block_offset = int(
                src.get_tag_item("BLOCK_OFFSET_0_0", "TIFF", bidx=1))
            if not block_offset:
                errors.append("Missing BLOCK_OFFSET_0_0")

            data_offset = int(block_offset) if block_offset else None
            data_offsets = [data_offset]
            details["data_offsets"] = {}
            details["data_offsets"]["main"] = data_offset

            for ix, dec in enumerate(overviews):
                data_offset = int(
                    src.get_tag_item("BLOCK_OFFSET_0_0",
                                     "TIFF",
                                     bidx=1,
                                     ovr=ix))
                data_offsets.append(data_offset)
                details["data_offsets"]["overview_{}".format(ix)] = data_offset

            if data_offsets[-1] < ifd_offsets[-1]:
                if len(overviews) > 0:
                    errors.append(
                        "The offset of the first block of the smallest overview "
                        "should be after its IFD")
                else:
                    errors.append(
                        "The offset of the first block of the image should "
                        "be after its IFD")

            for i in range(len(data_offsets) - 2, 0, -1):
                if data_offsets[i] < data_offsets[i + 1]:
                    errors.append(
                        "The offset of the first block of overview of index {} should "
                        "be after the one of the overview of index {}".format(
                            i - 1, i))

            if len(data_offsets) >= 2 and data_offsets[0] < data_offsets[1]:
                errors.append(
                    "The offset of the first block of the main resolution image "
                    "should be after the one of the overview of index {}".
                    format(len(overviews) - 1))

        for ix, dec in enumerate(overviews):
            with rasterio.open(src_path, OVERVIEW_LEVEL=ix) as ovr_dst:
                if ovr_dst.width >= 512 or ovr_dst.height >= 512:
                    if not ovr_dst.is_tiled:
                        errors.append(
                            "Overview of index {} is not tiled".format(ix))

    if warnings:
        click.secho("The following warnings were found:",
                    fg="yellow",
                    err=True)
        for w in warnings:
            click.echo("- " + w, err=True)
        click.echo(err=True)

    if errors:
        click.secho("The following errors were found:", fg="red", err=True)
        for e in errors:
            click.echo("- " + e, err=True)

        return False

    return True
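
A hedged call sketch for the validator above (the path is an assumption; as defined, the function prints its findings via click and returns True or False):

# Hedged usage sketch: "example.tif" is an illustrative path to a local GeoTIFF.
if cog_validate("example.tif"):
    print("example.tif is a valid Cloud Optimized GeoTIFF")
else:
    print("example.tif failed COG validation; see the errors printed above")
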
コード例 #42
0
ファイル: review.py プロジェクト: Epivalent/ampify
def DoReviewChecks(change,
                      committing,
                      verbose,
                      output_stream,
                      input_stream,
                      default_review,
                      may_prompt):
  """Runs all review checks that apply to the files in the change.

  This finds all .review.py files in directories enclosing the files in the
  change (up to the repository root) and calls the relevant entrypoint function
  depending on whether the change is being committed or uploaded.

  Prints errors, warnings and notifications.  Prompts the user for warnings
  when needed.

  Args:
    change: The Change object.
    committing: True if 'gcl commit' is running, False if 'gcl upload' is.
    verbose: Prints debug info.
    output_stream: A stream to write output from review tests to.
    input_stream: A stream to read input from the user.
    default_review: A default review script to execute in any case.
    may_prompt: Enable (y/n) questions on warning or error.

  Warning:
    If may_prompt is true, output_stream SHOULD be sys.stdout and input_stream
    SHOULD be sys.stdin.

  Return:
    True if execution can continue, False if not.
  """
  start_time = time.time()
  review_files = ListRelevantReviewFiles(change.AbsoluteLocalPaths(True),
                                               change.RepositoryRoot())
  if not review_files and verbose:
    output_stream.write("Warning, no .review.py found.\n")
  results = []
  executer = ReviewExecuter(change, committing)
  if default_review:
    if verbose:
      output_stream.write("Running default review script.\n")
    fake_path = os.path.join(change.RepositoryRoot(), '.review.py')
    results += executer.ExecReviewScript(default_review, fake_path)
  for filename in review_files:
    filename = os.path.abspath(filename)
    if verbose:
      output_stream.write("Running %s\n" % filename)
    # Accept CRLF review script.
    review_script = read_file(filename, 'rU')
    results += executer.ExecReviewScript(review_script, filename)

  errors = []
  notifications = []
  warnings = []
  for result in results:
    if not result.IsFatal() and not result.ShouldPrompt():
      notifications.append(result)
    elif result.ShouldPrompt():
      warnings.append(result)
    else:
      errors.append(result)

  error_count = 0
  for name, items in (('Messages', notifications),
                      ('Warnings', warnings),
                      ('ERRORS', errors)):
    if items:
      output_stream.write('** Review %s **\n' % name)
      for item in items:
        if not item._Handle(output_stream, input_stream,
                            may_prompt=False):
          error_count += 1
        output_stream.write('\n')

  total_time = time.time() - start_time
  if total_time > 1.0:
    print "Review checks took %.1fs to calculate." % total_time

  if not errors and warnings and may_prompt:
    if not PromptYesNo(input_stream, output_stream,
                       'There were review warnings. '
                       'Are you sure you wish to continue? (y/N): '):
      error_count += 1

  return (error_count == 0)