Code Example #1
File: dataIO.py Project: pulsatrixwx/PulsatrixWx
 def create_variable(self,variable,type,dimensions=None):
     """
     create_variable() [public]
     Purpose:    Creates a variable in the file.
     Parameters: variable [type=string]
                     Name of NetCDF variable to be created.
                 type [type=string]
                     The variable primitive datatypes correspond to the dtype attribute of a numpy array. You can specify the 
                     datatype as a numpy dtype object, or anything that can be converted to a numpy dtype object. Valid 
                     datatype specifiers include: 'f4' (32-bit floating point), 'f8'  (64-bit floating point), 'i4' (32-bit 
                      signed integer), 'i2' (16-bit signed integer), 'i8' (64-bit signed integer), 'i1' (8-bit signed integer), 
                     'u1' (8-bit unsigned integer), 'u2' (16-bit unsigned integer), 'u4' (32-bit unsigned integer), 'u8' 
                     (64-bit unsigned integer), or 'S1' (single-character string). The old Numeric single-character typecodes 
                     ('f','d','h', 's','b','B','c','i','l'), corresponding to ('f4','f8','i2','i2','i1','i1','S1','i4','i4'),
                     will also work.
                 dimensions [type=tuple of strings]
                     The dimensions of the variable as a tuple of strings that name dimensions already created in the netCDF file.
                     If the variable is a scalar, only specify the variable and type.
     Returns:  An instance of the Variable class that points to the variable in the file.
     """
     if variable not in self._df.variables.keys():
         if reader.__name__ == "Nio":
             print 'Creating variable for Nio'
             if dimensions == None:
                 self._df.create_variable(variable,type,dimensions=())
             else:
                 self._df.create_variable(variable,type,dimensions)
         elif reader.__name__ in ['scipy.io.netcdf','Dataset']:
             print 'Creating variable for netcdf'
             if dimensions == None:
                 self._df.createVariable(variable,type)
             else:
                 self._df.createVariable(variable,type,dimensions)
     else:
         warning("DataIO: Variable '%s' already exists in file '%s'." % (variable, self._file_name))
Code Example #2
File: ci.py Project: efcs/zorg
 def __init__(self, value):
     try:
         self.value = float(value)
     except (TypeError, ValueError):
         fatal("invalid argument: %r" % value)
     warning("'max_time' filter is deprecated, use "
             "'user_time < %.4f' filter expression" % self.value)
Code Example #3
File: ffff.py Project: JoshKaufman/bootrom-tools
    def post_process(self, buf):
        """Post-process the FFFF header

        Process the FFFF header, assigning unspecified element locations to
        be contiguous (on erase-block-size boundaries), and read the TFTF
        files into the buffer at those locations.

        (Called by "create-ffff" after processing all arguments)
        """
        # Revalidate the erase block size
        self.erase_block_mask = self.erase_block_size - 1

        # Scan the elements and fill in missing start locations.
        # Elements are concatenated at the granularity of the erase block size
        location = self.elements[0].element_location
        for index, element in enumerate(self.elements):
            element.index = index
            if element.element_type != FFFF_ELEMENT_END_OF_ELEMENT_TABLE:
                if element.element_location == 0:
                    element.element_location = location
                    error("Note: Assuming element [{0:d}]"
                          " loads at {1:08x}".format(element.index, location))
                if self.flash_image_length != 0 and \
                   element.element_location + element.element_length >= \
                   self.flash_image_length:
                    error("--element-location " +
                          format(element.element_location, "#x") +
                          " + --element-length " +
                          format(element.element_length, "#x") +
                          " exceeds --image-length " +
                          format(self.flash_image_length, "#x"))
                    sys.exit(PROGRAM_ERRORS)
                location = next_boundary(element.element_location +
                                         element.element_length,
                                         self.erase_block_size)
            if element.element_type == FFFF_ELEMENT_END_OF_ELEMENT_TABLE:
                break

        if self.flash_image_length == 0:
            self.flash_image_length = location

        self.validate_element_table()

        # fill in and/or trim selected FFFF fields
        self.sentinel = FFFF_SENTINEL

        self.timestamp = strftime("%Y%m%d %H%M%S", gmtime())
        if len(self.flash_image_name) >= FFFF_FLASH_IMAGE_NAME_LENGTH:
            self.flash_image_name = \
                self.flash_image_name[0:FFFF_FLASH_IMAGE_NAME_LENGTH - 1]
            warning("flash_image_name truncated to '{0:s}'".
                    format(self.flash_image_name))
        self.tail_sentinel = FFFF_SENTINEL

        # Flush the structure elements to the FFFF buffer and do a final
        # sniff test on the results
        self.pack()
        self.validate_ffff_header()
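The next_boundary() helper used above is not shown in this snippet. A minimal sketch of what it could look like, assuming the erase block size is a power of two (which the erase_block_mask = erase_block_size - 1 computation implies); the project's actual helper may differ:

def next_boundary(location, block_size):
    """Round location up to the next multiple of block_size (a power of two)."""
    mask = block_size - 1
    return (location + mask) & ~mask

# next_boundary(0x1201, 0x1000) -> 0x2000; next_boundary(0x2000, 0x1000) -> 0x2000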
Code Example #4
File: drives.py Project: Cybolic/vineyard
def set_type(driveletter, drive_type):
    # Make sure we use the same caps for the driveletter as the reg
    typesraw = registry.get("HKEY_LOCAL_MACHINE\\Software\\Wine\\Drives", quiet=True, shallow=True)
    for key, value in typesraw.iteritems():
        if key.lower() == driveletter.lower():
            driveletter = key
    # Save it
    if drive_type not in DRIVE_TYPES and drive_type is not None:
        util.warning("Setting drive type to unknown value: %s" % drive_type)
    registry.set({"HKEY_LOCAL_MACHINE\\Software\\Wine\\Drives": {"%s:" % driveletter: drive_type}})
Code Example #5
File: binary_server.py Project: cth103/toast
    def client(self, conn):
        try:
            data = util.get_bytearray(conn)
            if len(data) > 0:
                reply = self.handler(data)
                if reply is not None:
                    util.send_bytearray(conn, reply)
        except Exception as e:
            util.warning('Server handler threw "%s"' % e)
            traceback.print_exc(file=sys.stdout)

        conn.close()
Code Example #6
File: builder.py Project: splewis/sm-builder
def build(smbuildfile, compiler, plugins, packages, flags='', output_dir='builds', nosource=False):
    """Performs the entire build process."""
    # setup directory structure, execute user-configurations
    plugin_build_dir = os.path.join(output_dir, 'plugins')
    util.mkdir(output_dir)
    util.mkdir(plugin_build_dir)

    # scan deps for what we need to do
    packages_to_build = set()
    for name, package in packages.items():
        if smbuildfile == package.smbuildfile:
            packages_to_build.add(name)

    plugins_to_compile = set()
    for name in packages_to_build:
        for_this_package = base.find_plugin_deps(packages[name], packages)
        for plugin_name in for_this_package:
            plugins_to_compile.add(plugin_name)

            if plugin_name not in plugins:
                err = 'Package {} uses plugin {}, but it does not exist'.format(name, plugin_name)
                raise ValueError(err)

            # also make sure plugin dependencies are met by the package
            for dep in plugins[plugin_name].deps:
                if dep not in for_this_package:
                    msg = 'Plugin {} depends on {}, but is not part of package {}'
                    msg = msg.format(plugin_name, dep, name)
                    util.warning(msg)

    # also compile any plugins from this smbuildfile
    for plugin_name in plugins:
        if plugins[plugin_name].smbuildfile == smbuildfile:
            plugins_to_compile.add(plugin_name)

    # compile plugins
    compiled_count = 0
    for name in plugins_to_compile:
        plugin = plugins[name]
        if plugin.compile(compiler, plugin_build_dir, flags):
            compiled_count += 1

    # build packages
    for name in packages_to_build:
        package = packages[name]
        print('Building package {}'.format(name))
        package.create(output_dir, packages, plugins, nosource)

    if len(plugins) == 0:
        util.warning('No plugins were found in {}.'.format(smbuildfile))
    elif compiled_count == 0:
        print('All plugins up to date.')
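base.find_plugin_deps() is not shown here; judging by how it is used, it has to compute a transitive closure of plugin dependencies. The standalone sketch below illustrates that idea with a plain dict and hypothetical names, and is not the project's implementation:

def transitive_deps(roots, deps):
    """Return every name reachable from 'roots' through the 'deps' mapping."""
    seen = set()
    stack = list(roots)
    while stack:
        name = stack.pop()
        if name in seen:
            continue
        seen.add(name)
        stack.extend(deps.get(name, ()))
    return seen

# transitive_deps(['admin'], {'admin': ['core'], 'core': ['utils']})
# -> {'admin', 'core', 'utils'}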
Code Example #7
File: dataIO.py Project: pulsatrixwx/PulsatrixWx
 def set_file_attribute(self, attribute, value):
     """
     set_file_attribute() [public]
     Purpose:    Set a file attribute, but only if the write flag is set.
     Parameters: attribute [type=string]
                     The name of the attribute to set.
                 value [type=int,float,char,string]
                     The value to put in the file.
     Returns:    [nothing]
     """
     if not self._write:
         warning("DataIO: Write flag has not been set on file '%s'.  No action taken." % self._file_name)
         return
     setattr(self._df, attribute, value)
     return
Code Example #8
File: json_server.py Project: cth103/toast
    def client(self, conn):
        try:
            while True:
                json = util.receive_json(conn)
                if json is None:
                    break
                reply = self.handler(json)
                if reply is not None:
                    util.send_json(conn, reply)
        except Exception as e:
            util.warning('Server handler threw "%s"' % e)
            traceback.print_exc(file=sys.stdout)
            pass

        conn.close()
Code Example #9
File: parser.py Project: splewis/sm-builder
def glob_files(file_list, name, warn_on_empty=False):
    """
    Support function for pattern matching on files that
    returns a list of matching files.
    """
    output = []
    current_path = os.path.join(*DirectoryStack)
    for pattern in file_list:
        matches = glob.glob(os.path.join(current_path, pattern))
        if matches:
            for f in matches:
                output.append(f)
        if not matches and warn_on_empty:
            util.warning('No files matched pattern {}'.format(pattern))

    return output
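A hedged usage sketch for glob_files(), assuming the parser has already pushed the current directory components onto DirectoryStack:

# Hypothetical call, with DirectoryStack holding e.g. ['.', 'scripting']:
#
#     sources = glob_files(['*.sp', 'include/*.inc'], 'my-plugin',
#                          warn_on_empty=True)
#
# Any pattern that matches nothing produces a util.warning instead of an error.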
Code Example #10
File: dataIO.py Project: pulsatrixwx/PulsatrixWx
 def set_variable(self, variable, value):
     """
     set_variable() [public]
     Purpose:    Put an array of data in the file, but only if write flag has been set.
     Parameters: variable [type=string]
                     Name of the variable to set.
                 value [type=np.array]
                     Array to put in the file.
     Returns:    [nothing]
     """
     if not self._write:
         warning("DataIO: Write flag has not been set on file '%s'.  No action taken." % self._file_name)
         return
     
     self._ov_variables[variable] = value
     return
Code Example #11
File: dataIO.py Project: pulsatrixwx/PulsatrixWx
    def remove_variable(self, variable):
        """
        remove_variable() [public]
        Purpose:    Set a file variable to the remove list, to be "removed" on close.
        Parameters: variable [type=string]
                        Name of the variable to remove.
        Returns:    [nothing]
        """
        if not self._write:
            warning("DataIO: Write flag has not been set on file '%s'.  No action taken." % self._file_name)
            return

        if variable not in self._rm_variables:
            self._rm_variables.append(variable)
        else:
            warning("DataIO: Variable '%s' has already been removed from file '%s'." % (variable, self._file_name))
        return
Code Example #12
File: dataIO.py Project: pulsatrixwx/PulsatrixWx
    def remove_file_attribute(self, attribute):
        """
        remove_file_attribute() [public]
        Purpose:    Set a file attribute to the remove list, to be "removed" on close.
        Parameters: attribute [type=string]
                        Name of the attribute to remove.
        Returns:    [nothing]
        """
        if not self._write:
            warning("DataIO: Write flag has not been set on file '%s'.  No action taken." % self._file_name)
            return

        if attribute not in self._rm_attributes['__file__']:
            self._rm_attributes['__file__'].append(attribute)
        else:
            warning("DataIO: Attribute '%s' has already been removed from file '%s'." % (attribute, self._file_name))
        return
Code Example #13
def streamsearch(ofile, text, max_pages=10, results_per_page=100):
    """Stream the results of searching for 'text' to the 'ofile' output file

    Args:
      ofile             str, the name of a file where we will write any tweets
                        we find. Tweets are written in JSON format, with every
                        tweet being stored in a separate line as a Python dict.
      text              str, the text to search for in Twitter. This can
                        be a plain text string or a '#hashtag' to look
                        for tweets of this topic only.
      max_pages         int, maximum number of result 'pages' to obtain
                        from Twitter's backlog of archived tweets. When
                        not specified, default to 10 pages.
      results_per_page  int, maximum number of results per page to fetch
                        from Twitter's backlog of archived tweets. When
                        not specified, default to 100 tweets per page.

    Returns:
      None
    """
    # Load the id of already seen tweets, if there are any.
    ofilename = ofile or 'standard output'
    seen = ofile and preload_tweets(ofile) or set()
    if seen:
        message('%d tweets preloaded from %s', len(seen), ofilename)
    try:
        ostream = ofile and file(ofile, 'a+') or sys.stdout
        for matches in search(text, max_pages=max_pages,
                              results_per_page=results_per_page):
            newmatches = 0
            for tweet in matches:
                (tid, tuser, text) = (tweet['id'], tweet['from_user'],
                                      tweet['text'])
                if not tid in seen:
                    newmatches += 1
                    seen.add(tid)
                    print >> ostream, json.dumps(tweet)
            if newmatches > 0:
                message('%d new tweets logged at %s', newmatches, ofilename)
        ostream.close()
    except IOError, e:
        if ostream and ostream != sys.stdout:
            ostream.close()
        warning('Error writing at file "%s". %s', ofilename, e)
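A minimal usage sketch for the function above (the file name and query are examples only):

# Hypothetical invocation: append tweets mentioning '#python' to tweets.json,
# pulling at most 5 pages of 50 results each.
streamsearch('tweets.json', '#python', max_pages=5, results_per_page=50)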
Code Example #14
File: dataIO.py Project: pulsatrixwx/PulsatrixWx
 def set_variable_attribute(self, variable, attribute, value):
     """
     set_variable_attribute() [public]
     Purpose:    Set a variable attribute, but only if the write flag is set.
     Parameters: variable [type=string]
                     Name of the variable whose attribute to set.
                 attribute [type=string]
                     Name of the attribute to set.
                 value [type=int,float,char,string]
                     The value to put in the file.
     Returns:    [nothing]
     """
     if not self._write:
         warning("DataIO: Write flag has not been set on file '%s'.  No action taken." % self._file_name)
         return
     if reader.__name__ in ["Nio","scipy.io.netcdf"]:
         setattr(self._df.variables[variable], attribute, value)
     elif reader.__name__ == "Dataset":
         setattr(self._df.variables[variable],attribute,value)
     return
Code Example #15
File: hooks.py Project: abreen/socrates.py
def load_from_dict(dict_):
    from os.path import isfile, sep
    global _triggers, _hooks

    if 'hooks' not in dict_:
        return

    for trigger, fnames in dict_['hooks'].items():
        if trigger not in _triggers:
            raise ValueError("unknown trigger: '" + str(trigger) + "'")

        for fname in fnames:
            if fname in _hooks[trigger]:
                raise ValueError("duplicate hook: '" + str(fname) + "'")

            if not isfile(config.hooks_dir + sep + fname):
                util.warning("could not find hook file: '" + \
                             str(fname) + "'")

            _hooks[trigger].append(fname)
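The dict_ argument is expected to carry a 'hooks' mapping from trigger names to lists of hook file names. An illustrative shape is sketched below; the trigger and file names are made up and would have to match _triggers and the files under config.hooks_dir:

# Illustrative input only.
example_config = {
    'hooks': {
        'before_grading': ['check_style.py'],
        'after_grading': ['email_results.py', 'archive.py'],
    }
}
load_from_dict(example_config)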
Code Example #16
File: pythonfile.py Project: abreen/socrates.py
    def __should_fail(self, result):
        """Called before an eval test is about to be failed, but the criteria
        specifies that a human should confirm the failure before taking
        any points. If this method returns True, the points are ultimately
        taken.
        """
        import sys
        from grader import write_results
        from prompt import prompt

        util.warning("about to fail a test")

        write_results(sys.stdout, [result])

        points = result['deduction']
        s = util.plural("point", points)
        fail_msg = "fail this test (-{} {})".format(points, s)

        choices = [fail_msg, "do not fail this test"]
        return prompt(choices, '1') == [0]
Code Example #17
File: dataIO.py Project: pulsatrixwx/PulsatrixWx
    def remove_variable_attribute(self, variable, attribute):
        """
        remove_variable_attribute () [public]
        Purpose:    Set a variable attribute to the remove list, to be "removed" on close.
        Parameters: variable [type=string]
                        Name of the variable whose attribute to remove.
                    attribute [type=string]
                        Name of the attribute to remove.
        Returns:    [nothing]
        """
        if not self._write:
            warning("DataIO: Write flag has not been set on file '%s'.  No action taken." % self._file_name)
            return

        try:
            if attribute not in self._rm_attributes[variable]:
                self._rm_attributes[variable].append(attribute)
            else:
                warning("DataIO: Attribute '%s' has already been removed from variable '%s' in file '%s'." % (attribute, variable, self._file_name))
        except KeyError:
            self._rm_attributes[variable] = []
            self._rm_attributes[variable].append(attribute)
        return
Code Example #18
    def openFile(self, filename):
        # Sanitize name
        root, ext = os.path.splitext(filename)
        if not ext:
            filename += '.xml'
        if not os.path.isfile(filename):
            return util.warning(self,
                                'File({0}) does not exist'.format(filename))

        # Check for doubles
        for idx, config in enumerate(self.getConfigs()):
            if config.getFilename() == filename:
                self.switchTab(idx)
                return util.warning(
                    self, 'File "{0}" is already open'.format(filename))

        # New config
        config = SirannonConfiguration()
        config.loadFromFile(filename)

        # Check for outdated parameters and ask user for removal
        if config.proofRead() < 0:
            config.sync()
            dialog2 = self.getWidget('QuestionDialog')
            dialog2.set_markup(
                'Configuration contains outdated or unknown parameters for some components. Do you wish to remove them?'
            )
            status = dialog2.run()
            dialog2.hide()
            sys.stdout.flush()
            if status == -8:
                config.proofRead(correct=True)
        # Show the opened XML
        self.newTab(config)
        self.switchTab(-1, 1)
        return True
Code Example #19
    def build(self, jobs='1'):
        packagelist = importer.get_modulelist(builder)
        packagelist.remove('base')

        self.builders = {}
        self.projects = {}
        for p in packagelist:
            self.builders[p] = importer.get_module(builder, p)
            importer.require(p)

        self.files = {}
        self.files['bin'] = []
        self.files['lib'] = []
        self.files['share'] = []

        for r in self.config['repo']:
            if 'root' in r:
                path = os.path.join(r['path'], r['root'])
            else:
                path = r['path']

            if not 'exclude' in r:
                r['exclude'] = []

            if 'type' in r:
                print r['type']
                self.projects[r['path']] = self.builders[r['type']].Builder(
                    path, self.repos[r['path']].get_version())

                outfiles = self.projects[r['path']].build(jobs, r['exclude'])
                for f in self.files:
                    outfiles[f] = [x for x in outfiles[f] if x]
                    self.files[f].extend(outfiles[f])

            else:
                util.warning("No type declared for", r['path'], "; skipping")
Code Example #20
def __processSplit(command, keys, attributes, timestamp, source, backfill,
                   processCoid, processSecid):
    if "SPLIT" not in attributes:
        return

    secid = database.getSecidFromCsid(keys["GVKEY"], keys["IID"], timestamp)
    if not processSecid(secid):
        return

    if secid is None:
        secid = database.createNewCsid(keys["GVKEY"], keys["IID"], timestamp)
        util.warning("Created new secid: {}.{}=>{}".format(
            keys['GVKEY'], keys['IID'], secid))
    if backfill:
        timestamp = min(
            timestamp,
            util.convert_date_to_millis(__datePlusOne(keys["DATADATE"])))

    code = database.getAttributeType("SPLIT", source, "n", database.SPLIT)
    updates = (0, 0)
    if command in ("I", "C"):
        updates = database.insertTimelineRow(database.SPLIT, {
            "secid": secid,
            "date": int(keys["DATADATE"])
        }, {
            "backfill": backfill,
            "rate": float(attributes["SPLIT"])
        }, timestamp)
    elif command in ("D", "R"):
        updates = database.killOrDeleteTimelineRow(
            database.SPLIT, {
                "secid": secid,
                "date": int(keys["DATADATE"])
            }, timestamp)

    database.updateAttributeStats(code, *updates)
Code Example #21
File: db_check.py Project: ryan-leung/ml_monorepo
def __verifySymbols(rows, start, end, data, warnings, stats):
    #check that born<died for each row. report rows with born=died and born>died
    for i in range(start, end):
        if rows[i]["born"] == rows[i]["died"]:
            if warnings: util.warning("born=died: {}".format(rows[i]))
            stats["row born=died"] = stats["row born=died"] + 1
            stats["warnings"] = stats["warnings"] + 1
        elif rows[i]["died"] is not None and rows[i]["born"] > rows[i]["died"]:
            util.error("born>died: {}".format(rows[i]))
            stats["row born>died"] = stats["row born>died"] + 1
            stats["errors"] = stats["errors"] + 1
    #check that each row was born at the point the other died
    #check if consecutive rows have the same data
    for i in range(start + 1, end):
        if rows[i - 1]["died"] is None or rows[
                i - 1]["died"] > rows[i]["born"]:  #overlapping rows
            #do a more thorough check
            if rows[i - 1]["country"] != rows[i]["country"] or rows[
                    i - 1]["coid"] == rows[i]["coid"]:
                if warnings:
                    util.warning("benign overlap: {} | {}".format(
                        rows[i - 1], rows[i]))
                stats["benign overlap"] = stats["benign overlap"] + 1
                stats["warnings"] = stats["warnings"] + 1
            else:
                util.error("overlapping rows: {} | {}".format(
                    rows[i - 1], rows[i]))
                stats["overlapping row"] = stats["overlapping row"] + 1
                stats["errors"] = stats["errors"] + 1
        elif rows[i - 1]["died"] < rows[i]["born"]:
            if warnings:
                util.warning("gap in the timeline: {} | {}".format(
                    rows[i - 1], rows[i]))  #timeline gap
            stats["gap"] = stats["gap"] + 1
            stats["warnings"] = stats["warnings"] + 1
        elif util.dict_fields_eq(rows[i - 1], rows[i], data):
            if warnings:
                util.warning("consecutive rows with same data: {} | {}".format(
                    rows[i - 1], rows[i]))
            stats["consecutive with same data"] = stats[
                "consecutive with same data"] + 1
            stats["warnings"] = stats["warnings"] + 1
Code Example #22
File: generateEep.py Project: oxidation99/yadoms
 def statesCode(xmlTypeNode):
     if len(xmlTypeNode.findall("case")) != 1:
         util.warning(
             "func/type : Unsupported number of \"case\" tags (expected 1) for \""
             + xmlTypeNode.find("title").text.encode("utf-8") +
             "\" node. This profile will be ignored.")
         return "   return m_historizers;\n"
     code = ""
     for xmlDataFieldNode in xmlHelper.findUsefulDataFieldNodes(
             inXmlNode=xmlTypeNode.find("case")):
         if isLinearValue(xmlDataFieldNode):
             dataText = xmlDataFieldNode.find("data").text
             if dataText == "Temperature" or \
                dataText == "Humidity" or \
                dataText == "Barometer" or \
                dataText == "Supply voltage" or \
                dataText == "Illumination" or \
                dataText == "Illuminance":
                 code += statesCodeForLinearValue(xmlDataFieldNode)
             elif str(dataText.encode("utf-8")) == "Sun – West" \
                or str(dataText.encode("utf-8")) == "Sun – South" \
                or str(dataText.encode("utf-8")) == "Sun – East": # Provided as kilo-lux when Yadoms knows only lux
                 code += statesCodeForLinearValue(
                     xmlDataFieldNode, 1000)
             elif dataText == "Energy Storage":
                 code += statesCodeForLinearValue(xmlDataFieldNode,
                                                  applyCoef=None,
                                                  finalCast="int")
             else:
                 util.warning(
                     "func/type : Unsupported linear data type \"" +
                     str(dataText.encode("utf-8")) + "\" for \"" +
                     str(
                         xmlTypeNode.find("title").text.encode(
                             "utf-8")) +
                     "\" node. This data will be ignored.")
                 continue
         elif isBoolValue(xmlDataFieldNode):
             code += statesCodeForBoolValue(xmlDataFieldNode)
         else:
             util.warning("func/type : Unsupported data type \"" +
                          str(
                              xmlDataFieldNode.find(
                                  "data").text.encode("utf-8")) +
                          "\" for \"" + str(
                              xmlTypeNode.find("title").text.encode(
                                  "utf-8")) +
                          "\" node. This data will be ignored.")
             continue
     code += "   return m_historizers;"
     return code
Code Example #23
File: parser.py Project: armsnyder/cheese-whiz
def remove_unicode(text):
    """
    Cleans ingredient text from allrecipes
    :param text: text with unicode
    :return: text without unicode
    """
    # TODO: Make sure jalapeno works with this encoding
    try:
        decoded_text = text.decode('unicode_escape')
    except UnicodeDecodeError:
        util.warning('UnicodeDecodeError on decode: '+text)
        decoded_text = ''
    except UnicodeEncodeError:
        util.warning('UnicodeEncodeError on decode: '+text)
        decoded_text = ''
    try:
        encoded_text = decoded_text.encode('utf-8')
    except UnicodeDecodeError:
        util.warning('UnicodeDecodeError on encode: '+text)
        encoded_text = ''
    except UnicodeEncodeError:
        util.warning('UnicodeEncodeError on encode: '+text)
        encoded_text = ''
    return regex.uni.sub('', encoded_text)
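A quick hedged usage sketch (Python 2, as in the snippet), using an escaped byte string like the jalapeño case mentioned in the TODO:

# Hypothetical call: '\xf1' is decoded via 'unicode_escape', re-encoded as
# UTF-8, and whatever regex.uni matches is stripped from the result.
cleaned = remove_unicode('jalape\\xf1o peppers')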
Code Example #24
def __processDividend(command, keys, attributes, timestamp, source, backfill,
                      global_cs):
    if global_cs and not keys["IID"].endswith("W"):
        return
    elif not global_cs and keys["IID"].endswith("W"):
        return

    secid = database.getSecidFromCsid(keys["GVKEY"], keys["IID"], timestamp)

    if secid is None:
        secid = database.createNewCsid(keys["GVKEY"], keys["IID"], timestamp)
        util.warning("Created new secid: {}.{}=>{}".format(
            keys['GVKEY'], keys['IID'], secid))
    if backfill == 1:
        timestamp = min(
            timestamp,
            util.convert_date_to_millis(__datePlusOne(keys["DATADATE"])))

    code = database.getAttributeType("DIVIDEND", source, "n",
                                     database.DIVIDEND)
    updates = (0, 0)
    if command in ("I"):
        data = {
            "backfill": backfill,
            "currency": database.getCurrencyType(keys["CURCDDV"])
        }
        #get the data that we track and translate them to our own names. make also sure that you get the attribute types right
        for k, v in __usToCsDividendTranslate.iteritems():
            value = attributes.get(v, None)
            if value is not None:
                data[k] = float(value)
            else:
                data[k] = value  #i.e., None
        #finally do the insertion
        updates = database.insertTimelineRow(database.DIVIDEND, {
            "secid": secid,
            "date": int(keys["DATADATE"])
        }, data, timestamp)
    elif command in ("R"):
        updates = database.killOrDeleteTimelineRow(
            database.DIVIDEND, {
                "secid": secid,
                "date": int(keys["DATADATE"])
            }, timestamp)
    elif command in ("C", "D"):
        data = {
            "backfill": backfill,
            "currency": database.getCurrencyType(keys["CURCDDV"])
        }
        for n, v in attributes.iteritems(
        ):  #for each attribute from the compustat line
            if n in __csToUsDividendTranslate:  #if it is among the ones we track
                ourName = __csToUsDividendTranslate[n]
                if v is not None:  #else
                    data[ourName] = float(v)
                else:
                    data[ourName] = None  #i.e None
        updates = database.updateTimelineRow(database.DIVIDEND, {
            "secid": secid,
            "date": int(keys["DATADATE"])
        }, data, timestamp)

    database.updateAttributeStats(code, *updates)
Code Example #25
    else:
        util.set_log_file("all", True)

    if options.db == "pri":
        newdb.init_db()
        database = newdb.get_db()
    elif options.db == "sec":
        newdb.init_db(os.environ["SEC_DB_CONFIG_FILE"])
        database = newdb.get_db()
    else:
        util.error("Valid database choices are [pri|sec]")
        sys.exit(1)

    # Check for previously running instance
    if not database.getProcessedFilesLock():
        util.warning("Not processing, previous instance running")
        sys.exit(1)

        #XXX may want to precache seen files for speed in loading
    try:
        for source in options.source.split("+"):
            util.info("Processing source %s" % source)
            from data_sources.file_source import FileSource

            util.info("Indexing new files for %s" % source)
            fs = FileSource()
            files = []
            sconfig = config.load_source_config(source)

            # List files
            fs.cwd("%s/%s" % (os.environ['DATA_DIR'], sconfig['local_dir']))
Code Example #26
 def evaluate(self, command):
     warning("'negate' filter is deprecated, use 'not result' "
             "filter expression")
     command.result = not command.result
Code Example #27
def parse_entry(tokens, meet):
    """
    Parses a "D-Line" in a Hytek entry file that corresponds to an athlete's
    entry into a particular event within a meet.

    D-Line format
    <Token#> <Data> <MaxChar>  <Description>
    0   D           1   Individual Entry Record
    1   Last Name   20  (Required)
    2   First Name  20  (Required)
    3   Initial     1   (Optional)
    4   Gender      1   M = Male, F = Female (Required)
    5   Birth Date  10  MM/DD/YYYY (Optional)
    6   Team Code   4   4 characters max; use UNA if unknown (Required)
    7   Team Name   30  Use Unattached if unknown (Required)
    8   Age         3   Age is optional if birth date provided
    9  School Year  2   (Optional for HyTek, but not for TMS)
    10  Event Code  10  Examples: 100, 5000S, 10000W, SP, HJ, DEC
    11  Entry Mark  11  Time: hh:mm:ss.tt (1:23.44.55, 1:19.14, 58.83, 13.4h)
                        Field Metric: 12.33, 1233;
                        Field English: 12-10.25", 12', 121025, 12' 10
                        Combined-event: 3020 (points)
    12  Mark measure 1  M for Metric, E for English (Required if Entry Mark
                        provided)
    """

    if len(tokens) < 11:
        error(f"HyTek D-Line requires at least 11 fields. <{tokens}>")

    (_junk1, last_name, first_name, middle, gender, _junk2, team_code,
     team_name, _junk3, grade, ht_event_code) = tokens[0:11]

    athlete = Athlete.add_athlete_to_db(first_name, middle, last_name, gender,
                                        grade, team_code, team_name)

    # If the athlete's record in file was bad, add_athlete_to_db returns None.
    if athlete is None:
        warning(f"Skipping athlete {first_name} {last_name}")
        return

    # translate the HyTek event names into TMS event codes
    event_code = ht_event_translator.get(ht_event_code, ht_event_code)

    q = MeetDivisionEvent.query
    try:
        mde = q.filter_by(meet_id=meet.id,
                          event_code=event_code,
                          div_id=athlete.division.id).one()
    except NoResultFound:
        raise TmsError("MDE doesn't exist: meet #{}, event {}, div {}".format(
            meet.id, event_code, athlete.division.code))

    entry = Entry(athlete=athlete, mde=mde)
    # we need to commit here, or else we can't see the event in the below call
    # of entry.set_mark method.
    db.session.add(entry)
    db.session.commit()
    info(f"Added entry: {entry}")

    # If the athlete's entry includes a seed mark for this event, set it
    if len(tokens[11:13]) == 2:
        entry.set_mark(mark_string=tokens[11], mark_measure_type=tokens[12])

    # I don't understand why, but at this point the entry thinks its "event"
    # attribute is "None", even after I setup the relationship with the mde,
    # which should also get me the event. I believe this shoudl be possible
    # without adding the entry to the session and commiting.
    db.session.commit()
    info("Set entry's mark. Entry: {}. Mark:{}".format(entry.event.code,
                                                       entry.mark_to_string()))
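For reference, an illustrative token list of the shape described in the D-Line table above (all names and marks are made up; the snippet receives the line already split into tokens):

tokens = [
    "D", "Doe", "Jane", "A", "F", "01/02/2004", "UNA", "Unattached",
    "", "9", "SP", "12.33", "M",
]
# tokens[0:11] -> athlete fields plus the HyTek event code "SP";
# tokens[11:13] -> the optional seed mark "12.33" and its measure "M" (metric).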
Code Example #28
                cppHistorizerClass.addConstructor(cppClass.CppClassConstructor("const std::string& keywordName", \
                   "CSingleHistorizableData<" + historizerEnumName + ">(keywordName, yApi::CStandardCapacity(\"" + historizerEnumName + "\", yApi::CStandardUnits::NoUnit(), yApi::EKeywordDataType::kNoData), yApi::EKeywordAccessMode::kGet)"))
                cppHistorizerClass.addDependency(
                    cppClass.CppExtendedEnumType(historizerEnumName,
                                                 enumValues))
                return cppHistorizerClass

            def printCtorExtraParameters(ctorExtraParameters):
                if not ctorExtraParameters:
                    return ""
                return ", ".join(ctorExtraParameters)

            historizersCppName = []
            if len(xmlTypeNode.findall("case")) != 1:
                util.warning(
                    "func/type : Unsupported number of \"case\" tags (expected 1) for \""
                    + xmlTypeNode.find("title").text.encode("utf-8") +
                    "\" node. This profile will be ignored.")
            else:
                for xmlDataFieldNode in xmlHelper.findUsefulDataFieldNodes(
                        inXmlNode=xmlTypeNode.find("case")):
                    dataText = xmlDataFieldNode.find("data").text
                    keywordName = xmlDataFieldNode.find(
                        "shortcut").text + " - " + dataText
                    historizerCppName = "m_" + cppHelper.toCppName(keywordName)
                    cppHistorizerClassName = ""
                    ctorExtraParameters = []
                    if isLinearValue(xmlDataFieldNode):
                        if dataText == "Temperature":
                            if not supportedUnit(xmlDataFieldNode, u"°C"):
                                continue
                            cppHistorizerClassName = "yApi::historization::CTemperature"
Code Example #29
File: yearn.py Project: ryan-leung/ml_monorepo
def _parseFile(filepath):
    #this should only happen when we process the first file ever
    if filepath is None:
        return {},None,None,None
    
    info = datafiles.read_info_file(filepath)
    if os.path.basename(filepath).startswith("yearn_archive.txt"):
        backfill = 1
        archive = True
    elif info['date_last_absent'] is None:
        timestamp = util.convert_date_to_millis(info['date_modified'])
        backfill = 1
        archive = False
    else:
        timestamp = util.convert_date_to_millis(info['date_first_present'])
        backfill = 0
        archive = False
    
    file = open(filepath, "r")
    data={}
    
    for line in file:
        line = line.rstrip("\n")
        
        # Parse date
        # XXX all dates need to be in UTC based on exchange of stock
        annDate, name, ticker, value, time = line.split("\t")
        if time == 'Time Not Supplied':
            exactAnnDate = annDate + ' 00:00 UTC'
        elif time == 'Before Market Open':
            exactAnnDate = annDate + ' 08:00 EST'
        elif time == 'After Market Close':
            exactAnnDate = annDate + ' 17:00 EST'
        else:
            exactAnnDate = annDate +" "+ time.replace("ET", "EST")
        
        #annDate to millis
        try:
            exactAnnDate = util.convert_date_to_millis(exactAnnDate)
        except:
            util.warning("Failed to parse {}".format(exactAnnDate))
            print "Failed to parse {}".format(exactAnnDate)
            continue
        if archive:
            timestamp = util.convert_date_to_millis(annDate) - util.convert_date_to_millis(datetime.timedelta(days=30))
        
        secid = database.getSecidFromXref("TIC", ticker, timestamp, "compustat_idhist", newdb.xrefsolve.preferUS)
        if secid is None:
            util.warning("Failed to map ticker {}".format(ticker))
            continue

        coid, issueid = database.getCsidFromSecid(secid)
        assert coid is not None
    
        data[(coid,exactAnnDate,backfill)]=annDate
        #data.append((coid,exactAnnDate,backfill,timestamp))
    
    file.close()
        
    #get the file start date from the filename
    if not archive:
        startDate=os.path.normpath(filepath).split("/")[-1][0:8] #split the filepath, take last token and its first 8 chars
    else:
        startDate="20060101"
            
    return (data,archive,startDate,timestamp)
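An illustrative tab-separated line of the shape the parsing loop expects, and the announcement-time string it builds from it (ticker, value and date format are made up):

# 'Before Market Open' is mapped onto an 08:00 EST timestamp before the
# conversion to milliseconds.
sample = "20120425\tAcme Corp\tACME\t1.23\tBefore Market Open"
annDate, name, ticker, value, time = sample.split("\t")
exactAnnDate = annDate + ' 08:00 EST'    # -> '20120425 08:00 EST'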
Code Example #30
File: hmmmfile.py Project: abreen/socrates.py
    def run(self, _):
        import io
        import sys

        util.info("running HMMM test")

        if self.input is not None:
            in_buf = io.StringIO(self.input)
            sys.stdin = in_buf

        if self.output is not None:
            out_buf = io.StringIO()
            sys.stdout = out_buf

        try:
            hmc.run(self.file.binary_name, debug=False)
        except KeyboardInterrupt:
            sys.stdin, sys.stdout = sys.__stdin__, sys.__stdout__

            desc = "test failed because the grader halted the program"
            util.warning(desc)

            err = filter(_not_boring, out_buf.getvalue().split('\n')[-5:-1])

            return {'deduction': self.deduction,
                    'description': self.description,
                    'notes': [desc] + list(err)}

        except SystemExit:
            sys.stdin, sys.stdout = sys.__stdin__, sys.__stdout__

            util.warning("failing test because the simulator exited uncleanly")

            err = filter(_not_boring, out_buf.getvalue().split('\n')[-5:-1])

            return {'deduction': self.deduction,
                    'description': "simluator exited with an error",
                    'notes': err}

        # restore default standard in/out
        sys.stdin, sys.stdout = sys.__stdin__, sys.__stdout__

        if self.output is not None:
            output = out_buf.getvalue()

        passed = True
        if self.output is not None:
            passed = passed and self.__output_matches(output)

        if passed:
            return None
        else:
            result = {'deduction': self.deduction,
                      'description': self.description,
                      'notes': []}

            if self.output is not None and type(self.output) is str:
                eo = self.output.split('\n')
                po = output.split('\n')

                result['notes'].append("expected output:")
                result['notes'] += eo
                result['notes'].append("produced output:")
                result['notes'] += po

            return result
Code Example #31
File: pythonfile.py Project: abreen/socrates.py
    def run_tests(self):
        import sys
        import io
        import os

        results = dict()
        results[self] = []

        actual_setrecursionlimit = sys.setrecursionlimit

        def intercept_stacksize_change(new_val):
            util.info("intercepting call to sys.setrecursionlimit()")
            old_val = sys.getrecursionlimit()
            if new_val < old_val:
                util.info("keeping stack size at " + str(old_val))
                return
            if new_val > MAX_STACK_SIZE:
                util.info("code wants to set stack size too large")
                util.info("keeping stack size at " + str(old_val))
                return
            else:
                util.info("growing stack size to " + str(new_val))
                actual_setrecursionlimit(new_val)

        sys.setrecursionlimit = intercept_stacksize_change

        try:
            directory, name = os.path.split(self.path)
            mod_name = name[:name.index('.py')] if '.py' in name else name

            sys.path.append(directory)

            util.info("importing module '{}'".format(mod_name))

            # redirect standard out to empty buffer to "mute" the program
            #sys.stdout = io.StringIO()
            module_context = __import__(mod_name)
            #sys.stdout = sys.__stdout__

            util.info("finished importing module".format(mod_name))

        except:
            # "un-mute" the program and give socrates access to stdout
            #sys.stdout = sys.__stdout__

            import traceback

            err = sys.exc_info()

            util.error("encountered an error importing "
                   "'{}' module ({})".format(mod_name, err[0].__name__))

            traceback.print_exc()

            if self.error_deduction:
                deduction = self.error_deduction
            else:
                deduction = self.point_value

            util.warning("deducting {} points for import "
                         "error".format(deduction))

            return [{'deduction': deduction,
                     'description': "error importing '{}'".format(self.path),
                     'notes': ["encountered {}".format(err[0].__name__)]}]

        found_functions = self.__get_members(module_context, 'functions')
        found_classes = self.__get_members(module_context, 'classes')
        found_variables = self.__get_members(module_context, 'variables')

        for test in self.tests:
            result = test.run(module_context)
            if result is not None:
                util.add_to(result, results[self])

        for func in self.functions:
            results[func] = []
            if func not in found_functions:
                results[func].append({'deduction': func.point_value,
                                      'description': "missing "
                                                     "{}".format(func)})
                continue

            for test in func.tests:
                # TODO fix this hacky thing
                if type(test) is TestSet:
                    for m in test.members:
                        m.target = func

                result = test.run(module_context)
                if result is not None:
                    util.add_to(result, results[func])

        for cls in self.classes:
            results[cls] = []
            if cls not in found_classes:
                results[cls].append({'deduction': cls.point_value,
                                     'description': "missing "
                                                    "{}".format(cls)})
                continue

            # TODO move this into __get_members
            cls_obj = _find_class_from_cxt(module_context, cls.name)
            import inspect

            found_methods = []
            for m in inspect.getmembers(cls_obj, inspect.isfunction):
                for method in cls.methods:
                    if method.name == m[0]:
                        found_methods.append(method)


            for method in cls.methods:
                results[method] = []

                if method not in found_methods:
                    results[method].append({'deduction': method.point_value,
                                            'description': "missing "
                                                        "{}".format(method)})
                    continue

                for test in method.tests:
                    # TODO fix this hacky thing
                    if type(test) is TestSet:
                        for m in test.members:
                            m.target = method

                    result = test.run(module_context)
                    if result is not None:
                        util.add_to(result, results[method])

        for var in self.variables:
            results[var] = []
            if var not in found_variables:
                results[var].append({'deduction': var.point_value,
                                     'description': "missing "
                                                    "{}".format(var)})
                continue

            for test in var.tests:
                # TODO fix this hacky thing
                if type(test) is TestSet:
                    for m in test.members:
                        m.target = var

                result = test.run(module_context)
                if result is not None:
                    util.add_to(result, results[var])

        for target, failures in results.items():
            sum = 0
            for f in failures:
                if 'deduction' in f:
                    # deduction is at top level
                    sum += f['deduction']

                    if sum > target.point_value:
                        f['deduction'] = 0

                elif 'subresults' in f:
                    # deduction for this failure is made up of subresults
                    for subfailure in f['subresults']:
                        sum += subfailure['deduction']

                        if sum > target.point_value:
                            subfailure['deduction'] = 0

        return [item for subl in results.values() for item in subl]
Code Example #32
  row = parse(line)

  # Trim whitespace in each field
  row = [field.strip() for field in row]

  # Convert to dict using header
  row = dict(zip(header, row))

  # Filter out records where action code is not 'A'
  if row['action_code'] != 'A':
    continue

  # Use full certification year instead of last 2 chars
  if row['year']:
    row['year'] = '20' + row['year']

  # Enforce numeric fields
  for field in numeric_fields:
    try:
      row[field] = int(row[field])
    except ValueError:
      warning('[{0}] Invalid integer conversion of {1} for "{2}"'.format(
          row['parcel_number'], field, row[field]))
      row[field] = 0

  # Construct unique record id from property id + certification year
  row['record_id'] = '{0}{1}'.format(row['parcel_number'], row['year'])

  # Filter out 
  writer.writerow(row)
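A small worked example of the numeric coercion and record-id construction above; the header and field names here are hypothetical:

# Illustrative only.
header = ['parcel_number', 'action_code', 'year', 'assessed_value']
numeric_fields = ['assessed_value']
row = dict(zip(header, ['12-345-67', 'A', '14', '250000']))

row['year'] = '20' + row['year']                    # -> '2014'
row['assessed_value'] = int(row['assessed_value'])  # -> 250000
row['record_id'] = '{0}{1}'.format(row['parcel_number'], row['year'])
# -> '12-345-672014'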
Code Example #33
File: pythonfile.py Project: abreen/socrates.py
    def __run_function(self, context):
        import sys
        import io
        import random

        mod_name = context.__name__
        fn_name = self.target.name
        testing_method = type(self.target) is PythonMethod

        before = self.before

        if before and type(before) is CriteriaObject:
            before = _convert_using_cxt(context, before)

        args = self.__get_args(mod_name, context)

        if type(args) is tuple:
            return {'deduction': self.deduction,
                    'description': self.description,
                    'notes': ["cannot test function",
                              "unexpected number of parameters "
                              "(expected {}, submission has {})".format(
                              args[0], args[1])]}

        locals = {mod_name: context}
        vars = util.ALPHABET[:len(args)]
        args_strings = []

        for i in range(len(vars)):
            locals[vars[i]] = args[i][1]
            args_strings.append("{}={}".format(args[i][0], vars[i]))

        fn_call = "{}({})".format(fn_name, ', '.join(args_strings))

        if testing_method:
            locals["obj"] = before
            code = "obj." + fn_call
        else:
            code = mod_name + "." + fn_call

        if not self.description:
            self.description = self.__build_description()

        # redirect standard in to buffer
        if self.input is not None:
            in_buf = io.StringIO(self.input)
        else:
            in_buf = io.StringIO()

        sys.stdin = in_buf

        # redirect standard out to buffer
        out_buf = io.StringIO()
        sys.stdout = out_buf

        if self.random_seed:
            random.seed(self.random_seed)

        try:
            return_value = eval(code, globals(), locals)
        except KeyboardInterrupt:
            sys.stdin, sys.stdout = sys.__stdin__, sys.__stdout__

            util.warning("interrupting a test")

            return {'deduction': self.deduction,
                    'description': self.description,
                    'notes': ["test was interrupted by the grader"]}
        except:
            import sys
            err = sys.exc_info()

            sys.stdin, sys.stdout = sys.__stdin__, sys.__stdout__

            util.warning("failing a test due to an error ({})".format(err[1]))

            return {'deduction': self.deduction,
                    'description': self.description,
                    'notes': [str(err[1]) + " (" + str(err[0].__name__) + ")"]}

        # restore default standard in/out
        sys.stdin, sys.stdout = sys.__stdin__, sys.__stdout__

        if self.output is not None:
            output = out_buf.getvalue()

        passed = True
        if self.value is not None:
            passed = passed and self.value == return_value
        if self.output is not None:
            passed = passed and self.__output_matches(output)

        # if we are testing a method and there are post-state requirements
        # for the method, fail the test if the object doesn't match the
        # required post-state
        if testing_method and self.after is not None:
            if not _attributes_equal(locals["obj"], self.after):
                passed = False

        if passed:
            return None
        else:
            result = {'deduction': self.deduction,
                      'description': self.description,
                      'notes': []}

            if self.arguments:
                for arg, val in self.arguments:
                    s = "where '{}' is {}".format(arg, _safe_str(val))
                    result['notes'].append(s)

            if testing_method and before is not None:
                result['notes'].append("called object before "
                                       "the method call: "
                                       "{}".format(_safe_str(before)))

            if self.value is not None:
                result['notes'].append("expected value: " + \
                                       _safe_str(self.value))
                result['notes'].append("produced value: " + \
                                       _safe_str(return_value))

            if self.output is not None and type(self.output) is str:
                result['notes'].append("expected output:")
                result['notes'].extend(self.output.split('\n'))
                result['notes'].append("produced output:")
                result['notes'].extend(output.split('\n'))

            if testing_method and self.after is not None:
                result['notes'].append("expected object after "
                                       "the method runs: "
                                       "{}".format(_safe_str(self.after)))

            if self.prompt:
                if self.__should_fail(result):
                    return result
                else:
                    return None
            else:
                return result
Code Example #34
        ostream.close()
    except IOError, e:
        if ostream and ostream != sys.stdout:
            ostream.close()
        warning('Error writing at file "%s". %s', ofilename, e)

if __name__ == '__main__':
    json_filename = None                # Where to store matching tweets
    lookup_text = None                  # Text to search for

    # Parse command-line args for output file name.
    parser = argparse.ArgumentParser(description=(
        'Collect tweets matching a text pattern and store them '
        'continuously in JSON-formatted lines of a local file.'))
    parser.add_argument('-o', '--output', metavar='FILE', type=str,
        default=None, help='output file name')
    parser.add_argument('TEXT', nargs='+', type=str, default=None,
        help='text to search for in tweet content')
    args = parser.parse_args()

    json_filename = args.output         # Where to store matching tweets
    lookup_text = ' '.join(args.TEXT)   # Text to search for

    # Keep searching for tweets, until manually interrupted.
    while True:
        try:
            streamsearch(json_filename, lookup_text)
        except TwitterHTTPError, e:
            warning('Skipping HTTP error %s [...]', str(e).split('\n')[0])
            pass
Code Example #35
File: logisimfile.py Project: abreen/socrates.py
    def run_tests(self):
        from logisim.errors import NoValueGivenError

        logisim_file = logisim.load(self.path)
        broken = logisim_file.broken

        results = dict()
        for c in self.circuits:
            results[c] = []

            # check if circuit couldn't be parsed
            if c.name in broken:
                desc = str(c) + " has major wiring issues"

                util.warning(desc)

                results[c].append({'deduction': c.error_deduction,
                                   'description': desc})
                continue

            circuit = logisim_file.get_circuit(c.name)

            # check if circuit is missing
            if circuit is None:
                util.warning("missing " + str(c))

                results[c].append({'deduction': c.point_value,
                                   'description': "missing " + str(c)})
                continue

            # check if circuit's output pins have any input
            for pin in circuit.output_pins:
                if len(pin.input_from) > 0:
                    break
            else:
                desc = "output pins of " + str(c) + " are not connected " + \
                       "to anything"

                util.warning(desc)

                results[c].append({'deduction': c.error_deduction,
                                   'description': desc})
                continue

            # check that the circuit's pins have labels
            without_labels = []
            for pin in circuit.input_pins + circuit.output_pins:
                if not hasattr(pin, 'label'):
                    without_labels.append(repr(pin))

            if without_labels:
                if len(without_labels) == 1:
                    desc = "a pin is missing a label in " + str(c)
                else:
                    desc = "pins are missing labels in " + str(c)

                util.warning(desc)

                results[c].append({'deduction': c.error_deduction,
                                   'description': desc,
                                   'notes': without_labels})
                continue

            # check that the circuit has the pins we require
            label_errors = _check_labels(c, circuit)
            if label_errors:
                desc = str(c) + " is missing required pins"

                util.warning(desc)

                results[c].append({'deduction': c.error_deduction,
                                   'description': desc,
                                   'notes': label_errors})
                continue

            # actually run any tests
            try:
                for t in c.tests:
                    result = t.run(circuit)
                    if result:
                        outer_result = {'description': str(c) + \
                                                       " failed a test"}

                        outer_result['subresults'] = [result]
                        results[c].append(outer_result)

            except NoValueGivenError as e:
                desc = str(c) + " could not be tested"
                results[c].append({'deduction': c.error_deduction,
                                   'description': desc,
                                   'notes': [str(e)]})

        return [item for subl in results.values() for item in subl]
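
The output-pin check in example #35 leans on Python's for/else: the else branch only runs when the loop completes without hitting break, i.e. when no output pin has any input. A minimal, self-contained sketch of that pattern (the _Pin class is a made-up stand-in for the logisim pin objects):

class _Pin:
    def __init__(self, input_from):
        self.input_from = input_from

def any_output_connected(output_pins):
    # The else clause fires only if no break occurred, i.e. no pin had input.
    for pin in output_pins:
        if len(pin.input_from) > 0:
            break
    else:
        return False
    return True

print(any_output_connected([_Pin([]), _Pin(['some wire'])]))  # True
print(any_output_connected([_Pin([]), _Pin([])]))             # False
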
コード例 #36
0
ファイル: concourse.py プロジェクト: pombredanne/cc-utils
def deploy_concourse_landscape(
        config_set: ConfigurationSet,
        deployment_name: str='concourse',
        timeout_seconds: int=180,
):
    ensure_helm_setup()

    # Fetch all the necessary config
    config_factory = global_ctx().cfg_factory()
    concourse_cfg = config_set.concourse()

    # Kubernetes cluster config
    kubernetes_config_name = concourse_cfg.kubernetes_cluster_config()
    kubernetes_config = config_factory.kubernetes(kubernetes_config_name)

    # Container-registry config
    image_pull_secret_name = concourse_cfg.image_pull_secret()
    container_registry = config_factory.container_registry(image_pull_secret_name)
    cr_credentials = container_registry.credentials()

    # TLS config
    tls_config_name = concourse_cfg.tls_config()
    tls_config = config_factory.tls_config(tls_config_name)
    tls_secret_name = concourse_cfg.tls_secret_name()

    # Helm config
    helm_chart_default_values_name = concourse_cfg.helm_chart_default_values_config()
    default_helm_values = config_factory.concourse_helmchart(helm_chart_default_values_name).raw
    helm_chart_values_name = concourse_cfg.helm_chart_values()
    custom_helm_values = config_factory.concourse_helmchart(helm_chart_values_name).raw

    # Proxy config
    if concourse_cfg.proxy():
        proxy_cfg_name = concourse_cfg.proxy()
        proxy_cfg = config_factory.proxy(proxy_cfg_name)

        info('Creating config-maps for the mitm proxy ...')
        create_proxy_configmaps(
            proxy_cfg=proxy_cfg,
            namespace=deployment_name,
        )

    info('Creating default image-pull-secret ...')
    create_image_pull_secret(
        credentials=cr_credentials,
        image_pull_secret_name=image_pull_secret_name,
        namespace=deployment_name,
    )

    info('Creating tls-secret ...')
    create_tls_secret(
        tls_config=tls_config,
        tls_secret_name=tls_secret_name,
        namespace=deployment_name,
    )

    warning(
        'Teams will not be set up properly on Concourse if the deployment times out, '
        'even if Helm eventually succeeds. In this case, run the deployment command again after '
        'Concourse is available.'
    )

    instance_specific_helm_values = create_instance_specific_helm_values(
        concourse_cfg=concourse_cfg, config_factory=config_factory,
    )
    chart_version = concourse_cfg.helm_chart_version()

    # Add proxy sidecars to instance specific values.
    # NOTE: Only works for helm chart version 3.8.0 or greater
    if concourse_cfg.proxy():
        chart_version_semver = semver.parse_version_info(concourse_cfg.helm_chart_version())
        min_version = semver.parse_version_info('3.8.0')
        if chart_version_semver >= min_version:
            instance_specific_helm_values = add_proxy_values(
                config_set=config_set,
                instance_specific_values=instance_specific_helm_values,
            )
        else:
            fail('Proxy deployment requires the configured helm chart version to be at least 3.8.0')

    execute_helm_deployment(
        kubernetes_config,
        deployment_name,
        'stable/concourse',
        deployment_name,
        default_helm_values,
        custom_helm_values,
        instance_specific_helm_values,
        chart_version=chart_version,
    )

    info('Waiting until the webserver can be reached ...')
    deployment_helper = kube_ctx.deployment_helper()
    is_web_deployment_available = deployment_helper.wait_until_deployment_available(
        namespace=deployment_name,
        name='concourse-web',
        timeout_seconds=timeout_seconds,
    )
    if not is_web_deployment_available:
        fail(
            dedent(
                """No Concourse webserver reachable after {t} second(s).
                Check status of Pods created by "concourse-web"-deployment in namespace {ns}
                """
            ).format(
                t = timeout_seconds,
                ns = deployment_name,
            )
        )
    info('Webserver became accessible.')

    # Even though the deployment is available, the ingress might need a few seconds to update.
    time.sleep(3)

    info('Setting teams on Concourse ...')
    set_teams(config=concourse_cfg)
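
The proxy-sidecar gate near the end of example #36 compares the configured helm chart version against 3.8.0 via the semver package. A minimal sketch of just that check, reusing the snippet's parse_version_info call (depending on the installed semver release, semver.Version.parse may be the preferred spelling); the version strings below are made-up:

import semver

def proxy_supported(chart_version: str) -> bool:
    # Proxy sidecars are only injected for chart version 3.8.0 or greater.
    return semver.parse_version_info(chart_version) >= semver.parse_version_info('3.8.0')

print(proxy_supported('3.7.2'))   # False
print(proxy_supported('3.8.0'))   # True
print(proxy_supported('4.0.1'))   # True
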
コード例 #37
0
ファイル: reconcile.py プロジェクト: ryan-leung/ml_monorepo
    parser.print_help(util.LOGFILE)
    exit(1)

#Only a single day should be output to the screen
if (args.singleDate is None) and (args.report is False):
    parser.print_help(util.LOGFILE)
    exit(1)

date = fromDate
while date < toDate:

    try:
        reco = backoffice.singleDayReconcile(date, args.old)
        if args.report:
            output = open(
                os.environ["REPORT_DIR"] + "/borecon/" +
                date.strftime("%Y%m%d") + ".txt", "w")
        else:
            output = sys.stdout
        output.write(reco)
        if output != sys.stdout:
            output.close()
        if args.email:
            email(reco, date.strftime("%Y%m%d"))
        util.info("Reconciled day {}".format(date.strftime("%Y%m%d")))
    except backoffice.PositionSourceError:
        util.warning("Data to reconcile day not found: {}\n".format(
            date.strftime("%Y%m%d")))
    finally:
        date = date + dayDelta
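
Example #37 advances the date inside finally, so one failed day cannot stall the whole reconciliation loop. A stripped-down sketch of that shape with a placeholder workload (run_daily and process_day are illustrative names, not part of the original script):

import datetime

def run_daily(from_date, to_date, process_day):
    day = datetime.timedelta(days=1)
    date = from_date
    while date < to_date:
        try:
            process_day(date)
        except Exception as e:
            print('skipping %s: %s' % (date.strftime('%Y%m%d'), e))
        finally:
            date = date + day   # always advance, even after a failure

run_daily(datetime.date(2020, 1, 1), datetime.date(2020, 1, 4),
          lambda d: print('processed', d.strftime('%Y%m%d')))
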
コード例 #38
0
ファイル: ci.py プロジェクト: efcs/zorg
 def evaluate(self, command):
     warning("'negate' filter is deprecated, use 'not result' "
             "filter expression")
     command.result = not command.result
コード例 #39
0
ファイル: dataIO.py プロジェクト: pulsatrixwx/PulsatrixWx
import numpy as np
from util import warning, fatalError

#from callCount import FunctionCallCount

try:
    # Look for PyNIO ...
    import Nio as reader
    print "Using PyNIO as data reader."
except ImportError:
    # PyNIO is not installed, so ...
    try:
        from netCDF4 import Dataset as reader
        warning("Using netCDF4-python as data reader.  NetCDF format supported only.")
    except ImportError:
        try:
            # Look for scipy's NetCDF reader ...
            import scipy.io.netcdf as reader
            warning("Using scipy.io as data reader.  NetCDF format supported only.")
        except ImportError:
            # scipy's NetCDF reader isn't installed either.  Uhoh ...
            fatalError("No scientific data reader found.  Exiting ...")

class DataIO:
    """
    DataIO
    Purpose:    Handles the reading and writing of data for HootPy using whichever reader is installed
    Started:    30 September 2010 by Tim Supinie (tsupinie#@ou.edu)
    Completed:    [not yet]
    Modified:    [not yet]
    """
コード例 #40
0
ファイル: samsungparser.py プロジェクト: HongilKim/scat
    def process_common_data(self, pkt):
        pkt = pkt[10:-1]
        arfcn = 0

        if pkt[0] == 0x03:  # Common Signalling Info
            #util.xxd(pkt)
            # pkt[1] - pkt[4] == ts
            chan_type = pkt[5]
            chan_subtype = pkt[6]
            direction = pkt[7]  # 2 - DL, 1 - UL
            msg_len = pkt[8] | (pkt[9] << 8)
            msg_content = pkt[10:]

            if chan_type == 0x30:  # UMTS RRC
                chan_map_ul = {
                    0x30: util.gsmtap_umts_rrc_types.UL_CCCH,
                    0x31: util.gsmtap_umts_rrc_types.UL_DCCH
                }
                chan_map_dl = {
                    0x30: util.gsmtap_umts_rrc_types.DL_CCCH,
                    0x31: util.gsmtap_umts_rrc_types.DL_DCCH,
                    0x32: util.gsmtap_umts_rrc_types.BCCH_BCH,
                    0x34: util.gsmtap_umts_rrc_types.PCCH
                }

                subtype = 0
                if direction == 2:
                    subtype = chan_map_dl[chan_subtype]
                    arfcn = self.umts_last_uarfcn_dl
                elif direction == 1:
                    subtype = chan_map_ul[chan_subtype]
                    arfcn = self.umts_last_uarfcn_ul
                else:
                    print('Unknown direction %02x' % direction)
                    return b''

                gsmtap_hdr = util.create_gsmtap_header(
                    version=2,
                    payload_type=util.gsmtap_type.UMTS_RRC,
                    arfcn=arfcn,
                    sub_type=subtype)
                return gsmtap_hdr + msg_content
            elif chan_type == 0x01:  # UMTS NAS
                if direction == 2:
                    arfcn = self.umts_last_uarfcn_dl
                elif direction == 1:
                    arfcn = self.umts_last_uarfcn_ul

                gsmtap_hdr = util.create_gsmtap_header(
                    version=2, payload_type=util.gsmtap_type.ABIS, arfcn=arfcn)
                return gsmtap_hdr + msg_content
            elif chan_type == 0x20:  # GSM RR
                # TODO: CCCH and SACCH are not distinguished by headers!
                # Some values are RR message, some are RR_short_PD
                if direction == 2:  # RR DL w/ pseudo length
                    lapdm_address = b'\x01'
                    # Control field
                    lapdm_control = b'\x03'
                    # length field
                    if msg_len > 63:
                        util.warning('message length longer than 63 (%s)' %
                                     msg_len)
                        return b''
                    lapdm_len = bytes([(msg_len << 2) | 0x01])

                    #msg_content = lapdm_address + lapdm_control + lapdm_len + msg_content

                    gsmtap_hdr = util.create_gsmtap_header(
                        version=2,
                        payload_type=util.gsmtap_type.UM,
                        sub_type=util.gsmtap_channel.CCCH
                    )  # Subtype (XXX: All CCCH)
                    return gsmtap_hdr + msg_content
                elif direction == 1:  # Only RR
                    gsmtap_hdr = util.create_gsmtap_header(
                        version=2, payload_type=util.gsmtap_type.ABIS)
                    return gsmtap_hdr + msg_content
            elif chan_type == 0x21:  # GSM RLC/MAC
                arfcn = 1
                if direction == 1:
                    arfcn = arfcn | (1 << 14)
                gsmtap_hdr = util.create_gsmtap_header(
                    version=2,
                    payload_type=util.gsmtap_type.UM,
                    arfcn=arfcn,
                    sub_type=util.gsmtap_channel.PACCH
                )  # Subtype (PACCH dissects as MAC)
                #return gsmtap_hdr + msg_content
                return b''
            else:
                print('Unknown channel type %02x for subcommand 0x03' %
                      chan_type)
                return b''
        else:
            print('Unknown subcommand %02x for command 0x21' % pkt[0])
            #util.xxd(pkt)
            return b''

        return b''
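
In the GSM RR branch of example #40, the pseudo-length octet packs the message length into the upper six bits of a single byte and sets the lowest bit, which is why lengths above 63 are rejected. A small sketch of just that packing (lapdm_length_octet is an illustrative name):

def lapdm_length_octet(msg_len: int) -> bytes:
    """Pack a message length into a single pseudo-length octet as above:
    length in the upper six bits, lowest bit set; lengths > 63 do not fit."""
    if msg_len < 0 or msg_len > 63:
        raise ValueError('message length must be 0..63, got %d' % msg_len)
    return bytes([(msg_len << 2) | 0x01])

assert lapdm_length_octet(0) == b'\x01'
assert lapdm_length_octet(23) == bytes([0x5d])   # (23 << 2) | 1 == 0x5d
assert lapdm_length_octet(63) == b'\xfd'
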
コード例 #41
0
ファイル: barra.py プロジェクト: ryan-leung/ml_monorepo
def process(filePath, source, verifyOnly=False):
    #process the RSK files for now
    if filePath.find(".RSK.") < 0:
        return
    file = open(filePath, "r")

    #The first 2 lines should be the pricedate and the modeldate
    tokens = file.readline().strip().split(":")
    if tokens[0] != "PriceDate":
        util.error("It doesn't seem like a barra daily format")
        raise Exception
    else:
        priceDate = __barraDateToCompact(tokens[1].strip())

    tokens = file.readline().strip().split(":")
    if tokens[0] != "ModelDate":
        util.error("It doesn't seem like a barra daily format")
        raise Exception
    else:
        #pass
        modelDate = __barraDateToCompact(tokens[1].strip())

    # If we have acquisition times, use these for real born time.
    # Else, use the priceDate + 1 day
    fileInfo = datafiles.read_info_file(filePath)
    if fileInfo['date_last_absent'] is not None:
        timestamp = util.convert_date_to_millis(fileInfo['date_first_present'])
        backfill = 0
        database.setAttributeAutoCreate(True)
    else:
        date = priceDate + datetime.timedelta(days=1)
        timestamp = util.convert_date_to_millis(date.strftime("%Y%m%d"))
        backfill = 1
        database.setAttributeAutoCreate(True)

    #get the header names. comma separated, surrounded by double quotes
    line = file.readline()
    headers = __getListFromBarraLine(line)

    #init the database
    #database.dropXrefCache()
    #database.addXrefCache(timestamp) #cache xrefs

    #######MAPPING VERIFICATION CODE########
    inconcistentMappings = []
    ########################################

    for line in file:
        data = __getListFromBarraLine(line)

        if len(data) != len(headers):
            util.warning("Skipping bad line: {}".format(line))
            continue

        data = dict(zip(headers, data))

        #######MAPPING VERIFICATION CODE########
        if verifyOnly:
            result = __verifyMapping(
                data["BARRID"], util.cusip8to9(data["CUSIP"]), data["TICKER"],
                source, timestamp,
                newdb.xrefsolve.preferUS)  #mirror the getSecid call
            if result is not None: inconcistentMappings.append(result)
            continue
        ########################################

        secid = __getSecId(data["BARRID"], util.cusip8to9(data["CUSIP"]),
                           data["TICKER"], source, timestamp,
                           newdb.xrefsolve.preferUS, filePath)
        if secid is None:
            continue

        #Now, insert barra attributes and attribute values
        __removeUnwantedAttributes(data)
        for attributeName, attributeValue in data.iteritems():
            if isinstance(attributeValue, str):
                table = "s"
            elif isinstance(attributeValue, int):
                table = "n"
            elif isinstance(attributeValue, float):
                table = "n"
            else:
                util.error(
                    "Dude, attribute values should be either int,float or str")
                raise

            #assert attributeName.startswith("INDNAME") and table=="s"

            #With the exception of capitalization and price, the other barra attributes
            #are evaluated monthly; for them, the date should be the model date. Price we
            #ignore, while for capitalization we only create a new tuple if the value has
            #changed by more than a threshold since the last date for which we have a tuple.
            if attributeName == "PRICE":
                continue
            elif attributeName == "CAPITALIZATION":
                database.insertAttribute(
                    "sec", "n", secid, util.convert_date_to_millis(priceDate),
                    source, attributeName, attributeValue, timestamp, None,
                    backfill, False, True, __capEquals)
            else:
                database.insertAttribute(
                    "sec", table, secid,
                    util.convert_date_to_millis(modelDate), source,
                    attributeName, attributeValue, timestamp, None, backfill)

    file.close()

    #######MAPPING VERIFICATION CODE########
    if verifyOnly:
        return inconcistentMappings
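
The CAPITALIZATION branch in example #41 passes a custom comparison callback (__capEquals) so that small fluctuations do not create new tuples; its body is not shown in the snippet. A plausible sketch, assuming a relative-change threshold (the 5% figure is illustrative, not taken from the source):

def cap_equals(old_value, new_value, rel_threshold=0.05):
    """Treat two capitalization values as equal unless the relative
    change exceeds rel_threshold (illustrative threshold, not from the source)."""
    if old_value is None or new_value is None:
        return old_value is new_value
    if old_value == 0:
        return new_value == 0
    return abs(new_value - old_value) / abs(old_value) <= rel_threshold

assert cap_equals(100.0, 103.0)          # 3% move: treated as unchanged
assert not cap_equals(100.0, 110.0)      # 10% move: stored as a new tuple
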
コード例 #42
0
def process(filepath, source):
    #if full
    if "full" in source or "load" in source:
        #timestamp=util.convert_date_to_millis("18000101");
        fileInfo = datafiles.read_info_file(filepath)
        timestamp = util.convert_date_to_millis(fileInfo['date_modified'])
        backfill = 1
        database.setAttributeAutoCreate(True)
        optimize = False
    else:
        fileInfo = datafiles.read_info_file(filepath)
        if fileInfo["date_last_absent"] is None:
            timestamp = util.convert_date_to_millis(fileInfo['date_modified'])
            backfill = 0
        else:
            timestamp = util.convert_date_to_millis(
                fileInfo['date_first_present'])
            backfill = 0
        database.setAttributeAutoCreate(False)
        optimize = True

    if "_g" in source:
        global_cs = True
    else:
        global_cs = False

    database.setAttributeAutoCreate(True)
    database.setCurrencyAutoCreate(True)

    #open the zipped file
    zf = zipfile.ZipFile(filepath)
    names = zf.namelist()
    assert len(names) == 1
    file = zf.open(names[0])

    #variables that persist through loop
    #presented here for clarity only
    table = None
    keyNames = None
    attributeNames = None
    numOfKeys = None

    if optimize:
        parsedLines = __optimize(file)

    #process lines
    counter = 0
    while True:
        if optimize:
            if len(parsedLines) == 0: break
            line = parsedLines.pop(0)

            if len(line) == 3:
                (command, keyValues,
                 attributeValues) = line[0], line[1], line[2]
            elif len(line) == 4:
                (table, numOfKeys, keyNames,
                 attributeNames) = line[0], line[1], line[2], line[3]
                continue
            else:
                continue
        else:
            line = __getSplitLine(file)
            if line is None: break

            if line[0] in ("T", "F", "E"):
                continue
            elif line[0] in ("H"):
                (table, numOfKeys, keyNames,
                 attributeNames) = __parseHeaderLine(line)
                continue
            elif line[0] in ("I,C,D,R"):
                (command, keyValues,
                 attributeValues) = __parseDataLine(line, numOfKeys)
            else:
                util.warning("Oh no! a K command on table {}: {}".format(
                    table, line))
                continue

        #progress
        counter = counter + 1
        if counter % 10000 == 0:
            util.info("{}: Processing line {}k".format(datetime.datetime.now(),
                                                       counter / 1000))

        #remove keys that are replicated in attributes
        keys = {}
        keys.update(zip(keyNames, keyValues))
        attributes = {}

        if command in ("I", "C"):
            for n, v in zip(attributeNames, attributeValues):
                if n not in keys and v != "": attributes[n] = v
        elif command in ("D"):
            for n, v in zip(attributeNames, attributeValues):
                if n not in keys and v == " ": attributes[n] = None
        elif command in ("R"):
            for n, v in zip(attributeNames, attributeValues):
                if n not in keys: attributes[n] = None

        if table == "security":
            __processSecurity(command, keys, attributes, timestamp, source,
                              backfill, global_cs)
        elif table == "sec_dprc":
            __processPrice(command, keys, attributes, timestamp, source,
                           backfill, global_cs)
            __processCSHOC(command, keys, attributes, timestamp, source,
                           backfill, global_cs)
        elif table == "company":
            __processCompany(command, keys, attributes, timestamp, source,
                             backfill, global_cs)
        elif table == "sec_divid":
            __processDividend(command, keys, attributes, timestamp, source,
                              backfill, global_cs)
        elif table == "sec_split":
            __processSplit(command, keys, attributes, timestamp, source,
                           backfill, global_cs)
        elif table == "co_industry":
            __processIndustry(command, keys, attributes, timestamp, source,
                              backfill, global_cs)
        elif table == "co_hgic":
            __processHgic(command, keys, attributes, timestamp, source,
                          backfill, global_cs)
        elif table in ("co_afnd1", "co_afnd2", "co_ifndq", "co_ifndsa",
                       "co_ifndytd"):
            __processFundamental(command, keys, attributes, timestamp, source,
                                 backfill, global_cs)
        elif table in ("co_idesind", 'co_adesind'):
            __processDesind(command, keys, attributes, timestamp, source,
                            backfill, global_cs)
        elif table in ("co_amkt", 'co_imkt'):
            __processMkt(command, keys, attributes, timestamp, source,
                         backfill, global_cs)
        elif table == "co_filedate":
            __processFiledate(command, keys, attributes, timestamp, source,
                              backfill, global_cs)
        elif table == "adsprate":
            __processCredit(command, keys, attributes, timestamp, source,
                            backfill, global_cs)
        elif table == "exrt_dly":
            __processExchange(command, keys, attributes, timestamp, source,
                              backfill, global_cs)
        else:
            continue

    #__processBufferedFundamentals(source, backfill, buffer)
    file.close()
    zf.close()
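
The I/C/D/R handling in example #42 derives the attribute dict differently per command: inserts and changes keep non-empty values, deletes null only the fields flagged with a single space, and removes null every non-key field. A compact sketch of that split with made-up field names:

def split_command(command, key_names, key_values, attribute_names, attribute_values):
    """Rebuild the keys/attributes split used above: I/C keep non-empty
    values, D nulls only fields flagged with a single space, R nulls all
    non-key fields."""
    keys = dict(zip(key_names, key_values))
    attributes = {}
    for name, value in zip(attribute_names, attribute_values):
        if name in keys:
            continue
        if command in ('I', 'C'):
            if value != '':
                attributes[name] = value
        elif command == 'D':
            if value == ' ':
                attributes[name] = None
        elif command == 'R':
            attributes[name] = None
    return keys, attributes

# e.g. a change ('C') that leaves one field blank and sets another:
print(split_command('C', ['gvkey'], ['001234'], ['price', 'volume'], ['', '42']))
# ({'gvkey': '001234'}, {'volume': '42'})
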
コード例 #43
0
                        os.path.join(args.location, datestr,
                                     "exec_order_ts.txt"), "w") as outfile:
                    outfile.write("\n".join(res))
                    outfile.write("\n")
            if args.fills:
                data = getLogs(datestr, r"fills\.txt")
                ticker2secid = getTicker2secid(
                    os.path.join(args.location, datestr, "tickers.txt"))
                res = parseFills(ticker2secid, data)
                with open(
                        os.path.join(args.location, datestr,
                                     "fills.{}.txt.tmp".format(datestr)),
                        "w") as outfile:
                    outfile.write("\n".join(res))
                    outfile.write("\n")
            if args.infraRecon:
                ticker2secid = getTicker2secid(
                    os.path.join(args.location, datestr, "tickers.txt"))
                res = doInfraRecon(ticker2secid, datestr)
                with open(
                        os.path.join(args.location, datestr,
                                     "fills.{}.txt.tmp".format(datestr)),
                        "w") as outfile:
                    outfile.write("\n".join(res))
                    outfile.write("\n")
        except Exception as e:
            util.warning(e)
            pass
        finally:
            date = date + dayDelta
コード例 #44
0
ファイル: mfclean.py プロジェクト: radio-astro/gyimager
def mfclean(options):
    clark_options = {}
    clark_options["gain"] = options.gain
    clark_options["iterations"] = options.iterations
    clark_options["cycle_speedup"] = options.cycle_speedup

    max_baseline = options.max_baseline if options.max_baseline > 0.0 else \
        10000.0

    processor_options = {}
    processor_options["processor"] = options.processor
    processor_options["w_max"] = max_baseline
    processor_options["padding"] = 1.0
    processor_options["image"] = options.image
    processor_options["threads"] = options.threads
    processor_options["weighttype"] = options.weighttype
    processor_options["rmode"] = options.rmode
    processor_options["noise"] = options.noise
    processor_options["robustness"] = options.robustness
    processor_options["profile"] = options.profile
    processor = processors.create_data_processor(options.ms, processor_options)

    channel_freq = processor.channel_frequency()
    channel_width = processor.channel_width()

    max_freq = numpy.max(channel_freq)
    image_size = 2.0 * util.full_width_half_max(70.0, max_freq)

    # TODO: Cyril mentioned the above image size estimation is too conservative.
    # Need to check this and find a better estimate if necessary. For now, will
    # just multiply estimated FOV by 2.0.
    image_size *= 2.0

    (n_px, delta_px) = util.image_configuration(image_size, max_freq,
        max_baseline)

    util.notice("image configuration:")
    util.notice("    size: %d x %d pixel" % (n_px, n_px))
    util.notice("    angular size: %.2f deg"
        % (image_size * 180.0 / numpy.pi))
    util.notice("    angular resolution @ 3 pixel/beam: %.2f arcsec/pixel"
        % (3600.0 * delta_px * 180.0 / numpy.pi))

    # TODO: Need to implement support for multiple channel images. Currently,
    # all data channels are combined into a single MFS image per correlation.
    image_shape = (1, 4, n_px, n_px)
    image_coordinates = pyrap.images.coordinates.coordinatesystem(
        casaimwrap.make_coordinate_system(image_shape[2:], [delta_px,
        delta_px], processor.phase_reference(), channel_freq, channel_width))

    n_model = 1
    # TODO: Check code for n_model > 1!
    assert(n_model == 1)

    # Comment from CASA source code:
    #
    # Set to search for peak in I^2+Q^2+U^2+V^2 domain or each stokes plane
    # separately. Ignored for hogbom and msclean for now.
#    join_stokes = False
    join_stokes = True

    # Compute approximate PSFs.
    util.notice("computing approximate point spread functions...")
    psf = [None for i in range(n_model)]
    beam = [None for i in range(n_model)]
    for i in range(n_model):
        psf[i] = processor.point_spread_function(image_coordinates, image_shape)
        fit = casaimwrap.fit_gaussian_psf(image_coordinates.dict(),
            psf[i])
        assert(fit["ok"])

        beam[i] = BeamParameters((fit["major"] * numpy.pi) / (3600.0 * 180.0),
            (fit["minor"] * numpy.pi) / (3600.0 * 180.0), (fit["angle"]
            * numpy.pi) / 180.0)

        util.notice("model %d/%d: major axis: %f arcsec, minor axis: %f arcsec,"
            " position angle: %f deg" % (i, n_model - 1, abs(fit["major"]),
            abs(fit["minor"]), fit["angle"]))

    # Validate PSFs.
    (min_psf, max_psf, max_psf_outer, psf_patch_size, max_sidelobe) = \
        validate_psf(image_coordinates, psf, beam)
    clark_options["psf_patch_size"] = psf_patch_size

    updated = [False for i in range(n_model)]
    weight = [None for i in range(n_model)]
    model = [numpy.zeros(image_shape) for i in range(n_model)]
    delta = [numpy.zeros(image_shape) for i in range(n_model)]
    residual = [numpy.zeros(image_shape) for i in range(n_model)]

    if join_stokes:
        iterations = numpy.zeros((n_model, 1, image_shape[0]))
        stokes = ["JOINT"]
        cr_slices = [slice(None)]
    else:
        iterations = numpy.zeros((n_model, image_shape[1], image_shape[0]))
        stokes = image_coordinates.get_coordinate("stokes").get_stokes()
        cr_slices = [slice(i, i + 1) for i in range(4)]

    cycle = 0
    diverged = False
    absmax = options.threshold
    previous_absmax = 1e30

    while absmax >= options.threshold and numpy.max(iterations) \
        < options.iterations and (cycle == 0 or any(updated)):

        util.notice(">> starting major cycle: %d <<" % cycle)

        # Comment from CASA source code:
        #
        # Make the residual images. We do an incremental update for cycles after
        # the first one. If we have only one model then we use convolutions to
        # speed the processing
        util.notice("computing residuals...")

        # TODO: If n_models > 1, need to compute residuals from the sum of
        # the degridded visibilities (see LofarCubeSkyEquation.cc).
        assert(n_model == 1)
        if cycle == 0:
            # Assuming the initial models are zero, the residual visibilities
            # equal the observed visibilities and therefore we only need to
            # grid them.
            for i in range(n_model):
                residual[i], weight[i] = processor.grid(image_coordinates,
                    image_shape, processors.Normalization.FLAT_NOISE)
        else:
            for i in range(n_model):
                if updated[i]:
                    residual[i], weight[i] = \
                        processor.residual(image_coordinates, model[i],
                            processors.Normalization.FLAT_NOISE,
                            processors.Normalization.FLAT_NOISE)
                updated[i] = False

        # Compute residual statistics.
        (absmax, resmin, resmax) = max_field(residual, weight)

        # Print some statistics.
        for i in range(n_model):
            util.notice("model %d/%d: min residual: %f, max residual: %f"
                % (i, n_model - 1, resmin[i], resmax[i]))
        util.notice("peak residual: %f" % absmax)

        # Comment from CASA source code:
        #
        # Check if absmax is 5% above its previous value.
        #
        # TODO: Value used does not look like 5%?
        if absmax >= 1.000005 * previous_absmax:
            diverged = True
            break

        # Store absmax of this major cycle for later reference.
        previous_absmax = absmax

        # Check stop criterion.
        if absmax < options.threshold:
            break

        # TODO: What is this really used for? And does the max weight indeed
        # correspond to sensitivity in Jy/beam?
        if cycle == 0:
            max_weight = 0.0
            for i in range(n_model):
                max_weight = max(max_weight, numpy.max(weight[i]))
            util.notice("maximum sensitivity: %f Jy/beam" % (1.0
                / numpy.sqrt(max_weight)))

        # Comment from CASA source code:
        #
        # Calculate the threshold for this cycle. Add a safety factor
        #
        # fractionOfPsf controls how deep the cleaning should go.
        # There are two user-controls.
        # cycleFactor_p : scale factor for the PSF sidelobe level.
        #                        1 : clean down to the psf sidelobe level
        #                        <1 : go deeper
        #                        >1 : shallower : stop sooner.
        #                        Default : 1.5
        # cycleMaxPsfFraction_p : scale factor as a fraction of the PSF peak
        #                                    must be 0.0 < xx < 1.0 (obviously)
        #                                    Default : 0.8
        fraction_of_psf = min(options.cycle_max_psf_fraction,
            options.cycle_factor * max_sidelobe)

        if fraction_of_psf > 0.8:
            util.warning("PSF fraction for threshold computation is too"
                " high: %f. Forcing to 0.8 to ensure that the threshold is"
                " smaller than the peak residual!" % fraction_of_psf)
            fraction_of_psf = 0.8   # painfully slow!

        # Update cycle threshold.
        cycle_threshold = max(0.95 * options.threshold, fraction_of_psf
            * absmax)
        clark_options["cycle_threshold"] = cycle_threshold

        util.notice("minor cycle threshold max(0.95 * %f, peak residual * %f):"
            " %f" % (options.threshold, fraction_of_psf, cycle_threshold))

        # Execute the minor cycle (Clark clean) for each channel of each model.
        util.notice("starting minor cycle...")
        for i in range(n_model):
            if max(abs(resmin[i]), abs(resmax[i])) < cycle_threshold:
                util.notice("model %d/%d: peak residual below threshold"
                    % (i, n_model - 1))
                continue

            if max_psf[i] <= 0.0:
                util.warning("model %d/%d: point spread function negative or"
                    " zero" % (i, n_model - 1))
                continue

            # Zero the delta image for this model.
            delta[i].fill(0.0)

            for (cr, cr_slice) in enumerate(cr_slices):
                for ch in range(len(residual[i])):
                    # TODO: The value of max_weight is only updated during
                    # cycle 0. Is this correct?
                    #
                    assert(len(weight[i].shape) == 2
                        and weight[i].shape[:2] == residual[i].shape[:2])

                    plane_weight = numpy.sqrt(weight[i][ch, cr_slice]
                        / max_weight)
                    if numpy.any(plane_weight > 0.01):
                        weight_mask = numpy.ones((residual[i].shape[2:]))
                    else:
                        weight_mask = numpy.zeros((residual[i].shape[2:]))

                    # Call CASA Clark clean implementation (minor cycle).
                    # TODO: When cleaning each Stokes parameter separately,
                    # the PSF of Stokes I is used for all others as well?
                    #
                    # Comment from CASA source code:
                    #
                    # We only want the PSF for the first polarization so we
                    # iterate over polarization LAST.
                    #
                    result = casaimwrap.clark_clean(psf[i][ch,0,:,:],
                        residual[i][ch,cr_slice,:,:], weight_mask,
                        iterations[i,cr,ch], clark_options)

                    if result["iterations"] > iterations[i,cr,ch]:
                        updated[i] = True
                        delta[i][ch,cr_slice,:,:] = result["delta"]
                        iterations[i,cr,ch] = result["iterations"]
                    else:
                        assert(numpy.all(result["delta"] == 0.0))

                util.notice("model %d/%d: stokes: %s, cleaned: %f Jy, "
                    "iterations per channel: %s" % (i, n_model - 1,
                    stokes[cr], numpy.sum(delta[i][ch,cr_slice,:,:]),
                    str(iterations[i,cr,:])))

        # Update model images if required.
        for i in range(n_model):
            if updated[i]:
                model[i] += delta[i]

        # Update major cycle counter.
        cycle += 1

    if any(updated):
        util.notice("finalizing residual images for all fields...")
        for i in range(n_model):
            if updated[i]:
                residual[i], weight[i] = processor.residual(image_coordinates,
                    model[i], processors.Normalization.FLAT_NOISE,
                    processors.Normalization.FLAT_NOISE)
        (absmax, resmin, resmax) = max_field(residual, weight)

        # Print some statistics.
        for i in range(n_model):
            util.notice("model %d/%d: min residual: %f, max residual: %f"
                % (i, n_model - 1, resmin[i], resmax[i]))
        util.notice("peak residual: %f" % absmax)
    else:
        util.notice("residual images for all fields are up-to-date...")

    # Store output images.
    util.notice("storing average response...")
    util.store_image(options.image + ".response", image_coordinates,
        processor.response(image_coordinates, image_shape))

    util.notice("storing model images...")
    for i in range(n_model):
        util.store_image(options.image + ".model.flat_noise",
            image_coordinates, model[i])
        util.store_image(options.image + ".model", image_coordinates,
            processor.normalize(image_coordinates, model[i],
            processors.Normalization.FLAT_NOISE,
            processors.Normalization.FLAT_GAIN))

    util.notice("storing residual images...")
    for i in range(n_model):
        util.store_image(options.image + ".residual.flat_noise",
            image_coordinates, residual[i])
        util.store_image(options.image + ".residual", image_coordinates,
            processor.normalize(image_coordinates, residual[i],
            processors.Normalization.FLAT_NOISE,
            processors.Normalization.FLAT_GAIN))

    util.notice("storing restored images...")
    for i in range(n_model):
        restored = restore_image(image_coordinates.dict(), model[i],
            residual[i], beam[i])

        util.store_image(options.image + ".restored.flat_noise",
            image_coordinates, restored)
        util.store_image(options.image + ".restored", image_coordinates,
            processor.normalize(image_coordinates, restored,
            processors.Normalization.FLAT_NOISE,
            processors.Normalization.FLAT_GAIN))

    # Print some statistics.
    for i in range(n_model):
        util.notice("model %d/%d: clean flux: %f, residual rms: %f" % (i,
            n_model - 1, numpy.sum(model[i]), numpy.std(residual[i])))

    if diverged:
        util.error("clean diverged.")
    elif absmax < options.threshold:
        util.notice("clean converged.")
    else:
        util.warning("clean did not reach threshold: %f Jy."
            % options.threshold)
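
The minor-cycle threshold in example #44 combines the two user controls (cycle_factor, default 1.5, and cycle_max_psf_fraction, default 0.8 per the CASA comment) with the measured PSF sidelobe level and the peak residual. A compact restatement of just that arithmetic as a standalone function (minor_cycle_threshold is an illustrative name):

def minor_cycle_threshold(threshold, peak_residual, max_sidelobe,
                          cycle_factor=1.5, cycle_max_psf_fraction=0.8):
    # Clean down to a fraction of the peak residual, capped so the
    # threshold always stays below the peak (see the warning above).
    fraction_of_psf = min(cycle_max_psf_fraction, cycle_factor * max_sidelobe)
    if fraction_of_psf > 0.8:
        fraction_of_psf = 0.8
    return max(0.95 * threshold, fraction_of_psf * peak_residual)

# e.g. a 10% sidelobe and a 2.0 Jy peak residual with the default controls:
print(minor_cycle_threshold(threshold=0.01, peak_residual=2.0, max_sidelobe=0.10))  # 0.3
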
コード例 #45
0
def doInfraRecon(ticker2secid, datestr):
    (seqnum2Info, orderid2Info) = getSeqnumOrderidMaps(datestr)
    execFills = readInfraExecLogs(datestr)
    processedExecFills = associateOrderidToExecFills(execFills, seqnum2Info)
    orderid2fills = readOurFillsFile(datestr)

    retVal = [
        "type|date|strat|seqnum|secid|ticker|ts_received|ts_exchange|shares|price|exchange|liquidity|orderID|tactic"
    ]
    seqnum = 0
    missingFills = []
    for eFill in processedExecFills:
        seqnum += 1
        orderid = eFill['orderid']
        if orderid not in orderid2fills:
            matchIndex = -1
        else:
            matchIndex = matchFill(eFill, orderid2fills[orderid],
                                   MAX_TIMESTAMP_DIFF)
        if matchIndex < 0:
            #util.warning('Did not find a match. Creating missing fill: ' + str(eFill))
            missingFills.append(eFill)
            # add the fill
            retVal.append(
                "F|{date}|1|{seqnum}|{secid}|{ticker}|{ts}|{ts}|{qty}|{price}|{exch}|{liq}|{orderid}|{tactic}"
                .format(date=datestr,
                        seqnum=seqnum,
                        secid=ticker2secid[eFill['ticker']],
                        ticker=eFill['ticker'],
                        ts=eFill['timestamp'],
                        qty=eFill['qty'],
                        price=eFill['price'],
                        exch=eFill['exch'],
                        liq=eFill['liq'],
                        orderid=eFill['orderid'],
                        tactic=eFill['tactic']))
            continue
        matchedFill = orderid2fills[orderid][matchIndex]
        matchedFill['date'] = datestr
        matchedFill['seqnum'] = seqnum
        retVal.append(
            "F|{date}|1|{seqnum}|{secid}|{ticker}|{ts_received}|{ts_exchange}|{qty}|{price}|{exch}|{liq}|{orderid}|{tactic}"
            .format(**matchedFill))

    nDelayedFills = 0
    leftOverMissingFills = []
    for eFill in missingFills:
        orderid = eFill['orderid']
        if orderid not in orderid2fills:
            matchIndex = -1
        else:
            matchIndex = matchFill(eFill, orderid2fills[orderid], 1000)
        if matchIndex < 0:
            #util.warning('Did not find a match. Creating missing fill: ' + str(eFill))
            leftOverMissingFills.append(eFill)
        else:
            nDelayedFills += 1

    util.info(
        'Substituted timestamps for {} fills, since they had a delayed timestamp.'
        .format(nDelayedFills))

    nSuperDelayedFills = 0
    actualMissingFills = 0
    for eFill in leftOverMissingFills:
        orderid = eFill['orderid']
        if orderid not in orderid2fills:
            matchIndex = -1
        else:
            matchIndex = matchFill(eFill, orderid2fills[orderid], 3000)
        if matchIndex < 0:
            util.warning('Did not find a match. Creating missing fill: ' +
                         str(eFill))
            actualMissingFills += 1
        else:
            nSuperDelayedFills += 1

    util.info(
        'Substituted timestamps for {} fills, since they had a delayed timestamp of over 1 second.'
        .format(nSuperDelayedFills))
    util.info('Actual missing fills = {}'.format(actualMissingFills))

    for orderid in orderid2fills:
        for fill in orderid2fills[orderid]:
            if fill['matched'] != True:
                util.warning("Could not match fill: " + str(fill))
    return retVal
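
Example #45 retries unmatched exec fills with progressively wider timestamp tolerances (MAX_TIMESTAMP_DIFF, then 1000 ms, then 3000 ms) before declaring a fill truly missing. A stripped-down sketch of that escalation; match_with_escalation and the 100 ms first step are illustrative, since the value of MAX_TIMESTAMP_DIFF is not shown in the snippet:

def match_with_escalation(fill, candidates, tolerances=(100, 1000, 3000)):
    """Return (matched_candidate, tolerance_used) or (None, None).
    Tolerances are in milliseconds."""
    for tol in tolerances:
        for cand in candidates:
            if abs(cand['timestamp'] - fill['timestamp']) <= tol:
                return cand, tol
    return None, None

fills = [{'timestamp': 1000500}, {'timestamp': 1002500}]
print(match_with_escalation({'timestamp': 1000000}, fills))  # first fill matches at tol 1000
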
コード例 #46
0
def __optimize(file):
    organizer = {}
    table = None
    keyNames = None
    attributeNames = None
    numOfKeys = None

    while True:
        line = __getSplitLine(file)
        if line is None: break

        if line[0] in ("T", "F", "E"):
            continue
        elif line[0] in ("H"):
            (table, numOfKeys, keyNames,
             attributeNames) = __parseHeaderLine(line)
            continue
        elif line[0] in ("I,C,D,R"):
            d = (command, keyValues,
                 attributeValues) = __parseDataLine(line, numOfKeys)
            t = (table, numOfKeys, keyNames, attributeNames)
            if t in organizer:
                lines = organizer[t]
            else:
                lines = []
                organizer[t] = lines
            lines.append(d)
            continue
        else:
            util.warning("Oh no! a K command on table {}: {}".format(
                table, line))
            continue

    allLines = []
    for header, datalines in organizer.iteritems():
        datalines.sort(key=lambda x: x[1])
        #remove some redundancies from datalines
        start = 0
        end = 0
        while start != len(datalines):
            (start, end) = __consecutiveEqual(datalines, start, lambda x: x[1])

            for i in xrange(start, end - 1, 1):
                if datalines[i][0] == "D" and datalines[i + 1][0] == "R":
                    datalines[i] = ()
                elif (datalines[i][0] == "D" or datalines[i][0]
                      == "C") and datalines[i + 1][0] == "C":
                    same = True
                    for x, y in zip(datalines[i][2], datalines[i + 1][2]):
                        if len(x) == 0 and len(y) == 0:
                            continue
                        elif len(x) > 0 and len(y) > 0:
                            continue
                        else:
                            same = False
                            break
                    if same:
                        datalines[i] = ()
                    else:
                        newdataline = []
                        for x, y in zip(datalines[i][2], datalines[i + 1][2]):
                            if len(y) > 0:
                                newdataline.append('')
                            else:
                                newdataline.append(x)
                        datalines[i] = (datalines[i][0], datalines[i][1],
                                        tuple(newdataline))

            start = end

        allLines.append(header)
        allLines.extend(datalines)

    return allLines
コード例 #47
0
    def _load_style_substitutions(self):
        """
        Loads Italian, Mexican, South Asian, vegan, AND vegetarian text files into fields
        """
        # TODO: I feel really bad about the use of copied code, so a helper function could be good to write sometime.
        mexican_to_italian = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#mexican_to_italian", "#end_mexican_to_italian")
        mexican_to_asian = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#mexican_to_asian", "#end_mexican_to_asian")
        asian_to_italian = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#asian_to_italian", "#end_asian_to_italian")
        asian_to_mexican = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#asian_to_mexican", "#end_asian_to_mexican")
        italian_to_mexican = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#italian_to_mexican", "#end_italian_to_mexican")
        italian_to_asian = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#italian_to_asian", "#end_italian_to_asian")
        italian_spices_subs = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#italian_spices_subs", "#end_italian_spices_subs")
        italian_spices = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#italian_spices_subs", "#end_italian_spices_subs")
        asian_spices = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#asian_spices", "#end_asian_spices")
        asian_spices_subs = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#asian_spices_subs", "#end_asian_spices_subs")
        mexican_spices = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#mexican_spices", "#end_mexican_spices")
        mexican_spices_subs = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#mexican_spices_subs", "#end_mexican_spices_subs")
        neutral_to_asian = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#neutral_to_asian", "#end_neutral_to_asian")
        neutral_to_mexican = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#neutral_to_mexican", "#end_neutral_to_mexican")
        neutral_to_italian = read_specific_lines(
            util.relative_path("kb_data/style_substitutions.txt"),
            "#neutral_to_italian", "#end_neutral_to_italian")

        vegan_sub_list = read_txt_lines_into_list(
            'kb_data/vegan_substitutions.txt')
        vegetarian_sub_list = read_txt_lines_into_list(
            'kb_data/vegetarian_substitutions.txt')

        for raw_sub in italian_spices:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.italian_spices_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'italian'))

        for raw_sub in italian_spices_subs:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.italian_spices_subs.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'italian'))

        for raw_sub in asian_spices_subs:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.asian_spices_subs.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'asian'))

        for raw_sub in mexican_spices_subs:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.mexican_spices_subs.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'mexican'))

        for spice in mexican_spices:
            self.mexican_spices_list.append(self.lookup_single_food(spice))

        for spice in asian_spices:
            self.asian_spices_list.append(self.lookup_single_food(spice))

        for raw_sub in mexican_to_italian:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.mexican_to_italian_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'mexican_to_italian'))

        for raw_sub in mexican_to_asian:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.mexican_to_asian_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'mexican_to_asian'))

        for raw_sub in asian_to_italian:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.asian_to_italian_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'asian_to_italian'))

        for raw_sub in asian_to_mexican:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.asian_to_mexican_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'asian_to_mexican'))

        for raw_sub in italian_to_asian:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.italian_to_asian_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'italian_to_asian'))

        for raw_sub in italian_to_mexican:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.italian_to_mexican_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'italian_to_mexican'))

        for raw_sub in vegan_sub_list:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.vegan_substitutions.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'vegan'))

        for raw_sub in vegetarian_sub_list:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.vegetarian_substitutions.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'vegetarian'))

        for raw_sub in neutral_to_italian:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.neutral_to_italian_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'neutral_to_italian'))

        for raw_sub in neutral_to_asian:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.neutral_to_asian_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'neutral_to_asian'))

        for raw_sub in neutral_to_mexican:
            parsed_in_out = [thing.strip() for thing in raw_sub.split('=')]
            if len(parsed_in_out) != 2:
                util.warning('Incorrect substitution string: ' + raw_sub)
                continue
            self.neutral_to_mexican_list.append(
                self._format_raw_sub(parsed_in_out[0], parsed_in_out[1],
                                     'neutral_to_mexican'))
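
The TODO at the top of example #47 asks for a helper to replace the copied parse-and-append loops. One possible shape for it, reusing the snippet's split-on-'=' convention and its util.warning call (_parse_substitutions is an illustrative method name, not part of the original):

def _parse_substitutions(self, raw_subs, tag):
    """Parse 'ingredient = replacement' lines into formatted subs,
    warning on (and skipping) malformed entries."""
    parsed = []
    for raw_sub in raw_subs:
        parts = [part.strip() for part in raw_sub.split('=')]
        if len(parts) != 2:
            util.warning('Incorrect substitution string: ' + raw_sub)
            continue
        parsed.append(self._format_raw_sub(parts[0], parts[1], tag))
    return parsed

# e.g. self.vegan_substitutions = self._parse_substitutions(vegan_sub_list, 'vegan')
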
コード例 #48
0
ファイル: util.py プロジェクト: pombredanne/cc-utils
def filter_and_display_upload_results(
    upload_results: typing.Sequence[UploadResult],
    cve_threshold=7,
    ignore_if_triaged=True,
) -> typing.Iterable[typing.Tuple[UploadResult, int]]:
    # we only require the analysis_results for now

    results_without_components = []
    results_below_cve_thresh = []
    results_above_cve_thresh = []

    for upload_result in upload_results:
        result = upload_result.result
        components = result.components()
        if not components:
            results_without_components.append(upload_result)
            continue

        greatest_cve = -1

        for component in components:
            vulnerabilities = filter(lambda v: not v.historical(),
                                     component.vulnerabilities())
            if ignore_if_triaged:
                vulnerabilities = filter(lambda v: not v.has_triage(),
                                         vulnerabilities)
            greatest_cve_candidate = highest_major_cve_severity(
                vulnerabilities)
            if greatest_cve_candidate > greatest_cve:
                greatest_cve = greatest_cve_candidate

        if greatest_cve >= cve_threshold:
            results_above_cve_thresh.append((upload_result, greatest_cve))
            continue
        else:
            results_below_cve_thresh.append((upload_result, greatest_cve))
            continue

    if results_without_components:
        warning(
            f'Protecode did not identify components for {len(results_without_components)} upload result(s):\n'
        )
        for result in results_without_components:
            print(result.result.display_name())
        print('')

    def render_results_table(
            upload_results: typing.Sequence[typing.Tuple[UploadResult, int]]):
        header = ('Component Name', 'Greatest CVE')
        results = sorted(upload_results, key=lambda e: e[1])

        result = tabulate.tabulate(
            map(lambda r: (r[0].result.display_name(), r[1]), results),
            headers=header,
            tablefmt='fancy_grid',
        )
        print(result)

    if results_below_cve_thresh:
        info(
            f'The following components were below the configured CVE threshold {cve_threshold}'
        )
        render_results_table(upload_results=results_below_cve_thresh)
        print('')

    if results_above_cve_thresh:
        warning('The following components have critical vulnerabilities:')
        render_results_table(upload_results=results_above_cve_thresh)

    return results_above_cve_thresh
コード例 #49
0
ファイル: grader.py プロジェクト: abreen/socrates.py
def grade(criteria, submissions, filename,
          assume_missing=False, late_check=True):
    found = []
    num_missing = 0
    total = criteria.total_points

    for f in criteria.files:
        crit_dir, crit_name = os.path.split(f.path)

        for s in submissions:
            sub_dir, sub_name = os.path.split(s)

            if crit_name == sub_name:
                found.append(f)
                break
        else:

            util.warning("could not find file '{}'".format(f.path))

            if len(submissions) < 1:
                continue

            if not assume_missing:
                # find the submission directory (it could be the
                # current working directory, but maybe not)
                submission_dir, _ = os.path.split(submissions[0])

                if not submission_dir:
                    submission_dir = os.path.abspath(os.curdir)

                choices = [f for f in os.listdir(submission_dir)
                           if os.path.isfile(os.path.join(submission_dir, f))]
                choices.append("skip grading this submission now")
                choices.append("mark the file as missing")

                util.info("this student may have named the file incorrectly")

                # we prompt the grader for zero or one choice
                got = prompt(choices, '1')
                got = got[0]

                if got == len(choices) - 1:
                    # declare the file missing
                    num_missing += 1
                    continue

                elif got == len(choices) - 2:
                    util.info("skipping this submission")
                    util.exit(util.EXIT_WITH_DEFER)

                else:
                    # get absolute path to the old and new files
                    sname = choices[got]

                    opath = os.path.join(submission_dir, sname)
                    npath = os.path.join(submission_dir, crit_name)

                    try:
                        os.rename(opath, npath)
                    except:
                        util.error("error renaming incorrectly named file")
                        util.print_traceback()
                        util.exit(util.ERR_GRADING_MISC)

                    found.append(f)

    out = io.StringIO()

    try:
        for f in criteria.files:
            out.write(util.heading("{} [{} points]".format(f, f.point_value),
                                   level=2))

            if f not in found:
                total -= f.point_value
                out.write("-{}\tnot submitted\n".format(f.point_value))
                out.write("\n\n")
                continue

            util.info("running tests for " + str(f))

            points_taken = 0
            points_taken += write_results(out, f.run_tests())

            if late_check:
                file_stat = os.stat(f.path)
                mtime = datetime.datetime.fromtimestamp(file_stat.st_mtime)

                mult = criteria.get_late_penalty(mtime)
                late_penalty = f.point_value * mult

                if late_penalty != 0:
                    util.warning("taking {}% late penalty".format(mult * 100))

                    adjusted = min(f.point_value - points_taken, late_penalty)
                    out.write("-{}\tsubmitted late\n".format(adjusted))
                    points_taken += adjusted

            total -= min(f.point_value, points_taken)

            out.write("\n")

        out.write("\nTotal: {}\n".format(total))

    except KeyboardInterrupt:
        out.close()

        util.warning("stopping (received interrupt)")
        util.exit(util.ERR_INTERRUPTED)

    except:
        out.close()

        util.exit(util.ERR_GRADING_MISC)

    with open(filename, 'w') as f:
        out.seek(0)
        f.write(out.read())

    return num_missing
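A hedged sketch of a possible caller for grade(); the criteria object and helper modules are those of socrates.py and are assumed to be importable, and the directory layout shown is only an example.

import glob

def grade_one_submission(criteria, student_dir, log_name='grade-log.txt'):
    # collect the files the student handed in and run the grading loop above
    submissions = glob.glob(student_dir + '/*')
    num_missing = grade(criteria, submissions, log_name,
                        assume_missing=False, late_check=True)
    if num_missing:
        print('{} required file(s) were marked as missing'.format(num_missing))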
コード例 #51
0
ファイル: acquire.py プロジェクト: ryan-leung/ml_monorepo
            sdir = config["remote_dir"] + "/" + sub_dir
            util.info("Looking in %s" % sdir)
            source.cwd(sdir)
            listing = source.list(config["regex"])
            filecnt = 0

            for info in listing:
                filename = info[0]
                size = info[1]
                util.info("Looking at file: %s" % filename)

                # Skip files whose FTP mod time cannot be read; this sometimes happens while new files are still being uploaded
                mod_time = source.modtime(filename)
                if mod_time is None:
                    util.warning(
                        "Could not get mod time of %s, skipping file" %
                        filename)
                    continue

                local_filename = mod_time.strftime(config["prefix"])
                if options.source != "newsscope":
                    local_filename += filename
                else:
                    local_filename += filename.split("/")[-1] + ".xml.gz"
                if options.source == "bberg2" and filename.count(".enc") > 0:
                    local_filename = local_filename.replace(".enc", "")
                # Add a hash of the timestamp for uniqueness/detecting modified files
                digest = hashlib.md5()
                digest.update(str(mod_time))
                digest.update(str(size))
                local_filename += ".%s" % digest.hexdigest()[0:8]
                local_path = "%s/%s/%s/%s/%s" % (
                    os.environ["DATA_DIR"], config["local_dir"],
コード例 #52
0
 def _check_http_code(self, result, url):
     if result.status_code < 200 or result.status_code >= 300:
         warning('{c} - {m}: {u}'.format(c=result.status_code, m=result.content, u=url))
         raise RuntimeError('unexpected HTTP status {c} for {u}'.format(
             c=result.status_code, u=url))
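A hedged sketch of a caller for _check_http_code; it assumes a requests.Response-like result object (only status_code and content are used) and that the method lives on some client object, here called client for illustration.

import requests

def fetch_checked(client, url):
    # 'client' is whatever object defines _check_http_code above
    result = requests.get(url, timeout=30)
    client._check_http_code(result, url)  # logs and raises RuntimeError on non-2xx
    return result.content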
コード例 #53
0
ファイル: repo.py プロジェクト: tgphelps/ddu
    def verify_backups(self, only='') -> None:
        """Verify all, or only the latest, backup(s) in this repository.

        Iterate through every 'new' and 'old' entry in every (or latest)
        backup, and verify that (1) the hash exists, and (2) its contents
        matches its name.
        """
        if only == '':
            bkup_list = self.all_backups
        else:
            bkup_list = [only]
        num_backups = len(bkup_list)
        # entries = 0
        num_files = 0
        hits = 0
        all_hashes: Set[str] = set()
        good: Set[str] = set()
        bad: Set[str] = set()
        missing: Set[str] = set()
        for b in bkup_list:
            util.msg(f'Checking: {b}')
            bkup = os.path.join(self.backups, b)
            with open(bkup, 'rt') as f:
                line_num = 0
                saw_end = False
                for line in f:
                    line_num += 1
                    if line_num == 1 and not line.startswith('start'):
                        util.fatal(f'Backup {bkup} corrupted. No start.')
                    if line[0:3] in ('new', 'old'):
                        num_files += 1
                        flds = line.split('\t')
                        h = flds[1]
                        this_file = flds[3]
                        all_hashes.add(h)
                        if h in good:
                            hits += 1
                            # continue
                        elif h in bad:
                            hits += 1
                            util.error(f'invalid hash {h} for {this_file}')
                            # continue
                        else:
                            # We haven't seen this hash before
                            (d, ff) = self.fname_from_hash(h)
                            if self.find_file(os.path.join(d, ff)):
                                if _verify_hash(os.path.join(self.objects, d),
                                                ff):
                                    good.add(h)
                                else:
                                    bad.add(h)
                                    t = this_file
                                    util.error(f'invalid hash {h} for {t}')
                            else:
                                missing.add(h)
                                util.error(f'missing {h} for {this_file}')
                    else:
                        # print(line_num, line)
                        # This should be a trailer line
                        if line.startswith('end'):
                            saw_end = True
                if not saw_end:
                    util.warning(f'Backup {bkup} has no end marker')

        if len(all_hashes) != len(good) + len(bad) + len(missing):
            util.fatal(f'hash bug: {len(all_hashes)} '
                       f'{len(good)} {len(bad)} {len(missing)}')
        util.msg('Verify results:')
        util.msg(f'backups checked = {num_backups}')
        util.msg(f'files checked = {num_files}')
        util.msg(f'invalid = {len(bad)}')
        util.msg(f'missing = {len(missing)}')
        util.msg(f'cache hits = {hits}')
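verify_backups() depends on a _verify_hash(directory, filename) helper that is not part of this excerpt. Below is a minimal sketch of what such a content-addressed check could look like, assuming the object's file name is the hex digest of its contents and that SHA-256 is the hash in use (both are assumptions, not taken from ddu).

import hashlib
import os

def _verify_hash_sketch(directory: str, filename: str) -> bool:
    # Recompute the digest of the stored object and compare it to its name.
    h = hashlib.sha256()
    with open(os.path.join(directory, filename), 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            h.update(chunk)
    return h.hexdigest() == filename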
コード例 #54
0
ファイル: ci.py プロジェクト: efcs/zorg
                if not os.path.exists(p):
                    continue

                f = open(p)
                data = f.read()
                f.close()
                if data:
                    print ("-- command %s (note: suppressed by default, "
                           "see sandbox dir for log files) --" % (type))
                    print "--\n%s--\n" % data

        test_result = cmd_object.result
        if not test_result:
            break
    if not interpolated_variables:
        warning('no substitutions found. Fetched root ignored?')

    # Remove the temporary directory.
    if is_temp:
        if shell.execute(['rm', '-rf', sandbox]) != 0:
            note('unable to remove sandbox dir %r' % sandbox)

    return test_result, command_objects


def get_best_match(builds, name, key=lambda x: x):
    builds = list(builds)
    builds.sort(key=key)

    if name is None and builds:
        return builds[-1]
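A hedged usage sketch for get_best_match; only the name is None branch is visible in this excerpt, so the example sticks to that case, and the build dictionaries are purely illustrative.

builds = [
    {'name': 'clang-r100', 'revision': 100},
    {'name': 'clang-r205', 'revision': 205},
    {'name': 'clang-r180', 'revision': 180},
]

# With name=None the excerpt returns the build that sorts last under the key,
# i.e. the highest revision here.
latest = get_best_match(builds, None, key=lambda b: b['revision'])
print(latest['name'])  # clang-r205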
コード例 #55
0
ファイル: replicator.py プロジェクト: minchaow/cc-utils
    def process_results(self, results):
        # collect pipelines by concourse target (concourse_cfg, team_name) as key
        concourse_target_results = {}
        for result in results:
            definition_descriptor = result.definition_descriptor
            concourse_target_key = definition_descriptor.concourse_target_key()
            if concourse_target_key not in concourse_target_results:
                concourse_target_results[concourse_target_key] = set()
            concourse_target_results[concourse_target_key].add(result)

        for concourse_target_key, concourse_results in concourse_target_results.items():
            # TODO: implement eq for concourse_cfg
            concourse_cfg, concourse_team = next(iter(
                concourse_results)).definition_descriptor.concourse_target()
            concourse_results = concourse_target_results[concourse_target_key]
            concourse_api = client.from_cfg(
                concourse_cfg=concourse_cfg,
                team_name=concourse_team,
            )
            # find pipelines to remove
            deployed_pipeline_names = set(map(
                lambda r: r.definition_descriptor.pipeline_name, concourse_results
            ))

            pipelines_to_remove = set(concourse_api.pipelines()) - deployed_pipeline_names

            for pipeline_name in pipelines_to_remove:
                info('removing pipeline: {p}'.format(p=pipeline_name))
                concourse_api.delete_pipeline(pipeline_name)

            # trigger resource checks in new pipelines
            self._initialise_new_pipeline_resources(concourse_api, concourse_results)

            # order pipelines alphabetically
            pipeline_names = list(concourse_api.pipelines())
            pipeline_names.sort()
            concourse_api.order_pipelines(pipeline_names)

        # evaluate results
        failed_descriptors = [
            d for d in results
            if not d.deploy_status & DeployStatus.SUCCEEDED
        ]

        failed_count = len(failed_descriptors)

        info('Successfully replicated {d} pipeline(s)'.format(d=len(results) - failed_count))

        if failed_count == 0:
            return True

        warning('Errors occurred whilst replicating {d} pipeline(s):'.format(
            d=failed_count,
        ))

        all_notifications_succeeded = True
        for failed_descriptor in failed_descriptors:
            warning(failed_descriptor.definition_descriptor.pipeline_name)
            try:
                self._notify_broken_definition_owners(failed_descriptor)
            except Exception:
                warning('an error occurred whilst trying to send error notifications')
                traceback.print_exc()
                all_notifications_succeeded = False

        # signal an error only if sending the error notifications failed
        return all_notifications_succeeded
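The success check above (d.deploy_status & DeployStatus.SUCCEEDED) treats DeployStatus as a bit-flag enum. A hedged sketch of such a definition follows; the member names are assumed rather than taken from cc-utils.

import enum

class DeployStatusSketch(enum.Flag):
    SUCCEEDED = enum.auto()
    FAILED = enum.auto()
    SKIPPED = enum.auto()
    CREATED = enum.auto()

# a result counts as failed when its SUCCEEDED bit is not set
status = DeployStatusSketch.CREATED | DeployStatusSketch.SUCCEEDED
assert status & DeployStatusSketch.SUCCEEDED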
コード例 #56
0
ファイル: config.py プロジェクト: abreen/socrates.py
# the current working directory at the time socrates started
_cwd = os.getcwd()

# the absolute path to this config.py file while running
_conf = os.path.abspath(inspect.getfile(inspect.currentframe()))

# SOCRATES_DIR should contain the running socrates.py file
SOCRATES_DIR, _ = os.path.split(_conf)
SOCRATES_CONFIG = SOCRATES_DIR + os.sep + 'socrates.ini'

_parser = configparser.ConfigParser()

if os.path.isfile(SOCRATES_CONFIG):
    _parser.read(SOCRATES_CONFIG)
    if len(_parser) < 2:
        util.warning("found config file, but it looks incomplete")

hooks_dir = _parser.get('socrates', 'hooks_dir',
                        fallback=SOCRATES_DIR + os.sep + 'hooks')
scripts_dir = _parser.get('socrates', 'scripts_dir',
                          fallback=SOCRATES_DIR + os.sep + 'scripts')
static_dir = _parser.get('socrates', 'static_dir',
                         fallback=SOCRATES_DIR + os.sep + 'static')
dropbox_dir = _parser.get('socrates', 'dropbox_dir',
                          fallback=SOCRATES_DIR + os.sep + 'dropbox')
criteria_dir = _parser.get('socrates', 'criteria_dir',
                           fallback=SOCRATES_DIR + os.sep + 'criteria')

from datetime import timedelta as _td
if _parser.has_option('socrates', 'grace_period'):
    _grace_str = _parser.get('socrates', 'grace_period')
コード例 #57
0
ファイル: replicator.py プロジェクト: minchaow/cc-utils
    def _notify_broken_definition_owners(self, failed_descriptor):
        definition_descriptor = failed_descriptor.definition_descriptor
        main_repo = definition_descriptor.main_repo
        github_cfg = github_cfg_for_hostname(self._cfg_set, main_repo['hostname'])
        github_api = _create_github_api_object(github_cfg)
        repo_owner, repo_name = main_repo['path'].split('/')

        githubrepobranch = GitHubRepoBranch(
            github_config=github_cfg,
            repo_owner=repo_owner,
            repo_name=repo_name,
            branch=main_repo['branch'],
        )

        repo_helper = GitHubRepositoryHelper.from_githubrepobranch(
            githubrepobranch=githubrepobranch,
        )

        codeowners_enumerator = CodeownersEnumerator()
        codeowners_resolver = CodeOwnerEntryResolver(github_api=github_api)
        recipients = set(codeowners_resolver.resolve_email_addresses(
            codeowners_enumerator.enumerate_remote_repo(github_repo_helper=repo_helper)
        ))

        # in case no codeowners are available, resort to using the committer
        if not recipients:
            head_commit = repo_helper.repository.commit(main_repo['branch'])
            user_ids = {
                user_info.get('login')
                for user_info
                in (head_commit.committer, head_commit.author)
                if user_info.get('login')
            }
            for user_id in user_ids:
                user = github_api.user(user_id)
                if user.email:
                    recipients.add(user.email)

        # if there are still no recipients available, print a warning
        if not recipients:
            warning(textwrap.dedent(
                f"""
                Unable to determine recipient for pipeline '{definition_descriptor.pipeline_name}'
                found in branch '{main_repo['branch']}' ({main_repo['path']}). Please make sure that
                CODEOWNERS and committers have exposed a public e-mail address in their profile.
                """
            ))
        else:
            info(f'Sending notification e-mail to {recipients} ({main_repo["path"]})')
            email_cfg = self._cfg_set.email()
            _send_mail(
                email_cfg=email_cfg,
                recipients=recipients,
                subject='Your pipeline definition in {repo} is erroneous'.format(
                    repo=main_repo['path'],
                ),
                mail_template=(
                    f"The pipeline definition for pipeline '{definition_descriptor.pipeline_name}' "
                    f" on branch '{main_repo['branch']}' contains errors.\n\n"
                    f"Error details:\n{str(failed_descriptor.error_details)}"
                )
            )
コード例 #58
0
 def bail(self, error):
     self.ctx.getConfig().purge()
     util.warning(self.ctx, error)
コード例 #59
0
def mfclean(options):
    clark_options = {}
    clark_options["gain"] = options.gain
    clark_options["iterations"] = options.iterations
    clark_options["cycle_speedup"] = options.cycle_speedup

    max_baseline = options.max_baseline if options.max_baseline > 0.0 else \
        10000.0

    processor_options = {}
    processor_options["processor"] = options.processor
    processor_options["w_max"] = max_baseline
    processor_options["padding"] = 1.0
    processor_options["image"] = options.image
    processor_options["threads"] = options.threads
    processor_options["weighttype"] = options.weighttype
    processor_options["rmode"] = options.rmode
    processor_options["noise"] = options.noise
    processor_options["robustness"] = options.robustness
    processor_options["profile"] = options.profile
    processor = processors.create_data_processor(options.ms, processor_options)

    channel_freq = processor.channel_frequency()
    channel_width = processor.channel_width()

    max_freq = numpy.max(channel_freq)
    image_size = 2.0 * util.full_width_half_max(70.0, max_freq)

    # TODO: Cyril mentioned above image size estimation is too conservative.
    # Need to check this and find a better estimate if necessary. For now, will
    # just multiply estimated FOV by 2.0.
    image_size *= 2.0

    (n_px, delta_px) = util.image_configuration(image_size, max_freq,
                                                max_baseline)

    util.notice("image configuration:")
    util.notice("    size: %d x %d pixel" % (n_px, n_px))
    util.notice("    angular size: %.2f deg" % (image_size * 180.0 / numpy.pi))
    util.notice("    angular resolution @ 3 pixel/beam: %.2f arcsec/pixel" %
                (3600.0 * delta_px * 180.0 / numpy.pi))

    # TODO: Need to implement support for multiple channel images. Currently,
    # all data channels are combined into a single MFS image per correlation.
    image_shape = (1, 4, n_px, n_px)
    image_coordinates = pyrap.images.coordinates.coordinatesystem(
        casaimwrap.make_coordinate_system(image_shape[2:],
                                          [delta_px, delta_px],
                                          processor.phase_reference(),
                                          channel_freq, channel_width))

    n_model = 1
    # TODO: Check code for n_model > 1!
    assert (n_model == 1)

    # Comment from CASA source code:
    #
    # Set to search for the peak in the I^2+Q^2+U^2+V^2 domain or in each
    # Stokes plane separately. Ignored for hogbom and msclean for now.
    #    join_stokes = False
    join_stokes = True

    # Compute approximate PSFs.
    util.notice("computing approximate point spread functions...")
    psf = [None for i in range(n_model)]
    beam = [None for i in range(n_model)]
    for i in range(n_model):
        psf[i] = processor.point_spread_function(image_coordinates,
                                                 image_shape)
        fit = casaimwrap.fit_gaussian_psf(image_coordinates.dict(), psf[i])
        assert (fit["ok"])

        beam[i] = BeamParameters((fit["major"] * numpy.pi) / (3600.0 * 180.0),
                                 (fit["minor"] * numpy.pi) / (3600.0 * 180.0),
                                 (fit["angle"] * numpy.pi) / 180.0)

        util.notice(
            "model %d/%d: major axis: %f arcsec, minor axis: %f arcsec,"
            " position angle: %f deg" % (i, n_model - 1, abs(
                fit["major"]), abs(fit["minor"]), fit["angle"]))

    # Validate PSFs.
    (min_psf, max_psf, max_psf_outer, psf_patch_size, max_sidelobe) = \
        validate_psf(image_coordinates, psf, beam)
    clark_options["psf_patch_size"] = psf_patch_size

    updated = [False for i in range(n_model)]
    weight = [None for i in range(n_model)]
    model = [numpy.zeros(image_shape) for i in range(n_model)]
    delta = [numpy.zeros(image_shape) for i in range(n_model)]
    residual = [numpy.zeros(image_shape) for i in range(n_model)]

    if join_stokes:
        iterations = numpy.zeros((n_model, 1, image_shape[0]))
        stokes = ["JOINT"]
        cr_slices = [slice(None)]
    else:
        iterations = numpy.zeros((n_model, image_shape[1], image_shape[0]))
        stokes = image_coordinates.get_coordinate("stokes").get_stokes()
        cr_slices = [slice(i, i + 1) for i in range(4)]

    cycle = 0
    diverged = False
    absmax = options.threshold
    previous_absmax = 1e30

    while (absmax >= options.threshold
           and numpy.max(iterations) < options.iterations
           and (cycle == 0 or any(updated))):

        util.notice(">> starting major cycle: %d <<" % cycle)

        # Comment from CASA source code:
        #
        # Make the residual images. We do an incremental update for cycles after
        # the first one. If we have only one model then we use convolutions to
        # speed the processing
        util.notice("computing residuals...")

        # TODO: If n_models > 1, need to compute residuals from the sum of
        # the degridded visibilities (see LofarCubeSkyEquation.cc).
        assert (n_model == 1)
        if cycle == 0:
            # Assuming the initial models are zero, the residual visibilities
            # equal the observed visibilities and therefore we only need to
            # grid them.
            for i in range(n_model):
                residual[i], weight[i] = processor.grid(
                    image_coordinates, image_shape,
                    processors.Normalization.FLAT_NOISE)
        else:
            for i in range(n_model):
                if updated[i]:
                    residual[i], weight[i] = \
                        processor.residual(image_coordinates, model[i],
                            processors.Normalization.FLAT_NOISE,
                            processors.Normalization.FLAT_NOISE)
                updated[i] = False

        # Compute residual statistics.
        (absmax, resmin, resmax) = max_field(residual, weight)

        # Print some statistics.
        for i in range(n_model):
            util.notice("model %d/%d: min residual: %f, max residual: %f" %
                        (i, n_model - 1, resmin[i], resmax[i]))
        util.notice("peak residual: %f" % absmax)

        # Comment from CASA source code:
        #
        # Check if absmax is 5% above its previous value.
        #
        # TODO: Value used does not look like 5%?
        if absmax >= 1.000005 * previous_absmax:
            diverged = True
            break

        # Store absmax of this major cycle for later reference.
        previous_absmax = absmax

        # Check the stopping criterion.
        if absmax < options.threshold:
            break

        # TODO: What is this really used for? And does the max weight indeed
        # correspond to sensitivity in Jy/beam?
        if cycle == 0:
            max_weight = 0.0
            for i in range(n_model):
                max_weight = max(max_weight, numpy.max(weight[i]))
            util.notice("maximum sensitivity: %f Jy/beam" %
                        (1.0 / numpy.sqrt(max_weight)))

        # Comment from CASA source code:
        #
        # Calculate the threshold for this cycle. Add a safety factor
        #
        # fractionOfPsf controls how deep the cleaning should go.
        # There are two user-controls.
        # cycleFactor_p : scale factor for the PSF sidelobe level.
        #                        1 : clean down to the psf sidelobe level
        #                        <1 : go deeper
        #                        >1 : shallower : stop sooner.
        #                        Default : 1.5
        # cycleMaxPsfFraction_p : scale factor as a fraction of the PSF peak
        #                                    must be 0.0 < xx < 1.0 (obviously)
        #                                    Default : 0.8
        fraction_of_psf = min(options.cycle_max_psf_fraction,
                              options.cycle_factor * max_sidelobe)

        if fraction_of_psf > 0.8:
            util.warning(
                "PSF fraction for threshold computation is too"
                " high: %f. Forcing to 0.8 to ensure that the threshold is"
                " smaller than the peak residual!" % fraction_of_psf)
            fraction_of_psf = 0.8  # painfully slow!

        # Update cycle threshold.
        cycle_threshold = max(0.95 * options.threshold,
                              fraction_of_psf * absmax)
        clark_options["cycle_threshold"] = cycle_threshold

        util.notice("minor cycle threshold max(0.95 * %f, peak residual * %f):"
                    " %f" %
                    (options.threshold, fraction_of_psf, cycle_threshold))

        # Execute the minor cycle (Clark clean) for each channel of each model.
        util.notice("starting minor cycle...")
        for i in range(n_model):
            if max(abs(resmin[i]), abs(resmax[i])) < cycle_threshold:
                util.notice("model %d/%d: peak residual below threshold" %
                            (i, n_model - 1))
                continue

            if max_psf[i] <= 0.0:
                util.warning("model %d/%d: point spread function negative or"
                             " zero" % (i, n_model - 1))
                continue

            # Zero the delta image for this model.
            delta[i].fill(0.0)

            for (cr, cr_slice) in enumerate(cr_slices):
                for ch in range(len(residual[i])):
                    # TODO: The value of max_weight is only updated during
                    # cycle 0. Is this correct?
                    #
                    assert (len(weight[i].shape) == 2
                            and weight[i].shape[:2] == residual[i].shape[:2])

                    plane_weight = numpy.sqrt(weight[i][ch, cr_slice] /
                                              max_weight)
                    if numpy.any(plane_weight > 0.01):
                        weight_mask = numpy.ones((residual[i].shape[2:]))
                    else:
                        weight_mask = numpy.zeros((residual[i].shape[2:]))

                    # Call CASA Clark clean implementation (minor cycle).
                    # TODO: When cleaning each Stokes parameter separately,
                    # the PSF of Stokes I is used for all others as well?
                    #
                    # Comment from CASA source code:
                    #
                    # We only want the PSF for the first polarization so we
                    # iterate over polarization LAST.
                    #
                    result = casaimwrap.clark_clean(
                        psf[i][ch, 0, :, :], residual[i][ch, cr_slice, :, :],
                        weight_mask, iterations[i, cr, ch], clark_options)

                    if result["iterations"] > iterations[i, cr, ch]:
                        updated[i] = True
                        delta[i][ch, cr_slice, :, :] = result["delta"]
                        iterations[i, cr, ch] = result["iterations"]
                    else:
                        assert (numpy.all(result["delta"] == 0.0))

                util.notice("model %d/%d: stokes: %s, cleaned: %f Jy, "
                            "iterations per channel: %s" %
                            (i, n_model - 1, stokes[cr],
                             numpy.sum(delta[i][ch, cr_slice, :, :]),
                             str(iterations[i, cr, :])))

        # Update model images if required.
        for i in range(n_model):
            if updated[i]:
                model[i] += delta[i]

        # Update major cycle counter.
        cycle += 1

    if any(updated):
        util.notice("finalizing residual images for all fields...")
        for i in range(n_model):
            if updated[i]:
                residual[i], weight[i] = processor.residual(
                    image_coordinates, model[i],
                    processors.Normalization.FLAT_NOISE,
                    processors.Normalization.FLAT_NOISE)
        (absmax, resmin, resmax) = max_field(residual, weight)

        # Print some statistics.
        for i in range(n_model):
            util.notice("model %d/%d: min residual: %f, max residual: %f" %
                        (i, n_model - 1, resmin[i], resmax[i]))
        util.notice("peak residual: %f" % absmax)
    else:
        util.notice("residual images for all fields are up-to-date...")

    # Store output images.
    util.notice("storing average response...")
    util.store_image(options.image + ".response", image_coordinates,
                     processor.response(image_coordinates, image_shape))

    util.notice("storing model images...")
    for i in range(n_model):
        util.store_image(options.image + ".model.flat_noise",
                         image_coordinates, model[i])
        util.store_image(
            options.image + ".model", image_coordinates,
            processor.normalize(image_coordinates, model[i],
                                processors.Normalization.FLAT_NOISE,
                                processors.Normalization.FLAT_GAIN))

    util.notice("storing residual images...")
    for i in range(n_model):
        util.store_image(options.image + ".residual.flat_noise",
                         image_coordinates, residual[i])
        util.store_image(
            options.image + ".residual", image_coordinates,
            processor.normalize(image_coordinates, residual[i],
                                processors.Normalization.FLAT_NOISE,
                                processors.Normalization.FLAT_GAIN))

    util.notice("storing restored images...")
    for i in range(n_model):
        restored = restore_image(image_coordinates.dict(), model[i],
                                 residual[i], beam[i])

        util.store_image(options.image + ".restored.flat_noise",
                         image_coordinates, restored)
        util.store_image(
            options.image + ".restored", image_coordinates,
            processor.normalize(image_coordinates, restored,
                                processors.Normalization.FLAT_NOISE,
                                processors.Normalization.FLAT_GAIN))

    # Print some statistics.
    for i in range(n_model):
        util.notice(
            "model %d/%d: clean flux: %f, residual rms: %f" %
            (i, n_model - 1, numpy.sum(model[i]), numpy.std(residual[i])))

    if diverged:
        util.error("clean diverged.")
    elif absmax < options.threshold:
        util.notice("clean converged.")
    else:
        util.warning("clean did not reach threshold: %f Jy." %
                     options.threshold)
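For reference, the unit handling applied when the Gaussian PSF fit is wrapped in BeamParameters above (major/minor axes in arcseconds, position angle in degrees, all converted to radians), shown as a standalone hedged helper. BeamParametersSketch is a stand-in for the real BeamParameters class, whose constructor signature is assumed.

import math
from collections import namedtuple

BeamParametersSketch = namedtuple('BeamParametersSketch',
                                  ['major', 'minor', 'position_angle'])

ARCSEC_TO_RAD = math.pi / (3600.0 * 180.0)
DEG_TO_RAD = math.pi / 180.0

def beam_from_fit(fit):
    # mirrors the conversions applied to the casaimwrap.fit_gaussian_psf output
    return BeamParametersSketch(
        major=fit['major'] * ARCSEC_TO_RAD,
        minor=fit['minor'] * ARCSEC_TO_RAD,
        position_angle=fit['angle'] * DEG_TO_RAD,
    )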