Example #1
    def dump(self, databases_names, database_dump_file):
        """Dump Database."""
        cmd = "mysqldump" \
              + " --host=%s" % self.host \
              + " --user=%s" % self.username \
              + " --password=%s" % self.password \
              + " --opt" \
              + " --quote-names" \
              + " --single-transaction" \
              + " --quick" \
              + " --databases %s" % ' '.join(databases_names) \
              + " > %s" % database_dump_file

        if self.verbose:
            log_cmd = re.sub(r" --password=.*? ", " --password=******** ", cmd)
            log.verbose("%s" % log_cmd)

        if not self.dry_run:
            path = '.'

            exit_code = shell.run_shell_cmd(cmd, path, self.verbose)

            return exit_code

        return 0
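A minimal usage sketch for the snippet above. The MySql wrapper class name and its constructor arguments are hypothetical; only dump() and its signature come from the example:

    db = MySql(host='localhost', username='backup',
               password='secret', verbose=True, dry_run=False)
    exit_code = db.dump(['app_db'], '/tmp/app_db.sql')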
Example #2
def download_no_proxy_init(url, dst):
    log.log("FETCH   {}".format(url))
    log.verbose("downloading '{}' to '{}'...".format(url, dst))
    #urllib.request.urlretrieve(url, dst)
    response = urllib.request.urlopen(url)
    with open(dst, "wb") as file:
        file.write(response.read())
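An illustrative call, assuming urllib.request has been imported at module level as the function requires; the URL and destination path are made up:

    download_no_proxy_init('https://example.com/archive.tar.gz',
                           '/tmp/archive.tar.gz')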
Example #3
def add_gen_obj_from_file(filename):
    gen_obj = GenesisObject.from_file(filename)
    gens_path = gen_obj.get_obj_path()
    if os.path.exists(gens_path):
        log.verbose("genesis object '{}' is already added".format(
            gen_obj.name))
        return gens_path
    return add_gen_obj_from_file_not_already_added(gen_obj, filename,
                                                   [os.path.dirname(filename)])
Example #4
def add_gen_obj_from_data(gen_obj):
    gens_path = gen_obj.get_obj_path()
    if os.path.exists(gens_path):
        log.verbose("genesis object '{}' is already added".format(
            gen_obj.name))
        return gens_path
    for in_entry in gen_obj.in_list:
        add_gen_obj(in_entry, [])
    return write_gen_obj(gen_obj)
Example #5
 def stop_playback(self):
     info("FFplay Stopped.")
     if self.process.poll() is None:
         verbose("SENT KILL TO FFplay.")
         try:
             self.process.kill()
             # wait until kill.
             self.process.wait()
         except (OSError, subprocess.SubprocessError):
             warning("There was a problem terminating FFplay; it may have already closed.")
Example #6
File: core.py Project: mmilata/virtpwn
    def _load_conf(self):
        abs_conf_fn = self.abs_conf_fn()
        log.verbose("Machine config: %s" % abs_conf_fn)
        conf_file = file(abs_conf_fn, 'r')
        self.conf = yaml.load(conf_file)
        log.debug(" -> %s" % self.conf)

        self.base = self._conf_get_required('base')
        self.vm_user = self.conf.get('user', 'root')
        self.mounts = self.conf.get('mount', [])
Example #7
def get_jobs(ctrdirs):
    """
    Scan control directories for jobs with status *INLRMS* or *CANCELING*.

    :param list ctrdirs: list of paths to control directories
    :return: dictionary that maps local job ID to job object
    :rtype: :py:obj:`dict` { :py:obj:`str` : :py:obj:`object` ... }

    .. note:: The returned job objects have the following attributes: \
    ``localid``, ``gridid``, ``local_file``, ``lrms_done_file``, \
    ``state``, ``uid``, ``gid``, ``sessiondir``, ``diag_file``, \
    ``count_file``, ``errors_file`` and ``comment_file``.

    """

    import re

    jobs = {}
    for ctrdir in ctrdirs:
        procdir = ctrdir + '/processing'
        for fname in os.listdir(procdir):
            try:
                globalid = re.search(r'job\.(?P<id>\w+)\.status', fname).groupdict()['id']
                with open('%s/%s' % (procdir, fname), 'r') as f:
                    if re.search('INLRMS|CANCELING', f.readline()):
                        job = type('Job', (object, ), {})()
                        job.globalid = globalid
                        job.controldir = ctrdir
                        job.local_file = '%s/job.%s.local' % (ctrdir, job.globalid)
                        if not read_local_file(job): # sets localid and sessiondir
                            continue
                        job.lrms_done_file = '%s/job.%s.lrms_done' % (ctrdir, job.globalid)
                        job.count_file = '%s/job.%s.lrms_job' % (ctrdir, job.globalid)
                        job.state = 'UNKNOWN'
                        job.message = ''
                        job.diag_file = '%s.diag' % job.sessiondir
                        job.errors_file = '%s.errors' % job.sessiondir
                        job.comment_file = '%s.comment' % job.sessiondir
                        jobs[job.localid] = job
                        try:
                            job.uid = os.stat(job.diag_file).st_uid
                            job.gid = os.stat(job.diag_file).st_gid
                        except OSError:
                            verbose('Failed to stat %s' % job.diag_file, 'common.scan')
                            job.uid = UID
                            job.gid = GID
            except AttributeError:
                # Not a jobfile
                continue
            except IOError as e:
                # Possibly .status file deleted by other process
                verbose('IOError when scanning for jobs in /processing:\n%s' % str(e))
                continue
    return jobs
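A hedged usage sketch for get_jobs(); the control directory path is hypothetical and verbose() is the logging helper used in the snippet:

    jobs = get_jobs(['/var/spool/arc/jobstatus'])
    for localid, job in jobs.items():
        verbose('job %s (grid id %s) is in state %s'
                % (localid, job.globalid, job.state), 'common.scan')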
Example #8
 def __run_Encoder(self, video_url, videoLocation):
     self.running = None
     verbose("Opening FFmpeg.")
     command = ["ffmpeg", "-loglevel", "verbose"]  # Enables Full Logs
     if self.Headers:
         for header in self.Headers:
             command.extend(["-headers", '{0}: {1}'.format(header, self.Headers[header])])
     command.extend(["-y", "-i", video_url, "-c:v", "copy", "-c:a", "copy",
                    videoLocation])
     self.process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                     stdin=subprocess.PIPE, universal_newlines=True)
     self.__startHandler()
Example #9
File: core.py Project: mmilata/virtpwn
 def _generate_id(self):
     assert(self.name)
     doms = self._get_domains()
     new_id = self.name
     pfix = 0
     while new_id in doms:
         pfix += 1
         new_id = "%s_%d" % (self.name, pfix)
     if pfix > 0:
         log.verbose("%s domain exists, using %s" % (self.name, new_id))
     self.vm_id = new_id
     return self.vm_id
Example #10
File: file_io.py Project: ASPP/bloscpack
def _read_metadata(input_fp):
    """ Read the metadata and header from a file pointer.

    Parameters
    ----------
    input_fp : file like
        a file pointer to read from

    Returns
    -------
    metadata : dict
        the metadata
    metadata_header : dict
        the metadata header contents as dict

    Notes
    -----
    The 'input_fp' should point to the position where the metadata starts. The
    number of bytes to read will be determined from the metadata header.

    """
    raw_metadata_header = input_fp.read(METADATA_HEADER_LENGTH)
    log.debug("raw metadata header: '%s'" % repr(raw_metadata_header))
    metadata_header = MetadataHeader.decode(raw_metadata_header)
    log.debug(metadata_header.pformat())
    metadata = input_fp.read(metadata_header.meta_comp_size)
    prealloc = metadata_header.max_meta_size - metadata_header.meta_comp_size
    input_fp.seek(prealloc, 1)
    if metadata_header.meta_checksum != 'None':
        metadata_checksum_impl = CHECKSUMS_LOOKUP[metadata_header.meta_checksum]
        metadata_expected_digest = input_fp.read(metadata_checksum_impl.size)
        metadata_received_digest = metadata_checksum_impl(metadata)
        if metadata_received_digest != metadata_expected_digest:
            raise ChecksumMismatch(
                    "Checksum mismatch detected in metadata "
                    "expected: '%s', received: '%s'" %
                    (repr(metadata_expected_digest),
                        repr(metadata_received_digest)))
        else:
            log.debug('metadata checksum OK (%s): %s ' %
                    (metadata_checksum_impl.name,
                        repr(metadata_received_digest)))
    if metadata_header.meta_codec != 'None':
        metadata_codec_impl = CODECS_LOOKUP[metadata_header.meta_codec]
        metadata = metadata_codec_impl.decompress(metadata)
    log.verbose("read %s metadata of size: '%s'" %
            # FIXME meta_codec?
            ('compressed' if metadata_header.meta_codec != 'None' else
                'uncompressed', metadata_header.meta_comp_size))
    serializer_impl = SERIALIZERS_LOOKUP[metadata_header.magic_format]
    metadata = serializer_impl.loads(metadata)
    return metadata, metadata_header
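A minimal sketch of calling _read_metadata(); the file name and the 32-byte header size are assumptions, not taken from the snippet, and the file pointer must already sit at the start of the metadata section:

    with open('data.blp', 'rb') as input_fp:
        input_fp.seek(32)  # assumption: skip the fixed-size bloscpack header
        metadata, metadata_header = _read_metadata(input_fp)
        log.verbose('decoded metadata: %s' % metadata)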
Example #11
    def get_repos(self):
        """Get Gerrit Repos via API."""
        log.print_log("Getting Gerrit Repos via API")

        api_url = self.url + '/projects/?t'

        if self.verbose:
            extra_options = ''

            if not self.verify_ssl:
                extra_options += 'k'

            log.verbose("curl -L%s %s" % (extra_options, api_url))

        try:
            context = ssl.create_default_context()

            if not self.verify_ssl:
                context = ssl._create_unverified_context()

            response = urllib2.urlopen(api_url, context=context)

        except urllib2.HTTPError as err:
            raise RuntimeError("ERROR: (Gerrit API) HTTPError = %s (%s)" %
                               (str(err.code), err.reason))
        except urllib2.URLError as err:
            raise RuntimeError("ERROR: (Gerrit API) URLError = %s" %
                               str(err.reason))
        except httplib.HTTPException as err:
            raise RuntimeError("ERROR: (Gerrit API) HTTPException = %s" %
                               str(err))
        except Exception:
            import traceback
            raise RuntimeError('ERROR: (Gerrit API) ' + traceback.format_exc())

        if response.getcode() != 200:
            raise RuntimeError(
                "ERROR: (Gerrit API) Did not get 200 response from: %s" %
                api_url)

        magic_prefix = ")]}'"
        response_body = response.read()
        if response_body.startswith(magic_prefix):
            response_body = response_body[len(magic_prefix):]

        data = json.loads(response_body)

        repos = []
        for repo in data:
            repos.append(repo)

        return repos
Example #12
File: core.py Project: mmilata/virtpwn
 def load(self, path, conf_fn):
     self.path = path
     self.conf_fn = conf_fn
     self.name = self.path.split(os.sep)[-1]
     # config
     self.vm_id = None
     self.vm_user = '******'
     self.vm_init = None
     self.vm_mnt = {}
     log.verbose("Loading %s:" % self.name_pp)
     self._load_conf()
     self._load_data()
     self._check_state()
Example #13
File: core.py Project: mmilata/virtpwn
    def _load_data(self):
        abs_data_fn = self.abs_data_fn()
        if not os.path.isfile(abs_data_fn):
            log.verbose("No machine data: %s" % abs_data_fn)
            return
        data_file = file(abs_data_fn, 'r')
        log.verbose("Machine data: %s" % abs_data_fn)
        data = yaml.load(data_file)
        log.debug(" -> %s" % data)

        self.vm_id = data.get('vm_id')
        self.vm_init = data.get('vm_init')
        self.vm_mnt = data.get('vm_mnt', {})
Example #14
 def __run_player(self, video_url):
     self.running = None
     verbose("Opening FFplay.")
     command = ["ffplay", "-loglevel", "verbose", "-x", "1280", "-y", "720", "-window_title",
                "YouTubeLiveChannelRecoder - FFPLAY", "-i", video_url]
     if self.Headers:
         for header in self.Headers:
             command.extend(["-headers", '{0}: {1}'.format(header, self.Headers[header])])
     self.process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                     stdin=subprocess.PIPE, universal_newlines=True)
     encoder_crash_handler = Thread(target=self.__crashHandler, name="FFPLAY Crash Handler.")
     encoder_crash_handler.daemon = True  # needed for control+C to work.
     encoder_crash_handler.start()
Example #15
 def __run_player(self, video_url):
     self.running = None
     verbose("Opening VLC.")
     command = [self.VLCLocation if self.VLCLocation is not None else 'vlc',
                video_url, ":http-user-agent={0}".format("WEB-CLIENT"),
                "--meta-title=YouTubeLiveChannelRecoder"]
     if self.Headers:
         warning("HEADERS NOT SUPPORTED IN VLC.")
     self.process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                     stdin=subprocess.PIPE, universal_newlines=True)
     encoder_crash_handler = Thread(target=self.__crashHandler, name="VLC Crash Handler.")
     encoder_crash_handler.daemon = True  # needed for control+C to work.
     encoder_crash_handler.start()
Example #16
File: core.py Project: mmilata/virtpwn
 def _save_data(self):
     data = {}
     if self.vm_id:
         data['vm_id'] = self.vm_id
     if self.vm_init:
         data['vm_init'] = self.vm_init
     if self.vm_mnt:
         data['vm_mnt'] = self.vm_mnt
     if data:
         abs_data_fn = self.abs_data_fn()
         log.verbose("Updating machine data: %s" % abs_data_fn)
         data_file = file(abs_data_fn, 'w')
         yaml.dump(data, data_file)
Example #17
def run_impl(check_return_code, args, **kwargs):
    args = fix_args(args, **kwargs)
    cwd = kwargs.get("cwd")
    capture_output = kwargs.get("capture_output")
    log.verbose("[RUN{}] {}{}".format(
        "-CAPTURE-OUT" if capture_output else "",
        "CWD={} ".format(cwd) if cwd else "",
        " ".join(args)))
    result = subprocess.run(args, **kwargs)
    if check_return_code:
        check(result, False)
    if capture_output:
        return result.stdout.decode("ascii")
    return result
Example #18
File: ops.py Project: marler8997/genesis
def fetch_extract_archive_to_ca(url, basename, in_hash, keep_basename,
                                out_hash):
    global _disable_op_fail_message
    extract_basename = extract.get_extracted_path(basename)

    if len(out_hash) == 0:
        extract_hash = get_unknown_hash()
    else:
        extract_hash = out_hash
        ca_out_path = genesis.make_ca_path(out_hash, extract_basename)
        if os.path.exists(ca_out_path):
            log.verbose(
                "archive '{}' already fetched and extracted to '{}'".format(
                    url, ca_out_path))
            return ca_out_path

    ca_in_path = fetch_archive_to_ca(url, basename, in_hash, 'inHash')
    extract_path = genesis.make_ca_stage_path(extract_hash, extract_basename)
    if os.path.exists(extract_path):
        sys.exit(
            "Error: cannot extract '{}' because extract path '{}' already exists"
            .format(ca_in_path, extract_path))

    actual_extract_basename = extract.extract(ca_in_path,
                                              extract_path,
                                              keep_basename=keep_basename)
    if not actual_extract_basename.endswith(extract_basename):
        sys.exit(
            "Error(code bug) extract basename mismatch, expected '{}' got '{}'"
            .format(extract_basename, actual_extract_basename))
    file_type, actual_out_hash = genesis.hash_path(extract_path)
    if len(out_hash) == 0:
        correct_path = genesis.make_ca_path(actual_out_hash, extract_basename)
        if os.path.exists(correct_path):
            log.rmtree(extract_path)
        else:
            log.rename(extract_path, correct_path)
        log.log("Please update outHash for '{}' to this:".format(url))
        log.log(actual_out_hash)
        _disable_op_fail_message = True
        sys.exit(1)
    if out_hash != actual_out_hash:
        log.log("outHash Mismatch for '{}', expected then actual:".format(url))
        log.log(out_hash)
        log.log(actual_out_hash)
        _disable_op_fail_message = True
        sys.exit(1)
    log.rename(extract_path, ca_out_path)
    return ca_out_path
Example #19
def run_shell_cmd(cmd, path='.', verbose=False):
    """Run Shell Command."""
    if verbose:
        log.verbose(cmd)

    process = sp.Popen(cmd, shell=True, cwd=path)
    stdout, stderr = process.communicate()
    exit_code = process.returncode

    for output in [stdout, stderr]:
        if output is not None:
            for line in output:
                log.print_log(line.strip('\n'))

    return exit_code
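An illustrative call; the command and working directory are arbitrary:

    exit_code = run_shell_cmd('ls -la', path='/tmp', verbose=True)
    if exit_code != 0:
        log.print_log('command failed with exit code %d' % exit_code)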
Example #20
File: species.py Project: gmagoon/RMG-Py
def makeNewSpecies(structure, label='', reactive=True):
	"""
	Attempt to make a new species based on a chemical `structure`, which is a
	:class:`Structure` object.

	The proposed species is checked against the list of existing species; if the
	species already exists, this function returns
	the existing species. If the species does not exist, a :class:`Species`
	object is created and returned after being appended to the global species
	list.
	"""
	global speciesCounter
#	# Recalculate atom types for proposed structure (hopefully not necessary)
#	structure.simplifyAtomTypes()
#	structure.updateAtomTypes()

	# First check cache and return if species is found
	for i, spec in enumerate(speciesCache):
		if spec.isIsomorphic(structure):
			speciesCache.pop(i)
			speciesCache.insert(0, spec)
			return spec

	# Return an existing species if a match is found
	for spec in speciesList:
		if spec.isIsomorphic(structure):
			speciesCache.insert(0, spec)
			if len(speciesCache) > speciesCacheMaxSize: speciesCache.pop()
			return spec

	# Return None if the species has a forbidden structure
	if thermo.forbiddenStructures is not None:
		for lbl, struct in thermo.forbiddenStructures.iteritems():
			if structure.isSubgraphIsomorphic(struct): return None

	# Otherwise make a new species
	if label == '':
#		label = structure.getFormula()
#		for atom in structure.atoms():
#			if atom.hasFreeElectron(): label += 'J'
		label = structure.toSMILES()
	
	# Note in the log
	spec = Species(speciesCounter+1, label, structure, reactive)
	logging.verbose('Creating new species %s' % str(spec))
	return processNewSpecies(spec)
Example #21
File: ops.py Project: marler8997/genesis
def add_package_links(path, tmpout):
    genesis_dir = os.path.join(tmpout, "genesis")
    log.mkdirs_if_needed(genesis_dir)

    #log.log("[DEBUG] add_package_links '{}'".format(path))
    env_dep_file = os.path.join(tmpout, "genesis", "deps")
    if not add_depend_if_not_added(env_dep_file, path.encode('ascii')):
        return

    # add deps deps
    deps_filename = os.path.join(path, "genesis", "deps")
    if not os.path.exists(deps_filename):
        log.verbose("deps file does not exist {}".format(deps_filename))
    else:
        with open(deps_filename, "r") as file:
            while True:
                entry = file.readline()
                if not entry:
                    break
                entry = entry.rstrip()
                add_package_links(entry, tmpout)

    bin = os.path.join(path, "bin")
    tmpout_bin = os.path.join(tmpout, "bin")
    if os.path.isdir(bin):
        if not os.path.exists(tmpout_bin):
            log.mkdir(tmpout_bin)
        for entry in os.listdir(bin):
            target_exe = os.path.join(bin, entry)
            link_file = os.path.join(tmpout_bin, entry)
            #log.log("[DEBUG] linking {}".format(link_file))
            make_env_exe_link(target_exe, link_file)
    extra_bin_filename = os.path.join(path, "genesis", "extra-bin")
    if not os.path.exists(extra_bin_filename):
        log.verbose(
            "extra-bin file does not exist {}".format(extra_bin_filename))
    else:
        with open(extra_bin_filename, "r") as file:
            while True:
                entry = file.readline()
                if not entry:
                    break
                entry = entry.rstrip()
                target_exe = os.path.join(path, replace_dir_seps(entry))
                link_file = os.path.join(tmpout_bin, os.path.basename(entry))
                make_env_exe_link(target_exe, link_file)
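A hedged usage sketch for add_package_links(); both paths are hypothetical placeholders for a package in the genesis store and a staging directory:

    add_package_links('/genesis/store/abc123-mypkg', '/tmp/env-stage')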
Example #22
    def get_client(self):
        """Get SSH Client."""
        if self.ssh_client is not None:
            return self.ssh_client

        keys = []
        if self._key_file_path:
            keys.append(
                paramiko.RSAKey.from_private_key_file(self._key_file_path))
        else:
            agent = paramiko.Agent()
            keys = agent.get_keys()

            if not keys:
                raise paramiko.AuthenticationException(
                    "ERROR: No SSH Agent keys found!")

        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        if self.verbose:
            key_file_path = ''
            if self._key_file_path:
                key_file_path = " -i %s" % (self._key_file_path)

            log.verbose(
                "ssh%s -p %d %s@%s" %
                (key_file_path, self.port, self.username, self.hostname))

        saved_exception = paramiko.AuthenticationException(
            "ERROR: SSH Authentication Error!")
        for key in keys:
            if isinstance(key, paramiko.AgentKey):
                fp = hexlify(key.get_fingerprint())
                log.info("Trying SSH Agent key: {0}".format(fp))
            try:
                client.connect(hostname=self.hostname,
                               username=self.username,
                               port=self.port,
                               pkey=key,
                               timeout=self.timeout)

                return client
            except paramiko.SSHException as e:
                saved_exception = e
Example #23
 def __init__(self):
     self.proxies = urllib.request.getproxies()
     if os.name == "nt":
         # check if an auto configure script is being used
         with winreg.OpenKey(winreg.HKEY_CURRENT_USER, INTERNET_SETTINGS, 0, winreg.KEY_READ) as key:
             autoconfurl = winregGetString(key, "AutoConfigURL")
             if autoconfurl:
                 slug_url = slugify(autoconfurl)
                 downloaded_path = os.path.join(genesis.get_proxy_path(), slug_url)
                 if os.path.exists(downloaded_path):
                     log.verbose("autoconf script '{}' has already been downloaded".format(downloaded_path))
                 else:
                     tmp_path = downloaded_path + ".downloading"
                     if os.path.exists(tmp_path):
                         sys.exit("not impl: download path '{}' already exists".format(tmp_path))
                     download_no_proxy_init(autoconfurl, tmp_path)
                     log.rename(tmp_path, downloaded_path)
                 log.log("WARNING: need to parse/use '{}' (probably need spidermonkey and/or pypac)".format(downloaded_path))
Example #24
File: core.py Project: mmilata/virtpwn
 def vm_clean_mounts(self):
     if not self.vm_mnt:
         return
     log.info("Cleaning invalid mounts...")
     dsts = self.vm_mnt.keys()
     for dst in dsts:
         abs_dst = self._proj_path(dst)
         cmd_str = "mount | grep '%s'" % abs_dst
         ret, _, _ = cmd.run(cmd_str)
         if ret != 0:
             log.info("%s doesn't seem to be mounted, cleaning."
                      % dst)
             self.vm_mnt.pop(dst)
             self._save_data()
             try:
                 os.rmdir(abs_dst)
              except Exception:
                 log.verbose("Can't remove mount point %s" % abs_dst)
Example #25
File: core.py Project: mmilata/virtpwn
 def _check_state(self):
     log.debug("Checking machine state...")
     if self.vm_id:
         ret, out, err = virsh('domstate "%s"' % self.vm_id)
         if ret != 0:
             log.verbose("No VM found for saved data. Cleaning.")
             self._remove_vm_data()
             self.state = const.VMS_NOT_CREATED
         else:
             if out == 'shut off':
                 self.state = const.VMS_POWEROFF
             elif out == 'running':
                 self.state = const.VMS_RUNNING
             else:
                 raise exception.VirshParseError(out=out)
     else:
         self.state = const.VMS_NOT_CREATED
     log.debug("%s seems to be %s." % (self.name_pp, self.state_pp))
Example #26
    def run_file_cmd(self, sql_file):
        """Run File CMD."""
        cmd = "mysql" \
              + " --host=%s" % self.host \
              + " --user=%s" % self.username \
              + " --password=%s" % self.password \
              + " < %s" % sql_file

        if self.verbose:
            log_cmd = re.sub(r" --password=.*? ", " --password=******** ", cmd)
            log.verbose("%s" % log_cmd)

        if not self.dry_run:
            exit_code = shell.run_shell_cmd(cmd, '.', False)

            return exit_code

        return 0
Example #27
File: file_io.py Project: ASPP/bloscpack
def unpack_file(in_file, out_file):
    """ Main function for decompressing a file.

    Parameters
    ----------
    in_file : str
        the name of the input file
    out_file : str
        the name of the output file

    Returns
    -------
    metadata : str
        the metadata contained in the file if present

    Raises
    ------

    FormatVersionMismatch
        if the file has an unmatching format version number
    ChecksumMismatch
        if any of the chunks fail to produce the correct checksum
    """
    in_file_size = path.getsize(in_file)
    log.verbose('input file size: %s' % pretty_size(in_file_size))
    with open_two_file(open(in_file, 'rb'), open(out_file, 'wb')) as \
            (input_fp, output_fp):
        source = CompressedFPSource(input_fp)
        sink = PlainFPSink(output_fp, source.nchunks)
        unpack(source, sink)
    out_file_size = path.getsize(out_file)
    log.verbose('output file size: %s' % pretty_size(out_file_size))
    log.verbose('decompression ratio: %f' % (out_file_size / in_file_size))
    return source.metadata
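A minimal usage sketch; the file names are hypothetical and ChecksumMismatch is the exception documented above:

    try:
        metadata = unpack_file('data.blp', 'data.bin')
        if metadata:
            log.verbose('file carried metadata: %s' % metadata)
    except ChecksumMismatch as csm:
        log.error(str(csm))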
Example #28
File: cli.py Project: ASPP/bloscpack
def check_files(in_file, out_file, args):
    """ Check files exist/don't exist.

    Parameters
    ----------
    in_file : str
        the input file
    out_file : str
        the output file
    args : parser args
        any additional arguments from the parser

    Raises
    ------
    FileNotFound
        in case any of the files isn't found.

    """
    if not path.exists(in_file):
        raise FileNotFound("input file '%s' does not exist!" % in_file)
    if path.exists(out_file):
        if not args.force:
            raise FileNotFound("output file '%s' exists!" % out_file)
        else:
            log.verbose("overwriting existing file: '%s'" % out_file)
    log.verbose("input file is: '%s'" % in_file)
    log.verbose("output file is: '%s'" % out_file)
Example #29
File: ip.py Project: mmilata/virtpwn
def _get_instance_ip(mac):
    errors = []
    # 1. get IP from DHCP leases
    try:
        cmd_str = 'grep -i "%s" /var/lib/libvirt/dnsmasq/*.leases' % mac
        lease = cmd.run_or_die(cmd_str).rstrip()
        ip = lease.split(" ")[2]
        log.verbose("Got IP from DHCP leases.")
        return ip
    except Exception as e:
        errors.append(('DHCP leases', str(e)))

    # 2. get IP from ARP table
    try:
        arp_stdout = cmd.run_or_die('arp -n')
        for line in arp_stdout.splitlines()[1:]:
            parts = line.split()
            if parts[2] == mac:
                log.verbose("Got IP from ARP cache.")
                return parts[0]
    except Exception as e:
        errors.append(('ARP cache', str(e)))
    else:
        errors.append(('ARP cache', 'MAC not found'))

    errsum = "\n".join(map(lambda e: " * %s" % ": ".join(e), errors))
    desc = 'Failed to obtain IP address from following sources:\n%s' % errsum
    log.verbose(desc)
    return None
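An illustrative call; the MAC address is made up:

    ip = _get_instance_ip('52:54:00:12:34:56')
    if ip is None:
        log.verbose('IP not available yet; will retry later.')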
Example #30
    def run_command(self, cmd, log_output=True):
        """SSH Command."""
        if self.verbose:
            log.verbose("Running remote command: %s" % cmd)

        ssh_client = self.get_client()

        _, stdout, stderr = ssh_client.exec_command(cmd)

        if log_output:
            for output in [stdout, stderr]:
                if output is not None:
                    for line in output:
                        log.print_log(line.strip('\n'), False, False)

        ret_stdout = stdout.read()
        ret_stderr = stderr.read()

        if self.ssh_client is None:
            ssh_client.close()

        return ret_stdout, ret_stderr
Example #31
    def copy_to_remote(self,
                       local_path,
                       remote_path,
                       exclude_files=[],
                       exclude_folders=[]):
        """Copy Folders to Remote."""
        log.print_log("Copy path to remote: %s" % self.hostname)

        created_remote_directories = []
        for root, dirnames, filenames in os.walk(local_path, topdown=True):
            dirnames[:] = [d for d in dirnames if d not in exclude_folders]

            for filename in filenames:
                skip = False
                for exclude_file in exclude_files:
                    if fnmatch.fnmatch(filename, exclude_file):
                        skip = True

                if skip:
                    continue

                remote_file_path = os.path.join(root, filename)
                remote_file_path = remote_file_path.replace(local_path, '')
                remote_file_path = remote_path.rstrip(
                    '/') + '/' + remote_file_path.lstrip('/')

                remote_file_dir = os.path.dirname(remote_file_path)
                if remote_file_dir not in created_remote_directories:

                    if self.verbose:
                        log.verbose("Creating remote path: %s" %
                                    remote_file_dir)

                    self.run_command("mkdir -p %s" % remote_file_dir)
                    created_remote_directories.append(remote_file_dir)

                local_filename = os.path.join(root, filename)
                self.scp_copy_file(local_filename, remote_file_path)
Example #32
File: ops.py Project: marler8997/genesis
def fetch_archive_to_ca(url, basename, in_hash, hash_name):
    global _disable_op_fail_message
    if len(in_hash) == 0:
        download_hash = get_unknown_hash()
    else:
        download_hash = in_hash
        ca_in_path = genesis.make_ca_path(in_hash, basename)
        if os.path.exists(ca_in_path):
            log.verbose("archive '{}' already fetched to '{}'".format(
                url, ca_in_path))
            return ca_in_path

    # TODO: need a lock file
    download_path = genesis.make_ca_stage_path(download_hash, basename)
    if os.path.exists(download_path):
        sys.exit(
            "Error: cannot download '{}' because download path '{}' already exists"
            .format(url, download_path))
    download.download(url, download_path)
    file_type, actual_in_hash = genesis.hash_path(download_path)
    if len(in_hash) == 0:
        correct_path = genesis.make_ca_path(actual_in_hash, basename)
        if os.path.exists(correct_path):
            log.rmtree_or_file(download_path)
        else:
            log.rename(download_path, correct_path)
        log.log("Please update {} for '{}' to this:".format(hash_name, url))
        log.log(actual_in_hash)
        _disable_op_fail_message = True
        sys.exit(1)
    if in_hash != actual_in_hash:
        log.log("{} Mismatch for '{}', expected then actual:".format(
            hash_name, url))
        log.log(in_hash)
        log.log(actual_in_hash)
        _disable_op_fail_message = True
        sys.exit(1)
    log.rename(download_path, ca_in_path)
    return ca_in_path
Example #33
    def job_task(self, jobName, task='enable'):
        """Job Task (enable / disable)."""
        jobUrl = '/'.join([self.jenkins_url, 'job', jobName, task])

        if self.verbose:
            log.verbose(jobUrl)

        data = urllib.urlencode({})

        try:
            urllib2.urlopen(jobUrl, data)
        except urllib2.HTTPError as err:
            raise RuntimeError("ERROR: (Jenkins) HTTPError = %s (%s)" %
                               (str(err.code), err.reason))
        except urllib2.URLError as err:
            raise RuntimeError("ERROR: (Jenkins) URLError = %s" %
                               str(err.reason))
        except httplib.HTTPException as err:
            raise RuntimeError("ERROR: (Jenkins) HTTPException = %s" %
                               str(err))
        except Exception:
            import traceback
            raise RuntimeError('ERROR: (Jenkins) ' + traceback.format_exc())
Example #34
    def get_job_info(self, jobName, jobBuild='lastBuild'):
        """Get Jenkins Build Info."""
        jobUrl = '/'.join(
            [self.jenkins_url, 'job', jobName, jobBuild, 'api/json'])

        if self.verbose:
            log.verbose(jobUrl)

        try:
            jsonData = urllib2.urlopen(jobUrl)
        except urllib2.HTTPError as err:
            raise RuntimeError("ERROR: (Jenkins) HTTPError = %s (%s)" %
                               (str(err.code), err.reason))
        except urllib2.URLError as err:
            raise RuntimeError("ERROR: (Jenkins) URLError = %s" %
                               str(err.reason))
        except httplib.HTTPException as err:
            raise RuntimeError("ERROR: (Jenkins) HTTPException = %s" %
                               str(err))
        except Exception:
            import traceback
            raise RuntimeError('ERROR: (Jenkins) ' + traceback.format_exc())

        return json.load(jsonData)
Example #35
    def scp_copy_file(self, local_file_path, remote_file_path):
        """Copy file to remote via SCP."""
        ssh_client = self.get_client()

        if self.verbose:
            log.verbose("Copy file to remote: -")
            log.verbose("  %s" % local_file_path)
            log.verbose("  %s" % remote_file_path)

        with scp.SCPClient(ssh_client.get_transport()) as scp_client:
            scp_client.put(local_file_path, remote_file_path)

        if self.ssh_client is None:
            ssh_client.close()
Example #36
File: file_io.py Project: ASPP/bloscpack
def pack_file(in_file, out_file, chunk_size=DEFAULT_CHUNK_SIZE, metadata=None,
              blosc_args=None,
              bloscpack_args=None,
              metadata_args=None):
    """ Main function for compressing a file.

    Parameters
    ----------
    in_file : str
        the name of the input file
    out_file : str
        the name of the output file
    chunk_size : int
        the desired chunk size in bytes
    metadata : dict
        the metadata dict
    blosc_args : BloscArgs
        blosc args
    bloscpack_args : BloscpackArgs
        bloscpack args
    metadata_args : MetadataArgs
        metadata args

    Raises
    ------

    ChunkingException
        if there was a problem calculating the chunks

    # TODO document which arguments are silently ignored

    """
    in_file_size = path.getsize(in_file)
    log.verbose('input file size: %s' % double_pretty_size(in_file_size))
    # calculate chunk sizes
    nchunks, chunk_size, last_chunk_size = \
            calculate_nchunks(in_file_size, chunk_size)
    with open_two_file(open(in_file, 'rb'), open(out_file, 'wb')) as \
            (input_fp, output_fp):
        source = PlainFPSource(input_fp)
        sink = CompressedFPSink(output_fp)
        pack(source, sink,
                nchunks, chunk_size, last_chunk_size,
                metadata=metadata,
                blosc_args=blosc_args,
                bloscpack_args=bloscpack_args,
                metadata_args=metadata_args)
    out_file_size = path.getsize(out_file)
    log.verbose('output file size: %s' % double_pretty_size(out_file_size))
    log.verbose('compression ratio: %f' % (in_file_size/out_file_size))
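A hedged sketch of packing a file with mostly default arguments. The file names and metadata dict are hypothetical; BloscArgs and DEFAULT_CHUNK_SIZE are the names already used in this snippet's signature:

    pack_file('data.bin', 'data.blp',
              chunk_size=DEFAULT_CHUNK_SIZE,
              metadata={'dtype': 'float64'},
              blosc_args=BloscArgs())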
Example #37
fails[2] = 0
fails[3] = 0
fails[4] = 0

# left side: sensors as listed in the ".._raw" file
# right side: thingspeak field no.
sensors = [0 for x in range(sensnumb)]
sensors[0] = 1
sensors[1] = 2
sensors[2] = 5
sensors[3] = 4
sensors[4] = 6
# sensors[i]=$fieldNR


log.verbose(False)
log.create("ds18b20")
log.logging.warning("----Start-----")


def thermometer(i=0):
    log.logging.info("%d: reading sensor.." % (i + 1))
    strg = " "
    temp = ds18b20_raw.read_temp(i)
    if temp > 60:
        temp = temp / 0  # deliberately raise an exception so this bogus reading is not uploaded
    temp = temp * 10
    temp = int(temp)
    temp = temp / 10  # division!
    # strg="field%i" %(i+1)
    strg = "field%i" % sensors[i]
Example #38
def test_planet():
    log.verbose('probe initialization')
    with log.Nest('measuring'):
        log.info('stickyness okay')
        log.notice('foggy ground')
    log.warning('exact measurements not possible')
Example #39
File: species.py Project: gmagoon/RMG-Py
	def load(self, datapath):
		"""
		Load a set of thermodynamics group additivity databases from the general
		database specified at `datapath`.
		"""

		datapath = os.path.abspath(datapath)

		logging.info('Loading thermodynamics databases from %s...' % datapath)

		logging.verbose('Loading functional group thermo database from %s...' % datapath)
		self.groupDatabase.load(datapath + '/thermo/Group_Dictionary.txt', \
			datapath + '/thermo/Group_Tree.txt', \
			datapath + '/thermo/Group_Library.txt')
		
		logging.verbose('Loading 1,5 interactions thermo database from %s...' % datapath)
		self.int15Database.load(datapath + '/thermo/15_Dictionary.txt', \
			datapath + '/thermo/15_Tree.txt', \
			datapath + '/thermo/15_Library.txt')
		
		logging.verbose('Loading gauche interactions thermo database from %s...' % datapath)
		self.gaucheDatabase.load(datapath + '/thermo/Gauche_Dictionary.txt', \
			datapath + '/thermo/Gauche_Tree.txt', \
			datapath + '/thermo/Gauche_Library.txt')
		
		logging.verbose('Loading radical corrections thermo database from %s...' % datapath)
		self.radicalDatabase.load(datapath + '/thermo/Radical_Dictionary.txt', \
			datapath + '/thermo/Radical_Tree.txt', \
			datapath + '/thermo/Radical_Library.txt')
		
		logging.verbose('Loading ring corrections thermo database from %s...' % datapath)
		self.ringDatabase.load(datapath + '/thermo/Ring_Dictionary.txt', \
			datapath + '/thermo/Ring_Tree.txt', \
			datapath + '/thermo/Ring_Library.txt')
		
		logging.verbose('Loading other corrections thermo database from %s...' % datapath)
		self.otherDatabase.load(datapath + '/thermo/Other_Dictionary.txt', \
			datapath + '/thermo/Other_Tree.txt', \
			datapath + '/thermo/Other_Library.txt')

		logging.verbose('Loading primary thermo database from %s...' % datapath)
		self.primaryDatabase.load(datapath + '/thermo/Primary_Dictionary.txt', \
			'', \
			datapath + '/thermo/Primary_Library.txt')
Example #40
File: stuff.py Project: drprofesq/hmdx
def writetext(filepath, text):
    log.verbose('Writing to text file "%s"' % filepath)
    with open (filepath, 'w') as f:
        f.write(text)
Example #41
File: io.py Project: athlonshi/RMG-Py
def readInputFile(fstr):
	"""
	Parse an RMG input file at the location `fstr`. If successful, this 
	function returns a :class:`rmg.model.CoreEdgeReactionModel` object and a 
	list of one or more :class:`rmg.model.ReactionSystem` objects.
	"""

	try:
		
		# Parse the RMG input XML file into a DOM tree
		xml0 = XML(path=fstr)

		# Make sure root element is a <rmginput> element
		rootElement = xml0.getRootElement()
		if rootElement.tagName != 'rmginput':
			raise InvalidInputFileException('Incorrect root element; should be <rmginput>.')
		
		# Process option list
		optionList = xml0.getChildElement(rootElement, 'optionList')
		# Process units option
		units = xml0.getChildElementText(optionList, 'units', required=False, default='si')
		pq.set_default_units(units)
		# Read draw molecules option
		drawMolecules = xml0.getChildElement(optionList, 'drawMolecules', required=False)
		settings.drawMolecules = (drawMolecules is not None)
		# Read generate plots option
		generatePlots = xml0.getChildElement(optionList, 'generatePlots', required=False)
		settings.generatePlots = (generatePlots is not None)
		# Read spectral data estimation option
		spectralDataEstimation = xml0.getChildElement(optionList, 'spectralDataEstimation', required=False)
		settings.spectralDataEstimation = (spectralDataEstimation is not None)

		# Read unimolecular reaction network option
		unirxnNetworks = xml0.getChildElement(optionList, 'unimolecularReactionNetworks', required=False)
		if unirxnNetworks is not None:
			# Read method
			method = str(xml0.getChildElementText(unirxnNetworks, 'method', required=True))
			allowed = ['modifiedstrongcollision', 'reservoirstate']
			if method.lower() not in allowed:
				raise InvalidInputFileException('Invalid unimolecular reaction networks method "%s"; allowed values are %s.' % (method, allowed))
			# Read temperatures
			temperatures = xml0.getChildQuantity(unirxnNetworks, 'temperatures', required=False,
				default=pq.Quantity([300.0, 400.0, 500.0, 600.0, 800.0, 1000.0, 1500.0, 2000.0], 'K'))
			temperatures = [float(T.simplified) for T in temperatures]
			# Read pressures
			pressures = xml0.getChildQuantity(unirxnNetworks, 'pressures', required=False,
				default=pq.Quantity([1.0e3, 1.0e4, 1.0e5, 1.0e6, 1.0e7], 'Pa'))
			pressures = [float(P.simplified) for P in pressures]
			# Read grain size
			grainSize = xml0.getChildQuantity(unirxnNetworks, 'grainSize', required=False,
				default=pq.Quantity(0.0, 'J/mol'))
			grainSize = float(grainSize.simplified)
			# Read number of grains
			numberOfGrains = int(xml0.getChildElementText(unirxnNetworks, 'numberOfGrains', required=False, default=0))
			if grainSize == 0.0 and numberOfGrains == 0:
				raise InvalidInputFileException('Must specify a grain size or number of grains for unimolecular reaction networks calculations.')
			# Read interpolation model
			interpolationModel = xml0.getChildElement(unirxnNetworks, 'interpolationModel', required=True)
			modelType = str(xml0.getAttribute(interpolationModel, 'type', required=True))
			allowed = ['none', 'chebyshev', 'pdeparrhenius']
			if modelType.lower() not in allowed:
				raise InvalidInputFileException('Invalid unimolecular reaction networks interpolation model "%s"; allowed values are %s.' % (modelType, allowed))
			if modelType.lower() == 'chebyshev':
				numTPolys = int(xml0.getChildElementText(interpolationModel, 'numberOfTemperaturePolynomials', required=False, default='4'))
				numPPolys = int(xml0.getChildElementText(interpolationModel, 'numberOfPressurePolynomials', required=False, default='4'))
				interpolationModel = (modelType, numTPolys, numPPolys)
			else:
				interpolationModel = (modelType)
			settings.unimolecularReactionNetworks = (method, temperatures, pressures, grainSize, numberOfGrains, interpolationModel)
		else:
			settings.unimolecularReactionNetworks = None

		# Create an empty reaction model
		reactionModel = model.CoreEdgeReactionModel()

		# Load databases
		databases = readDatabaseList(xml0, rootElement)
		for database in databases:
			if database[1] == 'general':
				logging.verbose('General database: ' + database[2])
				# Load all databases
				loadThermoDatabase(database[2] + os.sep)
				loadKineticsDatabase(database[2] + os.sep)
				loadFrequencyDatabase(database[2])
			elif database[1] == 'seedmechanism':
				logging.verbose('Seed mechanism: ' + database[2])
				reactionModel.loadSeedMechanism(database[2])
			logging.verbose('')
			
		# Process species
		coreSpecies = []; speciesDict = {}
		speciesList = xml0.getChildElement(rootElement, 'speciesList')
		speciesElements = xml0.getChildElements(speciesList, 'species')
		logging.info('Found ' + str(len(speciesElements)) + ' species')
		for element in speciesElements:
			
			# Load species ID
			sid = str(xml0.getAttribute(element, 'id', required=True))

			# Load the species data from the file
			spec = species.Species()
			spec.fromXML(xml0, element)

			# Check that the species isn't already in the core (e.g. from a seed mechanism)
			existingSpecies = None
			for s in reactionModel.core.species:
				if s.isIsomorphic(spec):
					existingSpecies = s
					break

			if existingSpecies is not None:
				# Point to existing species rather than newly created species
				# This means that any information about the species in the
				# input file will be discarded in favor of the existing species
				# data
				spec = existingSpecies
			else:
				# Handle other aspects of RMG species creation
				logging.verbose('Creating new species %s' % str(spec))
				species.processNewSpecies(spec)

			# All species in RMG input file are immediately added to the core
			coreSpecies.append(spec)

			# Add to local species dictionary (for matching with other parts of file)
			speciesDict[sid] = spec

		logging.verbose('')
		
		# Read model flux tolerance
		fluxTolerance = xml0.getChildElement(rootElement, 'fluxTolerance')
		reactionModel.fluxToleranceKeepInEdge = float(xml0.getChildElementText(fluxTolerance, 'keepInEdge'))
		reactionModel.fluxToleranceMoveToCore = float(xml0.getChildElementText(fluxTolerance, 'moveToCore'))
		reactionModel.fluxToleranceInterrupt = float(xml0.getChildElementText(fluxTolerance, 'interruptSimulation'))
		
		logging.debug('Model flux tolerances set to:')
		logging.debug('\tKeep in edge:         %s' % (reactionModel.fluxToleranceKeepInEdge) )
		logging.debug('\tMove to core:         %s' % (reactionModel.fluxToleranceMoveToCore) )
		logging.debug('\tInterrupt simulation: %s' % (reactionModel.fluxToleranceInterrupt) )
		logging.debug('')
		
		# Read maximum model size
		maxModelSize = xml0.getChildElement(rootElement, 'maximumModelSize')
		if maxModelSize is None:
			logging.debug('Maximum model size is not set')
		else:
			reactionModel.maximumEdgeSpecies = int(xml0.getChildElementText(maxModelSize, 'edgeSpecies'))
			logging.debug('Maximum model size set to:')
			logging.debug('\tEdge species:         %s' % (reactionModel.maximumEdgeSpecies) )
		logging.debug('')

		# Read dynamic simulator
		element = xml0.getChildElement(rootElement, 'simulator')
		reactionModel.absoluteTolerance = float(xml0.getAttribute(element, 'atol'))
		reactionModel.relativeTolerance = float(xml0.getAttribute(element, 'rtol'))
		logging.info('Read dynamic simulator')
		logging.debug('Simulator:')
		logging.debug('\tAbsolute tolerance set to %s' % (reactionModel.absoluteTolerance))
		logging.debug('\tRelative tolerance set to %s' % (reactionModel.relativeTolerance))
		logging.debug('')

		# Read termination targets
		termination = xml0.getChildElement(rootElement, 'termination')
		targetElements = xml0.getChildElements(termination, 'target')
		for element in targetElements:

			targetType = xml0.getAttribute(element, 'type')
			if targetType == 'conversion':
				sid = xml0.getAttribute(element, 'speciesID')
				spec = speciesDict[sid]
				conv = float(xml0.getElementText(element))
				if conv < 0.0 or conv > 1.0:
					raise InvalidInputFileException('Invalid value for termination fractional conversion.')
				reactionModel.termination.append(model.TerminationConversion(spec, conv))
			elif targetType == 'time':
				units = str(xml0.getAttribute(element, 'units'))
				time = float(xml0.getElementText(element))
				time = pq.Quantity(time, units); time = float(time.simplified)
				if time < 0.0:
					raise InvalidInputFileException('Invalid value for termination time.')
				reactionModel.termination.append(model.TerminationTime(time))
			else:
				raise InvalidInputFileException('Invalid termination target type "'+targetType+'".')
		if len(reactionModel.termination) == 0:
			raise InvalidInputFileException('No termination targets specified.')

		# Output info about termination targets
		if len(reactionModel.termination) == 1:
			logging.info('Found ' + str(len(reactionModel.termination)) + ' termination target')
		else:
			logging.info('Found ' + str(len(reactionModel.termination)) + ' termination targets')
		for index, target in enumerate(reactionModel.termination):
			string = '\tTermination target #' + str(index+1) + ': '
			if target.__class__ == model.TerminationConversion:
				string += 'conversion ' + str(target.species) + ' ' + str(target.conversion)
			elif target.__class__ == model.TerminationTime:
				string += 'time ' + str(target.time)
			logging.debug(string)	
				
		logging.debug('')

		# Get list of available reaction systems
		import system as systemModule
		availableSystems = systemModule.getAvailableReactionSystems()
		
		# Process reaction systems
		reactionSystems = []
		reactionSystemList = xml0.getChildElement(rootElement, 'reactionSystemList')
		systemElements = xml0.getChildElements(reactionSystemList, 'reactionSystem')
		for systemElement in systemElements:
		
			# Determine the class of reaction system
			rsClass = xml0.getAttribute(systemElement, 'class')
			if rsClass not in availableSystems:
				raise InvalidInputFileException('Reaction system class "%s" not available.' % (rsClass))
		
			# Declare the reaction system and populate it with info
			reactionSystem = availableSystems[rsClass]()
			reactionSystem.fromXML(xml0, systemElement, speciesDict)
			reactionSystem.initializeCantera()
			
			# Append to the list of reaction systems
			reactionSystems.append(reactionSystem)

		# Output info about reaction system
		if len(reactionSystems) == 1:
			logging.info('Found ' + str(len(reactionSystems)) + ' reaction system')
		else:
			logging.info('Found ' + str(len(reactionSystems)) + ' reaction systems')
		for index, reactionSystem in enumerate(reactionSystems):
			logging.debug('Reaction system #%i: %s' % (index+1, reactionSystem))
				
		logging.debug('')
			
		# Cleanup the DOM tree when finished
		xml0.cleanup()
		
	except InvalidInputFileException as e:
		logging.exception(str(e))
		raise e
Example #42
File: model.py Project: gmagoon/RMG-Py
	def updateUnimolecularReactionNetworks(self):
		"""
		Iterate through all of the currently-existing unimolecular reaction
		networks, updating those that have been marked as invalid. In each update,
		the phenomenological rate coefficients :math:`k(T,P)` are computed for
		each net reaction in the network, and the resulting reactions added or
		updated.
		"""

		from unirxn.network import Isomer, UnirxnNetworkException
		from reaction import PDepReaction
		from kinetics import ChebyshevKinetics, PDepArrheniusKinetics

		count = sum([1 for network in self.unirxnNetworks if not network.valid])
		logging.info('Updating %i modified unimolecular reaction networks...' % count)

		for network in self.unirxnNetworks:
			if not network.valid:

				logging.verbose('Updating unimolecular reaction network %s' % network.id)

				# Other inputs
				method, Tlist, Plist, grainSize, numGrains, model = settings.unimolecularReactionNetworks

				network.bathGas = [spec for spec in self.core.species if not spec.reactive][0]
				network.bathGas.expDownParam = 4.86 * 4184

				# Generate isomers
				for reaction in network.pathReactions:

					# Create isomer for the reactant
					isomer = None
					for isom in network.isomers:
						if all([spec in isom.species for spec in reaction.reactants]):
							isomer = isom
					if isomer is None:
						isomer = Isomer(reaction.reactants)
						network.isomers.append(isomer)
					reaction.reactant = isomer

					# Create isomer for the product
					isomer = None
					for isom in network.isomers:
						if all([spec in isom.species for spec in reaction.products]):
							isomer = isom
					if isomer is None:
						isomer = Isomer(reaction.products)
						network.isomers.append(isomer)
					reaction.product = isomer

				# Update list of explored isomers to include all species in core
				for isom in network.isomers:
					if isom.isUnimolecular():
						spec = isom.species[0]
						if spec not in network.explored:
							if spec in self.core.species:
								network.explored.append(spec)

				# Remove any isomers that aren't found in any path reactions
				# Ideally this block of code wouldn't be needed, but it's here
				# just in case
				isomerList = []
				for isomer in network.isomers:
					found = False
					for reaction in network.pathReactions:
						if reaction.reactant is isomer or reaction.product is isomer:
							found = True
							break
					if not found:
						isomerList.append(isomer)
				if len(isomerList) > 0:
					logging.debug('Removed %i isomer(s) from network %i.' % (len(isomerList), network.id))
					for isomer in isomerList: network.isomers.remove(isomer)

				# Sort isomers so that all unimolecular isomers come first
				isomers = [isom for isom in network.isomers if isom.isUnimolecular()]
				isomers.extend([isom for isom in network.isomers if isom.isMultimolecular()])
				network.isomers = isomers

				# Get list of species in network
				speciesList = network.getSpeciesList()

				# Calculate ground-state energy of all species in network
				# For now we assume that this is equal to the enthalpy of formation
				# of the species
				for spec in speciesList:
					spec.E0 = spec.getEnthalpy(T=298)

				# Determine isomer ground-state energies
				for isomer in network.isomers:
					isomer.E0 = sum([spec.E0 for spec in isomer.species])
				# Determine transition state ground-state energies of the reactions
				for reaction in network.pathReactions:
					E0 = sum([spec.E0 for spec in reaction.reactants])
					reaction.E0 = E0 + reaction.kinetics[0].getActivationEnergy(reaction.getEnthalpyOfReaction(T=298))

				# Shift network such that lowest-energy isomer has a ground state of 0.0
				network.shiftToZeroEnergy()

				# Determine energy grains
				Elist = network.determineEnergyGrains(grainSize, numGrains, max(Tlist))

				# Calculate density of states for all isomers in network
				network.calculateDensitiesOfStates(Elist)

				# Determine phenomenological rate coefficients
				K = network.calculateRateCoefficients(Tlist, Plist, Elist, method)

				# Generate PDepReaction objects
				for i, product in enumerate(network.isomers):
					for j, reactant in enumerate(network.isomers[0:i]):
						if numpy.any(K[:,:,i,j]):
							if not numpy.all(K[:,:,i,j]):
								raise UnirxnNetworkException('Zero rate coefficient encountered while updating network %s.' % network)
							# Find the path reaction
							netReaction = None
							for r in network.netReactions:
								if r.hasTemplate(reactant.species, product.species):
									netReaction = r
							# If path reaction does not already exist, make a new one
							if netReaction is None:
								netReaction = PDepReaction(reactant.species, product.species, network, None)
								network.netReactions.append(netReaction)
								self.addReactionToEdge(netReaction)
							# Set its kinetics using interpolation model
							if model[0].lower() == 'chebyshev':
								modelType, degreeT, degreeP = model
								chebyshev = ChebyshevKinetics()
								chebyshev.fitToData(Tlist, Plist, K[:,:,i,j], degreeT, degreeP)
								netReaction.kinetics = chebyshev
							elif model.lower() == 'pdeparrhenius':
								pDepArrhenius = PDepArrheniusKinetics()
								pDepArrhenius.fitToData(Tlist, Plist, K[:,:,i,j])
								netReaction.kinetics = pDepArrhenius
							else:
								pass

							# Update cantera if this is a core reaction
							if netReaction in self.core.reactions:
								netReaction.toCantera()

				for spec in speciesList:
					del spec.E0
				for reaction in network.pathReactions:
					del reaction.reactant
					del reaction.product
					del reaction.E0

				network.valid = True
Example #43
File: cli.py Project: ASPP/bloscpack
def main():
    parser = create_parser()
    log.set_prefix(parser.prog)
    args = parser.parse_args()
    if args.verbose:
        log.LEVEL = log.VERBOSE
    elif args.debug:
        log.LEVEL = log.DEBUG
    log.debug('command line argument parsing complete')
    log.debug('command line arguments are: ')
    for arg, val in vars(args).iteritems():
        log.debug('    %s: %s' % (arg, str(val)))
    process_nthread_arg(args)

    # compression and decompression handled via subparsers
    if args.subcommand in ['compress', 'c']:
        log.verbose('getting ready for compression')
        in_file, out_file, blosc_args = process_compression_args(args)
        try:
            check_files(in_file, out_file, args)
        except FileNotFound as fnf:
            log.error(str(fnf))
        metadata = process_metadata_args(args)
        bloscpack_args = BloscpackArgs(offsets=args.offsets,
                                       checksum=args.checksum)
        try:
            pack_file(in_file, out_file, chunk_size=args.chunk_size,
                      metadata=metadata,
                      blosc_args=blosc_args,
                      bloscpack_args=bloscpack_args,
                      metadata_args=MetadataArgs())
        except ChunkingException as ce:
            log.error(str(ce))
    elif args.subcommand in ['decompress', 'd']:
        log.verbose('getting ready for decompression')
        in_file, out_file = process_decompression_args(args)
        try:
            check_files(in_file, out_file, args)
        except FileNotFound as fnf:
            log.error(str(fnf))
        try:
            metadata = unpack_file(in_file, out_file)
            if metadata:
                log_metadata(metadata)
        except FormatVersionMismatch as fvm:
            log.error(fvm.message)
        except ChecksumMismatch as csm:
            log.error(csm.message)
    elif args.subcommand in ['append', 'a']:
        log.verbose('getting ready for append')
        original_file, new_file = process_append_args(args)
        try:
            if not path.exists(original_file):
                raise FileNotFound("original file '%s' does not exist!" %
                                   original_file)
            if not path.exists(new_file):
                raise FileNotFound("new file '%s' does not exist!" %
                                   new_file)
        except FileNotFound as fnf:
            log.error(str(fnf))
        log.verbose("original file is: '%s'" % original_file)
        log.verbose("new file is: '%s'" % new_file)
        blosc_args = _blosc_args_from_args(args)
        metadata = process_metadata_args(args)
        append(original_file, new_file, blosc_args=blosc_args)
        if metadata is not None:
            with open(original_file, 'r+b') as fp:
                _seek_to_metadata(fp)
                _rewrite_metadata_fp(fp, metadata)
    elif args.subcommand in ('info', 'i'):
        try:
            if not path.exists(args.file_):
                raise FileNotFound("file '%s' does not exist!" %
                                   args.file_)
        except FileNotFound as fnf:
            log.error(str(fnf))
        try:
            with open(args.file_, 'rb') as fp:
                bloscpack_header, metadata, metadata_header, offsets = \
                    _read_beginning(fp)
        except ValueError as ve:
            log.error(str(ve) + "\n" +
                      "This might not be a bloscpack compressed file.")
        log.normal(bloscpack_header.pformat())
        if offsets:
            log.normal("'offsets':")
            log.normal("[%s,...]" % (",".join(str(o) for o in offsets[:5])))
        if metadata is not None:
            log_metadata(metadata)
            log.normal(metadata_header.pformat())

    else:  # pragma: no cover
        # we should never reach this
        log.error('You found the easter-egg, please contact the author')
    log.verbose('done')
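
The pack_file and unpack_file entry points that main() dispatches to can also be called directly. A minimal round-trip sketch, assuming the top-level bloscpack package exposes both functions (the import path and file names here are assumptions):

from bloscpack import pack_file, unpack_file

# Compress with default settings, then restore; unpack_file returns the
# stored metadata, or None if the container carries none
pack_file('data.bin', 'data.blp')
metadata = unpack_file('data.blp', 'data.roundtrip.bin')
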
Example #44
File: cli.py Project: ASPP/bloscpack
def process_nthread_arg(args):
    """ Extract and set nthreads. """
    if args.nthreads != blosc.ncores:
        blosc.set_nthreads(args.nthreads)
    log.verbose('using %d thread%s' %
                (args.nthreads, 's' if args.nthreads > 1 else ''))
Example #45
File: stuff.py Project: drprofesq/hmdx
def readtext(filepath):
    log.verbose('Reading file "%s" as text' % filepath)
    with open(filepath, 'r') as f:
        return f.read()
Example #46
	def load(self, datapath):
		"""
		Load a set of thermodynamics group additivity databases from the general
		database specified at `datapath`.
		"""
		datapath = os.path.join(datapath,'thermo_groups')
		datapath = os.path.abspath(datapath)
		def DTLpaths(prefix):
			"""Get a tuple of Dictionary, Tree, and Library paths for a given prefix"""
			dict_path = os.path.join(datapath, prefix+'_Dictionary.txt')
			tree_path = os.path.join(datapath, prefix+'_Tree.txt')
			libr_path = os.path.join(datapath, prefix+'_Library.txt')
			return dict_path, tree_path, libr_path

		logging.info('Loading thermodynamics databases from %s...' % datapath)

		logging.verbose('Loading functional group thermo database from %s...' % datapath)
		self.groupDatabase.load(*DTLpaths('Group')) # the '*' unpacks the tuple into three separate arguments
		
		logging.verbose('Loading 1,5 interactions thermo database from %s...' % datapath)
		self.int15Database.load(*DTLpaths('15'))
		
		logging.verbose('Loading gauche interactions thermo database from %s...' % datapath)
		self.gaucheDatabase.load(*DTLpaths('Gauche'))
		
		logging.verbose('Loading radical corrections thermo database from %s...' % datapath)
		self.radicalDatabase.load(*DTLpaths('Radical'))
		
		logging.verbose('Loading ring corrections thermo database from %s...' % datapath)
		self.ringDatabase.load(*DTLpaths('Ring'))
		
		logging.verbose('Loading other corrections thermo database from %s...' % datapath)
		self.otherDatabase.load(*DTLpaths('Other'))

		logging.verbose('Loading primary thermo database from %s...' % datapath)
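		# The primary database has only a dictionary and a library, so an empty
		# string is passed where the tree path would go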
		self.primaryDatabase.load(os.path.join(datapath, 'Primary_Dictionary.txt'),
			'',
			os.path.join(datapath, 'Primary_Library.txt'))
Example #47
    def build(self):
        finalout = self.get_finalout_path()
        if os.path.exists(finalout):
            log.verbose("package '{}' is already built".format(self.name))
        else:
            self.build_ins()

            log.log(
                "--------------------------------------------------------------------------------"
            )
            log.log("BUILD   {}".format(finalout))
            # stage dir is where the files live during the build process
            stage_dir = self.get_stage_path()
            # tmpout dir is where the files are written to temporarily
            tmpout = self.get_tmpout_path()
            if os.path.exists(stage_dir):
                sys.exit(
                    "Error: stage directory '{}' already exists (TODO: handle this)"
                    .format(stage_dir))
            if os.path.exists(tmpout):
                sys.exit(
                    "Error: temporary output path '{}' already exists (TODO: handle this)"
                    .format(tmpout))
            log.verbose("mkdir '{}'".format(stage_dir))
            os.mkdir(stage_dir)

            save_cwd = os.getcwd()
            os.chdir(stage_dir)
            log.verbose("executing '{}' ops...".format(len(self.raw_ops)))
            executor = ops.OpExecutor()
            for op in self.raw_ops:
                if not isinstance(op, dict):
                    sys.exit(
                        "Error: in {}, got an op that is not an object but is {}"
                        .format(self.name, type(op)))
                executor.execute(
                    self,
                    self.process_dict(op, ProcessState.BUILDING,
                                      ProcessErrorContext()))
            os.chdir(save_cwd)

            if not os.path.exists(tmpout):
                sys.exit(
                    "Error: operations did not generate anything in '@tmpout' ({})"
                    .format(tmpout))
            if self.ifaces:
                for iface in self.ifaces:
                    iface_path = os.path.join(tmpout, "genesis", "i", iface)
                    if os.name == 'nt':
                        # TODO: for now we only support bat files here
                        iface_path += '.bat'
                    if not os.path.exists(iface_path):
                        raise Exception(
                            "genesis object '{}' implements interface '{}' but did not create file '{}'"
                            .format(self.name, iface, iface_path))

            log.verbose("moving tmpout to final out")
            os.rename(tmpout, finalout)

            if not global_keep_stage:
                log.rmtree(stage_dir)
            log.log(
                "--------------------------------------------------------------------------------"
            )
        return finalout
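
A note on the structure of build() above: all work happens inside stage_dir, outputs accumulate in tmpout, and the finished tree is published with a single os.rename, so the existence of finalout doubles as the "already built" marker. A standalone sketch of the same pattern, with all names illustrative (note that os.rename is only atomic when both paths live on the same filesystem):

import os
import shutil

def build_once(stage_dir, tmpout, finalout, do_build):
    """Run `do_build` at most once; an existing `finalout` means already built."""
    if os.path.exists(finalout):
        return finalout
    os.mkdir(stage_dir)              # scratch space for the build steps
    try:
        do_build(stage_dir, tmpout)  # must create `tmpout` on success
        os.rename(tmpout, finalout)  # single publish step: a finished build
                                     # becomes visible all at once
    finally:
        shutil.rmtree(stage_dir, ignore_errors=True)
    return finalout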