コード例 #1
0
ファイル: example.py プロジェクト: pombredanne/satcli
    def ex1(self, cli_opts, cli_args):
        """
        This is how to expose a subcommand because it will be under the 
        'example' namespace.  You would access this subcommand as:
    
            $ satcli example ex1
        
        Required Arguments:

            cli_opts
                Parsed command line options (as added in
                satcli.bootstrap.example).

            cli_args
                Positional command line arguments *after* the command.

        Returns: a dict, accessible as json output via --json.

        """

        # You can get the root application config like this:
        config = get_config("root")

        # Or you can get your example namespace config like this:
        config = get_config("example")

        # You can print or log output however you like since this function
        # does not render out to a template.

        # Commands are all passed the cli_opts, cli_args from the command line.
        # So if you have added cli options in your satcli.bootstrap.example
        # file, you could access them here as:
        #
        #   cli_opts.<your_option>
        #   cli_args[0] # first argument *after* your command
        #

        # Here we show how to run a hook that we've defined in
        # satcli.bootstrap.example:
        for res in run_hooks("my_example_hook"):
            print res

        # This command has no template, but if we return something we
        # can still access the json output via --json.
        return dict(foo="bar")
コード例 #2
0
ファイル: smtp.py プロジェクト: dev-ace/ius-tools
def send_mail(to_addr, subject, message):
    """
    Send a notification email using the SMTP settings from the 'admin'
    section of the application config.

    Required Arguments:

        to_addr
            The recipient address.

        subject
            The mail subject (the configured smtp_subject_prefix is
            prepended).

        message
            The mail body text.

    Errors connecting to the SMTP server are logged, not raised.
    """
    log = get_logger(__name__)
    log.debug('Sending mail to %s - %s' % (to_addr, subject))
    config = get_config()
    from_addr = config['admin']['smtp_from']
    subject_prefix = config['admin']['smtp_subject_prefix']
    host = config['admin']['smtp_host']
    port = config['admin']['smtp_port']
    user = config['admin']['smtp_user']
    password = config['admin']['smtp_password']
    tls = config['admin']['smtp_tls']
    key = config['admin']['smtp_keyfile']
    cert = config['admin']['smtp_certfile']
    try:
        smtp = smtplib.SMTP(host, port)
        if tls:
            # BUG FIX: the locals are named 'key' and 'cert'; the previous
            # code passed the undefined names 'keyfile'/'certfile', raising
            # NameError whenever smtp_tls was enabled.
            smtp.starttls(key, cert)

        if user:
            smtp.login(user, password)

        msg = "From: %s\r\n" % from_addr
        msg = msg + "To: %s\r\n" % to_addr
        msg = msg + "Subject: %s%s\r\n" % (subject_prefix, subject)
        msg = msg + message
        smtp.sendmail(from_addr, to_addr, msg)
    except socket.error, e:
        log.error("unable to send email - %s %s" % (e.args[0], e.args[1]))
コード例 #3
0
ファイル: appmain.py プロジェクト: nixar/satcli
def main():
    """
    Application entry point: bootstrap the Cement framework, acquire the
    run-time lock file, and dispatch the requested sub-command.
    """
    try:
        ensure_api_compat(__name__, REQUIRED_CEMENT_API)    
        lay_cement(config=default_config, banner=BANNER)
    
        log = get_logger(__name__)
        log.debug("Cement Framework Initialized!")

        # fall back to the 'default' command when none was given
        if not len(sys.argv) > 1:
            sys.argv.append('default')

        config = get_config()
        
        # create the lock file
        # NOTE(review): this exists-then-create sequence is racy if two
        # processes start at the same instant; os.open with O_CREAT|O_EXCL
        # would be atomic -- confirm whether that matters here.
        if os.path.exists(config['lockfile']):
            raise SatCLIRuntimeError, \
                "lock file exists, is satcli already running?"
        else:
            f = open(config['lockfile'], 'w+')
            f.write(get_timestamp())
            f.close()

        run_command(sys.argv[1])
            
    except CementArgumentError, e:
        print("CementArgumentError > %s" % e)
        sys.exit(e.code)
コード例 #4
0
ファイル: smtp.py プロジェクト: pombredanne/ius-tools
def send_mail(to_addr, subject, message):
    """
    Send a notification email using the SMTP settings from the 'admin'
    section of the application config.

    Required Arguments:

        to_addr
            The recipient address.

        subject
            The mail subject (the configured smtp_subject_prefix is
            prepended).

        message
            The mail body text.

    Errors connecting to the SMTP server are logged, not raised.
    """
    log = get_logger(__name__)
    log.debug('Sending mail to %s - %s' % (to_addr, subject))
    config = get_config()
    from_addr = config['admin']['smtp_from']
    subject_prefix = config['admin']['smtp_subject_prefix']
    host = config['admin']['smtp_host']
    port = config['admin']['smtp_port']
    user = config['admin']['smtp_user']
    password = config['admin']['smtp_password']
    tls = config['admin']['smtp_tls']
    key = config['admin']['smtp_keyfile']
    cert = config['admin']['smtp_certfile']
    try:
        smtp = smtplib.SMTP(host, port)
        if tls:
            # BUG FIX: the locals are named 'key' and 'cert'; the previous
            # code passed the undefined names 'keyfile'/'certfile', raising
            # NameError whenever smtp_tls was enabled.
            smtp.starttls(key, cert)

        if user:
            smtp.login(user, password)

        msg = "From: %s\r\n" % from_addr
        msg = msg + "To: %s\r\n" % to_addr
        msg = msg + "Subject: %s%s\r\n" % (subject_prefix, subject)
        msg = msg + message
        smtp.sendmail(from_addr, to_addr, msg)
    except socket.error, e:
        log.error("unable to send email - %s %s" % (e.args[0], e.args[1]))
コード例 #5
0
ファイル: repo.py プロジェクト: pombredanne/ius-tools
    def build_metadata(self):
        """
        Generate Yum metadata in each director of path_list.

        Runs createrepo (and, for el4 repos, yum-arch) plus repoview over
        every repo directory found under self.local_path.
        """
        path_list = self.get_repo_paths(self.local_path)

        log.info("Generating repository metadata")
        config = get_config()
        # number of path components in the repo base; used to log only the
        # portion of each path relative to that base
        start = len(config['admin']['repo_base_path'].split('/'))
        # NOTE(review): the loop body reads self.config while the lines
        # above read the module-level get_config() -- presumably the same
        # data; confirm.
        for path in path_list:
            log.info("  `-> %s" % '/'.join(path.split('/')[start:]))
            if 'debuginfo' in path.split('/'):
                # debuginfo repos: index everything
                os.system('%s -d -s md5 %s >/dev/null' % \
                    (self.config['admin']['createrepo_binpath'], path))
            else:
                # regular repos: exclude debuginfo packages
                os.system('%s -x debuginfo -d -s md5 %s >/dev/null' % \
                    (self.config['admin']['createrepo_binpath'], path))

            # run yum-arch for el4 repos
            if path.find('Redhat/4/') > 0:
                os.system('%s %s >/dev/null 2>&1' % \
                    (self.config['admin']['yumarch_binpath'], path))

            # add repoview
            if 'debuginfo' in path.split('/'):
                os.system('%s %s >/dev/null 2>&1' % \
                         (self.config['admin']['repoview_binpath'], path))
            else:
                os.system('%s -i *debuginfo* %s >/dev/null 2>&1' % \
                         (self.config['admin']['repoview_binpath'], path))
コード例 #6
0
ファイル: repo.py プロジェクト: dev-ace/ius-tools
    def build_metadata(self):
        """
        Generate Yum metadata in each director of path_list.

        Runs createrepo (and, for el4 repos, yum-arch) plus repoview over
        every repo directory found under self.local_path.
        """
        path_list = self.get_repo_paths(self.local_path)
            
        log.info("Generating repository metadata")
        config = get_config()
        # number of path components in the repo base; used to log only the
        # portion of each path relative to that base
        start = len(config['admin']['repo_base_path'].split('/'))
        # NOTE(review): the loop body reads self.config while the lines
        # above read the module-level get_config() -- presumably the same
        # data; confirm.
        for path in path_list:
            log.info("  `-> %s" % '/'.join(path.split('/')[start:]))
            if 'debuginfo' in path.split('/'):
                # debuginfo repos: index everything
                os.system('%s -d -s md5 %s >/dev/null' % \
                    (self.config['admin']['createrepo_binpath'], path))
            else:
                # regular repos: exclude debuginfo packages
                os.system('%s -x debuginfo -d -s md5 %s >/dev/null' % \
                    (self.config['admin']['createrepo_binpath'], path))
            
            # run yum-arch for el4 repos
            if path.find('Redhat/4/') > 0:
                os.system('%s %s >/dev/null 2>&1' % \
                    (self.config['admin']['yumarch_binpath'], path))
                    
            # add repoview
            if 'debuginfo' in path.split('/'):
                os.system('%s %s >/dev/null 2>&1' % \
                         (self.config['admin']['repoview_binpath'], path))
            else:
                os.system('%s -i *debuginfo* %s >/dev/null 2>&1' % \
                         (self.config['admin']['repoview_binpath'], path))
コード例 #7
0
ファイル: mirror.py プロジェクト: pombredanne/satcli
    def __init__(self, label):
        """
        Set up mirror state for a single channel.

        Required Arguments:

            label
                The channel label this mirror instance manages.

        """
        self.config = get_config("mirror")
        self.label = label
        self.synced_files = []
        self.attempted_files = []
        self.modified = False
        # expand the %(mirror_dir) placeholder in the per-channel path
        # using the global mirror_dir setting
        self.local_dir = re.sub("\%\(mirror_dir\)", self.config["mirror_dir"], self.config[self.label]["path"])
        self.channel = g.proxy.query(model.Channel, just_one=True, all_data=True, label=label)
        # get the package list (latest-only when configured per channel)
        if self.config[self.label]["only_latest"]:
            call_path = "channel.software.listLatestPackages"
        else:
            call_path = "channel.software.listAllPackages"
        self.packages = g.proxy.call(call_path, self.label)

        # base mirror config
        run_createrepo = self.config.get("run_createrepo", None)
        run_yumarch = self.config.get("run_yumarch", None)
        only_latest = self.config.get("only_latest", None)

        # per channel config overrides the base settings above
        self.run_createrepo = self.config[self.label].get("run_createrepo", run_createrepo)
        self.run_yumarch = self.config[self.label].get("run_yumarch", run_yumarch)
        self.only_latest = self.config[self.label].get("only_latest", only_latest)

        # create our local dir if missing
        if not os.path.exists(self.local_dir):
            os.makedirs(self.local_dir)
コード例 #8
0
ファイル: mirror.py プロジェクト: nixar/satcli
def validate_config_hook(*args, **kwargs):
    """
    Verify that every required 'mirror' namespace setting is present, and
    ensure the configured mirror_dir exists on disk (creating it if needed).

    Raises CementConfigError when a required setting is missing.
    """
    mirror_config = get_config('mirror')
    for setting in ('mirror_dir',):
        if setting not in mirror_config:
            raise CementConfigError(
                "config['mirror']['%s'] value missing!" % setting)

    target_dir = mirror_config['mirror_dir']
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
コード例 #9
0
ファイル: controller.py プロジェクト: dev-ace/ius-tools
 def _abort_on_api_error(self, errors={}):
     config = get_config()
     if len(errors) > 0:
         if config['output_handler'] == 'json':
             run_controller_command('root', 'api_error_json',
                                    errors=errors, cli_opts=self.cli_opts,
                                    cli_args=self.cli_args)
         else:
             run_controller_command('root', 'api_error', errors=errors,
                                    cli_opts=self.cli_opts,
                                    cli_args=self.cli_args)
         sys.exit(1)
コード例 #10
0
    def process_tag(self, tag_label):
        """
        Promote builds from '<tag_label>-candidate' into '<tag_label>' in
        the 'ius' project and announce the result by email.

        When promoting to 'stable', every other tag is removed from the
        moved builds, and builds untagged by the move are re-tagged into
        'archive'.

        Required Arguments:

            tag_label
                The destination tag label (e.g. 'stable').

        """
        config = get_config()
        log.info("Processing tag %s" % tag_label)
        res = self._wrap(self.mf.tag.get_one, "%s-candidate" % tag_label,
                         'ius')
        from_tag = res['data']['tag']

        res = self._wrap(self.mf.tag.get_one, tag_label, 'ius')
        to_tag = res['data']['tag']

        # stable promotions archive whatever they displace
        old_tag = None
        if tag_label == 'stable':
            res = self._wrap(self.mf.tag.get_one, 'archive', 'ius')
            old_tag = res['data']['tag']

        res = self._wrap(self.mf.tag.move_builds, from_tag['label'], 'ius',
                         to_tag['label'])
        untagged_builds = res['data']['untagged_builds']
        moved_builds = res['data']['moved_builds']

        # nothing moved -> nothing to clean up or announce
        if not len(moved_builds) > 0:
            return

        # if tagging to stable, remove all other tags
        if to_tag['label'] == 'stable':
            res = self._wrap(self.mf.tag.get_all, dict(project_label='ius'))
            all_tags = res['data']['tags']
            for build in moved_builds:
                for _tag in all_tags:
                    if _tag['label'] == 'stable':
                        continue
                    res = self._wrap(self.mf.build.untag, build, 'ius',
                                     _tag['label'])

        # if there were older untagged_builds move them to old_tag
        if old_tag and len(untagged_builds) > 0:
            msg = TAG_AND_UNTAG_MSG % (to_tag['label'], "\n\r    - ".join(
                moved_builds), "\n\r    - ".join(untagged_builds), FOOTER)
            for old_label in untagged_builds:
                res = self._wrap(self.mf.build.tag, old_label, 'ius',
                                 old_tag['label'])
        else:
            msg = TAG_MSG % (to_tag['label'], "\n\r   - ".join(moved_builds),
                             FOOTER)

        for build in moved_builds:
            log.info("  `-> %s" % build)

        send_mail(config['admin']['announce_email'],
                  "new builds moved to tag '%s'" % to_tag['label'], msg)
コード例 #11
0
ファイル: mirror.py プロジェクト: pombredanne/satcli
    def _mirror_channel(self, channel):
        """
        Sync a single channel to disk via LocalRepo, cleaning up any
        half-downloaded file if the user interrupts with Ctrl-C.

        Required Arguments:

            channel
                The channel label to mirror.

        Raises SatCLIRuntimeError on KeyboardInterrupt (after cleanup).
        """
        config = get_config("mirror")
        repo = LocalRepo(channel)

        try:
            repo.sync(verify=self.cli_opts.verify, force=self.cli_opts.force)
        except KeyboardInterrupt, e:
            log.warn("Caught KeyboardInterrupt => Attempting to exit clean...")
            # remove the last file attempted -- it is likely incomplete
            if len(repo.attempted_files) > 0:
                last_path = os.path.join(repo.local_dir, repo.attempted_files[-1])
                if os.path.exists(last_path):
                    log.debug("cleanup: removing last attempted file %s" % last_path)
                    os.remove(last_path)
            raise SatCLIRuntimeError, "Caught KeyboardInterrupt"
コード例 #12
0
ファイル: mirror.py プロジェクト: pombredanne/satcli
    def sync(self, *args, **kw):
        config = get_config("mirror")

        if not self.cli_opts.channel:
            raise SatCLIArgumentError, "Must pass a channel label (or 'all')"

        if self.cli_opts.channel == "all":
            for c in config.sections:
                self._mirror_channel(c)
        else:
            if self.cli_opts.channel in config.sections:
                self._mirror_channel(self.cli_opts.channel)
            else:
                raise SatCLIArgumentError, "channel '%s' doesn't exist in the config." % self.cli_opts.channel
                sys.exit(1)
コード例 #13
0
ファイル: admin.py プロジェクト: iuscommunity/ius-tools
    def process_tag(self, tag_label):
        """
        Promote builds from '<tag_label>-candidate' into '<tag_label>' in
        the 'ius' project and announce the result by email.

        When promoting to 'stable', every other tag is removed from the
        moved builds, and builds untagged by the move are re-tagged into
        'archive'.

        Required Arguments:

            tag_label
                The destination tag label (e.g. 'stable').

        """
        config = get_config()
        log.info("Processing tag %s" % tag_label)
        res = self._wrap(self.mf.tag.get_one, "%s-candidate" % tag_label, "ius")
        from_tag = res["data"]["tag"]

        res = self._wrap(self.mf.tag.get_one, tag_label, "ius")
        to_tag = res["data"]["tag"]

        # stable promotions archive whatever they displace
        old_tag = None
        if tag_label == "stable":
            res = self._wrap(self.mf.tag.get_one, "archive", "ius")
            old_tag = res["data"]["tag"]

        res = self._wrap(self.mf.tag.move_builds, from_tag["label"], "ius", to_tag["label"])
        untagged_builds = res["data"]["untagged_builds"]
        moved_builds = res["data"]["moved_builds"]

        # nothing moved -> nothing to clean up or announce
        if not len(moved_builds) > 0:
            return

        # if tagging to stable, remove all other tags
        if to_tag["label"] == "stable":
            res = self._wrap(self.mf.tag.get_all, dict(project_label="ius"))
            all_tags = res["data"]["tags"]
            for build in moved_builds:
                for _tag in all_tags:
                    if _tag["label"] == "stable":
                        continue
                    res = self._wrap(self.mf.build.untag, build, "ius", _tag["label"])

        # if there were older untagged_builds move them to old_tag
        if old_tag and len(untagged_builds) > 0:
            msg = TAG_AND_UNTAG_MSG % (
                to_tag["label"],
                "\n\r    - ".join(moved_builds),
                "\n\r    - ".join(untagged_builds),
                FOOTER,
            )
            for old_label in untagged_builds:
                res = self._wrap(self.mf.build.tag, old_label, "ius", old_tag["label"])
        else:
            msg = TAG_MSG % (to_tag["label"], "\n\r   - ".join(moved_builds), FOOTER)

        for build in moved_builds:
            log.info("  `-> %s" % build)

        send_mail(config["admin"]["announce_email"], "new builds moved to tag '%s'" % to_tag["label"], msg)
コード例 #14
0
ファイル: admin.py プロジェクト: iuscommunity/ius-tools
    def gen_repo(self):
        """
        Build the IUS repository: optionally sign packages, optionally
        clean first, then fetch files and regenerate the repo metadata.
        """
        config = get_config()
        opts = self.cli_opts
        if opts.sign:
            # take the passphrase from the CLI, else prompt without echo
            secret = opts.gpg_passphrase
            if not secret:
                secret = get_input("GPG Key Passphrase: ", suppress=True)
            repo = IUSRepo(config, self.mf, sign=True, gpg_passphrase=secret)
        else:
            repo = IUSRepo(config, self.mf)

        if opts.clean:
            repo.clean()

        repo.get_files()
        repo.build_metadata()
コード例 #15
0
 def _abort_on_api_error(self, errors={}):
     config = get_config()
     if len(errors) > 0:
         if config['output_handler'] == 'json':
             run_controller_command('root',
                                    'api_error_json',
                                    errors=errors,
                                    cli_opts=self.cli_opts,
                                    cli_args=self.cli_args)
         else:
             run_controller_command('root',
                                    'api_error',
                                    errors=errors,
                                    cli_opts=self.cli_opts,
                                    cli_args=self.cli_args)
         sys.exit(1)
コード例 #16
0
ファイル: admin.py プロジェクト: jness/ius-tools
    def push_to_public(self):
        """
        Rsync the local ius/ repository tree to the public remote and, when
        configured, to the internal remote as well.

        Raises Exception when the public rsync exits non-zero (previously
        the exit status was silently ignored; the newer revision of this
        method checks it, and this version is made consistent with that).
        """
        config = get_config()
        log.info("pushing changes to %s" % config['admin']['remote_rsync_path'])
        if self.cli_opts.delete:
            status = os.system('%s -az --delete %s/ius/ %s/ius/ --exclude %s >/dev/null' % \
                     (config['admin']['rsync_binpath'],
                      config['admin']['repo_base_path'],
                      config['admin']['remote_rsync_path'],
                      config['admin']['remote_exclude']))
        else:
            status = os.system('%s -az %s/ius/ %s/ius/ --exclude %s >/dev/null' % \
                     (config['admin']['rsync_binpath'],
                      config['admin']['repo_base_path'],
                      config['admin']['remote_rsync_path'],
                      config['admin']['remote_exclude']))

        # handle errors on our rsync command rather than pushing on with a
        # partial sync
        if status != 0:
            log.error("rsync failed and returned status %s" % status)
            raise Exception("fatal error")

        # Internal IUS Push
        if config['admin']['internal_remote_rsync_path']:

            # remove any excludes if configured
            if config['admin']['internal_remote_exclude']:
                for exclude in config['admin']['internal_remote_exclude']:
                    log.info("removing %s from %s" % (exclude, config['admin']['repo_base_path']))
                    for dirs in os.walk('%s/ius/' % os.path.expanduser(config['admin']['repo_base_path'])):
                        if exclude in ', '.join(dirs[2]):
                            for f in dirs[2]:
                                if exclude in f:
                                    os.remove('%s/%s' % (dirs[0], f))

                # rebuild our meta data now that
                # files have been removed
                repo = IUSRepo(config, self.mf)
                repo.build_metadata()

            log.info("pushing changes to %s" % config['admin']['internal_remote_rsync_path'])
            if self.cli_opts.delete:
                os.system('%s -az --delete %s/ius/ %s/ >/dev/null' % \
                         (config['admin']['rsync_binpath'],
                          config['admin']['repo_base_path'],
                          config['admin']['internal_remote_rsync_path']))
            else:
                os.system('%s -az %s/ius/ %s/ >/dev/null' % \
                         (config['admin']['rsync_binpath'],
                          config['admin']['repo_base_path'],
                          config['admin']['internal_remote_rsync_path']))
コード例 #17
0
    def gen_repo(self):
        """
        Build the IUS repository: optionally sign packages, optionally
        clean first, then fetch files and regenerate the repo metadata.
        """
        config = get_config()
        if not self.cli_opts.sign:
            repo = IUSRepo(config, self.mf)
        else:
            # take the passphrase from the CLI, else prompt without echo
            secret = self.cli_opts.gpg_passphrase
            if not secret:
                secret = get_input("GPG Key Passphrase: ", suppress=True)
            repo = IUSRepo(config, self.mf, sign=True, gpg_passphrase=secret)

        if self.cli_opts.clean:
            repo.clean()

        repo.get_files()
        repo.build_metadata()
コード例 #18
0
ファイル: channel.py プロジェクト: nixar/satcli
 def push(self, *args, **kw):
     """
     Push RPMs and/or SRPMs (glob patterns from the rpms/srpms cli
     options) to one or more channels via rhnpush.

     Raises SatCLIArgumentError when -l/--label was not passed.
     """
     config = get_config()
     if not self.cli_opts.label:
         raise SatCLIArgumentError, 'channel -l/--label required'
     
     # multiple channels may be given as a comma separated list
     channels = self.cli_opts.label.split(',')
     for channel in channels:
         if self.cli_opts.rpms:        
             rpms = glob(str(self.cli_opts.rpms))
             for rpm in rpms:
                 nosig_txt = ''
                 if config['allow_nosig']:
                     nosig_txt = '--nosig'
                 cmd = "%s %s -u %s -p %s --server %s -c %s %s" % \
                     (config['cmd_rhnpush'], rpm, config['user'], 
                      config['password'], config['server'], 
                      channel, nosig_txt)
                 # gso() presumably returns (exit_status, output) --
                 # res[0] is treated as the exit status below
                 res = gso(cmd)
                 if res[0] != 0:
                     log.warn(res[1])
                 
     # NOTE(review): SRPM pushes happen outside the channel loop (rhnpush
     # is run without -c), so they are pushed once regardless of how many
     # channels were given -- confirm this is intentional.
     if self.cli_opts.srpms:
         srpms = glob(str(self.cli_opts.srpms))
         for srpm in srpms:
             if os.path.exists(srpm):
                 nosig_txt = ''
                 if config['allow_nosig']:
                     nosig_txt = '--nosig'
                 cmd = "%s %s --source -u %s -p %s --server %s %s" % \
                     (config['cmd_rhnpush'], srpm, 
                      config['user'], config['password'], 
                      config['server'], nosig_txt)
                 res = gso(cmd)
                 if res[0] != 0:
                     log.warn(res[1])
             else:
                 log.warn("SRPM '%s' doesn't exist!" % srpm)          
コード例 #19
0
ファイル: plugin.py プロジェクト: YangXinNewlife/cement
def get_enabled_plugins():
    """
    Open plugin config files from plugin_config_dir and determine if they are
    enabled.  If so, append them to 'enabled_plugins' in the root config.
    Uses the namespaces['root'].config dictionary.
    
    """
    config = get_config()
    # start from any plugins already enabled in the root config
    if config.has_key('enabled_plugins'):
        enabled_plugins = config['enabled_plugins']
    else:
        enabled_plugins = []

    # determine enabled plugins

    # first from config files
    for _file in config['config_files']:
        try:
            cnf = ConfigObj(_file)
        except IOError, error:
            # unreadable config files are skipped, not fatal
            log.warning("Unable to open config '%s': %s" % \
                (_file, error.args[1]))
            continue

        # any non-root section with a truthy enable_plugin marks a plugin
        for sect in cnf.sections:
            if sect != 'root' and cnf[sect].has_key('enable_plugin') \
                              and t_f_pass(cnf[sect]['enable_plugin']) == True \
                              and not sect in enabled_plugins:
                # plugin module path is '<provider>.plugin.<name>'; the
                # provider defaults to the application module itself
                if not cnf[sect].has_key('provider'):
                    provider = config['app_module']
                else:
                    provider = cnf[sect]['provider']
                    setup_logging_for_plugin_provider(provider)
                plugin = "%s.plugin.%s" % (provider, sect)
                if plugin not in enabled_plugins:
                    enabled_plugins.append(plugin)
コード例 #20
0
ファイル: plugin.py プロジェクト: derks/cement
def get_enabled_plugins():
    """
    Open plugin config files from plugin_config_dir and determine if they are
    enabled.  If so, append them to 'enabled_plugins' in the root config.
    Uses the namespaces['root'].config dictionary.
    
    """
    config = get_config()
    # start from any plugins already enabled in the root config
    if config.has_key('enabled_plugins'):
        enabled_plugins = config['enabled_plugins']
    else:
        enabled_plugins = []
        
    # determine enabled plugins
    
    # first from config files
    for _file in config['config_files']:    
        try:
            cnf = ConfigObj(_file)
        except IOError, error:
            # unreadable config files are skipped, not fatal
            log.warning("Unable to open config '%s': %s" % \
                (_file, error.args[1]))
            continue
            
        # any non-root section with a truthy enable_plugin marks a plugin
        for sect in cnf.sections:
            if sect != 'root' and cnf[sect].has_key('enable_plugin') \
                              and t_f_pass(cnf[sect]['enable_plugin']) == True \
                              and not sect in enabled_plugins:
                # plugin module path is '<provider>.plugin.<name>'; the
                # provider defaults to the application module itself
                if not cnf[sect].has_key('provider'):
                    provider = config['app_module']
                else:
                    provider = cnf[sect]['provider']
                    setup_logging_for_plugin_provider(provider)
                plugin = "%s.plugin.%s" % (provider, sect)
                if plugin not in enabled_plugins:
                    enabled_plugins.append(plugin)
コード例 #21
0
ファイル: root.py プロジェクト: derks/cement
This is the RootController for the cement_test application.  This can be used
to expose commands to the root namespace which will be accessible under:

    $ cement_test --help
  
"""

from tempfile import mkstemp
from cement.core.controller import CementController, expose
from cement.core.namespace import get_config
from cement.core.log import get_logger

from cement_test.core.exc import CementTestArgumentError

log = get_logger(__name__)
config = get_config()

class RootController(CementController):
    @expose(is_hidden=True)
    def nosetests(self, *args, **kw):
        pass
        
    @expose('cement_test.templates.root.error', is_hidden=True)
    def error(self, errors=[]):
        """
        This can be called when catching exceptions giving the developer a 
        clean way of presenting errors to the user.
        
        Required Arguments:
        
            errors
コード例 #22
0
"""The purpose of this module is to test command functionality."""

from nose.tools import raises, with_setup, eq_, ok_

from cement import namespaces
from cement.core.namespace import get_config
from cement.core.command import run_command
from cement.core.testing import simulate
from cement.core.exc import CementRuntimeError, CementArgumentError

from cement_test.core.testing import setup_func, teardown_func

config = get_config()


@raises(CementArgumentError)
@with_setup(setup_func, teardown_func)
def test_run_command_namespace():
    # running a namespace label rather than a command should raise
    # CementArgumentError -- presumably 'example2' is a namespace here
    run_command('example2')


@raises(CementArgumentError)
@with_setup(setup_func, teardown_func)
def test_run_command_namespace_help():
    # running a namespace label rather than a command should raise
    # CementArgumentError -- presumably 'example-help' is a namespace here
    run_command('example-help')


@with_setup(setup_func, teardown_func)
def test_run_command():
コード例 #23
0
ファイル: ircbot.py プロジェクト: pombredanne/ius-tools
def post_options_hook(*args, **kw):
    """Ensure the configured ircbot pid_file location exists after CLI
    options are parsed."""
    config = get_config()
    # NOTE(review): os.makedirs() creates a *directory* with the pid_file
    # name -- if pid_file is a file path this should probably be
    # os.path.dirname(pid_file); confirm the intent.
    if not os.path.exists(config['ircbot']['pid_file']):
        os.makedirs(config['ircbot']['pid_file'])
コード例 #24
0
ファイル: ircbot.py プロジェクト: dev-ace/ius-tools
def post_options_hook(*args, **kw):
    """Ensure the configured ircbot pid_file location exists after CLI
    options are parsed."""
    config = get_config()
    # NOTE(review): os.makedirs() creates a *directory* with the pid_file
    # name -- if pid_file is a file path this should probably be
    # os.path.dirname(pid_file); confirm the intent.
    if not os.path.exists(config['ircbot']['pid_file']):
        os.makedirs(config['ircbot']['pid_file'])
コード例 #25
0
ファイル: admin.py プロジェクト: jness/ius-tools
 def process_tags(self):
     """Run process_tag() for every tag listed in the admin
     'managed_tags' config setting."""
     managed_tags = get_config()['admin']['managed_tags']
     for label in managed_tags:
         self.process_tag(label)
コード例 #26
0
ファイル: admin.py プロジェクト: jness/ius-tools
    def process_tag(self, tag_label):
        """
        Promote builds from '<tag_label>-candidate' into '<tag_label>' in
        the 'ius' project and announce the result by email.

        When promoting to 'stable', every other tag is removed from the
        moved builds, and builds untagged by the move are re-tagged into
        'archive'.

        Required Arguments:

            tag_label
                The destination tag label (e.g. 'stable').

        """
        config = get_config()
        log.info("Processing tag %s" % tag_label)
        res = self._wrap(self.mf.tag.get_one, 
                         "%s-candidate" % tag_label, 
                         'ius')
        from_tag = res['data']['tag']
        
        res = self._wrap(self.mf.tag.get_one, tag_label, 'ius')
        to_tag = res['data']['tag']
        
        # stable promotions archive whatever they displace
        old_tag = None
        if tag_label == 'stable':
            res = self._wrap(self.mf.tag.get_one, 'archive', 'ius')
            old_tag = res['data']['tag']
        
        res = self._wrap(self.mf.tag.move_builds, 
                         from_tag['label'], 
                         'ius', 
                         to_tag['label']) 
        untagged_builds = res['data']['untagged_builds']
        moved_builds = res['data']['moved_builds']
        
        # nothing moved -> nothing to clean up or announce
        if not len(moved_builds) > 0:
            return 
           
        # if tagging to stable, remove all other tags
        if to_tag['label'] == 'stable':
            res = self._wrap(self.mf.tag.get_all, dict(project_label='ius'))
            all_tags = res['data']['tags']
            for build in moved_builds:
                for _tag in all_tags:
                    if _tag['label'] == 'stable':
                        continue
                    res = self._wrap(self.mf.build.untag, 
                                     build, 
                                     'ius',
                                     _tag['label'])

        # if there were older untagged_builds move them to old_tag
        if old_tag and len(untagged_builds) > 0:
            msg = TAG_AND_UNTAG_MSG % (
                    to_tag['label'], 
                    "\n\r    - ".join(moved_builds),
                    "\n\r    - ".join(untagged_builds),
                    FOOTER
                    )
            for old_label in untagged_builds:
                res = self._wrap(self.mf.build.tag,
                                 old_label,
                                 'ius',
                                 old_tag['label'])
        else:
            msg = TAG_MSG % (
                    to_tag['label'], 
                    "\n\r   - ".join(moved_builds),
                    FOOTER
                    )
        
        for build in moved_builds:
            log.info("  `-> %s" % build)
           
        send_mail(config['admin']['announce_email'],
                  "new builds moved to tag '%s'" % to_tag['label'],
                  msg)
コード例 #27
0
def test_get_bogus_config():
    """Requesting config for an unknown namespace should not raise."""
    get_config('bogus_namespace')
コード例 #28
0
ファイル: admin.py プロジェクト: iuscommunity/ius-tools
    def push_to_public(self):
        """Rsync the local IUS repo to the public mirror and, when an
        internal mirror is configured, to that internal mirror as well.

        Honors ``self.cli_opts.delete`` (adds ``--delete`` to rsync).
        Raises ``Exception`` when any rsync invocation exits non-zero.
        """
        config = get_config()
        log.info("pushing changes to %s" % config["admin"]["remote_rsync_path"])
        # NOTE(review): command lines are built from config values and run
        # through the shell via os.system -- config must be trusted input.
        if self.cli_opts.delete:
            status = os.system(
                "%s -az --delete %s/ius/ %s/ius/ --exclude %s >/dev/null"
                % (
                    config["admin"]["rsync_binpath"],
                    config["admin"]["repo_base_path"],
                    config["admin"]["remote_rsync_path"],
                    config["admin"]["remote_exclude"],
                )
            )
        else:
            status = os.system(
                "%s -az %s/ius/ %s/ius/ --exclude %s >/dev/null"
                % (
                    config["admin"]["rsync_binpath"],
                    config["admin"]["repo_base_path"],
                    config["admin"]["remote_rsync_path"],
                    config["admin"]["remote_exclude"],
                )
            )

        # handle errors on our rsync command
        if status != 0:
            log.error("rsync failed and returned status %s" % status)
            raise Exception("fatal error")

        # Internal IUS Push
        if config["admin"]["internal_remote_rsync_path"]:

            # NOTE: removing an entire tag directory would force it to be
            # re-downloaded each run (hours), so excluded tags are passed to
            # rsync instead of being deleted locally.

            # create a rsync friendly exclude command
            # BUGFIX: tag_exclude_list was previously bound only inside the
            # conditional below, causing an UnboundLocalError at the
            # "if tag_exclude_list:" check when internal_tag_exclude was
            # empty or unset.
            tag_exclude_list = []
            if config["admin"]["internal_tag_exclude"]:
                for tag_exclude in config["admin"]["internal_tag_exclude"]:
                    if os.path.exists("%s/ius/%s" % (config["admin"]["repo_base_path"], tag_exclude)):
                        log.info("appending %s to exclude list" % tag_exclude)
                        tag_exclude_list.append("--exclude '%s'" % tag_exclude)
            if tag_exclude_list:
                exclude_command = " ".join(tag_exclude_list)
            else:
                exclude_command = ""

            # remove any excludes if configured
            if config["admin"]["internal_remote_exclude"]:
                for exclude in config["admin"]["internal_remote_exclude"]:
                    log.info("removing %s from %s" % (exclude, config["admin"]["repo_base_path"]))
                    # os.walk yields (dirpath, dirnames, filenames) tuples
                    for dirs in os.walk("%s/ius/" % os.path.expanduser(config["admin"]["repo_base_path"])):
                        if exclude in ", ".join(dirs[2]):
                            for f in dirs[2]:
                                if exclude in f:
                                    os.remove("%s/%s" % (dirs[0], f))

                # rebuild our meta data now that
                # files have been removed
                repo = IUSRepo(config, self.mf)
                repo.build_metadata()

            log.info("pushing changes to %s" % config["admin"]["internal_remote_rsync_path"])
            if self.cli_opts.delete:
                status = os.system(
                    "%s -az %s --delete %s/ius/ %s/ >/dev/null"
                    % (
                        config["admin"]["rsync_binpath"],
                        exclude_command,
                        config["admin"]["repo_base_path"],
                        config["admin"]["internal_remote_rsync_path"],
                    )
                )
            else:
                status = os.system(
                    "%s -az %s %s/ius/ %s/ >/dev/null"
                    % (
                        config["admin"]["rsync_binpath"],
                        exclude_command,
                        config["admin"]["repo_base_path"],
                        config["admin"]["internal_remote_rsync_path"],
                    )
                )

            # handle errors on our rsync command
            if status != 0:
                log.error("rsync failed and returned status %s" % status)
                raise Exception("fatal error")
コード例 #29
0
ファイル: errata.py プロジェクト: nixar/satcli
    def create(self, *args, **kw):
        """Create (and optionally publish) a Satellite errata advisory.

        Validates the required CLI options, pushes the given RPMs (and
        optional SRPM) to the server via rhnpush, resolves their package
        ids, then creates the Errata record and re-queries it.

        Returns a dict of the form ``dict(errata=<queried errata>)``.
        """
        config = get_config()
        
        # Collect every validation failure first; abort_on_error() below
        # reports them all at once instead of failing on the first.
        errors = []
        channels = []
        if not self.cli_opts.advisory:
            errors.append(('SatCLIArgumentError', 
                           'errata -a/--advisory required.'))
        
        if not self.cli_opts.rpms:
            errors.append(('SatCLIArgumentError', 
                           'errata --rpms required.'))                   
        
        if not self.cli_opts.channel and not self.cli_opts.channels_file:
            errors.append(('SatCLIArgumentError', 
                           'errata -c/--channel or --channels-file required.'))                   
        
        # channels may come from a comma-separated -c option ...
        if self.cli_opts.channel:
            _channels = self.cli_opts.channel.split(',')
            for _c in _channels:
                channels.append(_c)
        
        # ... and/or from a file with one channel label per line
        if self.cli_opts.channels_file:
            if os.path.exists(self.cli_opts.channels_file):
                f = open(self.cli_opts.channels_file, 'r')
                for line in f.readlines():
                    channels.append(line.strip('\n'))
            else:
                log.warn("channels file '%s' doesn't exist!" % \
                         self.cli_opts.channels_file)
        
        if not self.cli_opts.synopsis:
            errors.append(('SatCLIArgumentError', 
                           'errata --synopsis required.'))
        
        if not self.cli_opts.product:
            errors.append(('SatCLIArgumentError', 
                           'errata --product required.'))
        
        if not self.cli_opts.advisory_type:
            errors.append(('SatCLIArgumentError', 
                           'errata --type required.'))
        
        if not self.cli_opts.advisory_type in ['bug', 'enhancement', 'security']:
            errors.append(('SatCLIArgumentError',
                       'errata --type must be one of bug, enhancement, security.'))                   

        if not self.cli_opts.description:
            errors.append(('SatCLIArgumentError', 
                           'errata --description required.'))
                           
        # bail out (with all collected messages) if anything was missing
        abort_on_error(errors)
                
        # fill in defaults for the optional fields
        if not self.cli_opts.topic:
            self.cli_opts.topic = "%s update." % self.cli_opts.advisory_type.capitalize()
        
        if not self.cli_opts.solution:
            self.cli_opts.solution = config['errata']['solution']
        
        if self.cli_opts.keywords:
            self.cli_opts.keywords = self.cli_opts.keywords.split(',')
        else:
            self.cli_opts.keywords = []


        # --rpms is treated as a glob pattern; push each matching RPM to
        # the server with rhnpush, then look up its package record
        rpms = glob(str(self.cli_opts.rpms))
        rpms_data = []
        package_ids = []
        for r in rpms:
            nosig_txt = ''
            if config['allow_nosig']:
                nosig_txt = '--nosig'
            cmd = "%s %s -u %s -p %s --server %s %s" % \
                (config['cmd_rhnpush'], r, config['user'], 
                 config['password'], 
                 config['server'], nosig_txt)
            # gso presumably runs the command and captures output -- TODO
            # confirm; the exit status is not checked here
            gso(cmd)
            # py2 file(); the handle is not explicitly closed
            rpm = RPM(file(r))  
            # match the pushed RPM to its server-side package by NVRA
            package = g.proxy.query(model.Package, just_one=True,
                                name=rpm[rpmdefs.RPMTAG_NAME], 
                                version=rpm[rpmdefs.RPMTAG_VERSION], 
                                release=rpm[rpmdefs.RPMTAG_RELEASE], 
                                arch=rpm[rpmdefs.RPMTAG_ARCH])
            rpms_data.append(package)
        # optionally push a single source RPM as well (not added to the
        # errata's package list)
        if self.cli_opts.srpm:
            if os.path.exists(self.cli_opts.srpm):
                rpm = RPM(file(self.cli_opts.srpm))  
                nosig_txt = ''
                if config['allow_nosig']:
                    nosig_txt = '--nosig'
                cmd = "%s %s --source -u %s -p %s --server %s %s" % \
                    (config['cmd_rhnpush'], self.cli_opts.srpm, 
                     config['user'], config['password'], 
                     config['server'], nosig_txt)
                gso(cmd)
            else:
                log.warn("SRPM '%s' doesn't exist!" % self.cli_opts.srpm)    

        for p in rpms_data:
            package_ids.append(p.id)
        
        # expand the short CLI type into the advisory-type label the
        # Satellite API expects
        if self.cli_opts.advisory_type == 'bug':
            self.cli_opts.advisory_type = 'Bug Fix Advisory'
        elif self.cli_opts.advisory_type == 'enhancement':
            self.cli_opts.advisory_type = 'Product Enhancement Advisory'
        elif self.cli_opts.advisory_type == 'security':
            self.cli_opts.advisory_type = 'Security Advisory'        
            
            
        # build and create the errata record, then re-query it so the
        # caller gets the server's view of what was created
        e = model.Errata()
        e.synopsis = self.cli_opts.synopsis
        e.advisory_name = self.cli_opts.advisory
        e.advisory_release = 1
        e.advisory_type = self.cli_opts.advisory_type
        e.product = self.cli_opts.product
        e.topic = self.cli_opts.topic
        e.description = self.cli_opts.description
        e.references = self.cli_opts.references or ''
        e.notes = self.cli_opts.notes or ''
        e.solution = self.cli_opts.solution
        e.bug_ids = []
        e.keywords = self.cli_opts.keywords or []
        e.package_ids = package_ids
        e.publish = self.cli_opts.publish
        e.channels = channels       
        g.proxy.create(e)
        res = g.proxy.query(model.Errata, just_one=True, all_data=True,
                            advisory=self.cli_opts.advisory)     
        return dict(errata=res)
コード例 #30
0
ファイル: channel.py プロジェクト: nixar/satcli
 def test(*args, **kw):
     c = get_config()
     print c['mirror']
コード例 #31
0
    def push_to_public(self):
        """Rsync the local IUS repo to the public mirror and, when an
        internal mirror is configured, to that internal mirror as well.

        Honors ``self.cli_opts.delete`` (adds ``--delete`` to rsync).
        Raises ``Exception`` when any rsync invocation exits non-zero.
        """
        config = get_config()
        log.info("pushing changes to %s" %
                 config['admin']['remote_rsync_path'])
        # NOTE(review): command lines are built from config values and run
        # through the shell via os.system -- config must be trusted input.
        if self.cli_opts.delete:
            status = os.system('%s -az --delete %s/ius/ %s/ius/ --exclude %s >/dev/null' % \
                     (config['admin']['rsync_binpath'],
                      config['admin']['repo_base_path'],
                      config['admin']['remote_rsync_path'],
                      config['admin']['remote_exclude']))
        else:
            status = os.system('%s -az %s/ius/ %s/ius/ --exclude %s >/dev/null' % \
                     (config['admin']['rsync_binpath'],
                      config['admin']['repo_base_path'],
                      config['admin']['remote_rsync_path'],
                      config['admin']['remote_exclude']))

        # handle errors on our rsync command
        if status != 0:
            log.error("rsync failed and returned status %s" % status)
            raise Exception("fatal error")

        # Internal IUS Push
        if config['admin']['internal_remote_rsync_path']:

            # NOTE: removing an entire tag directory would force it to be
            # re-downloaded each run (hours), so excluded tags are passed to
            # rsync instead of being deleted locally.

            # create a rsync friendly exclude command
            # BUGFIX: tag_exclude_list was previously bound only inside the
            # conditional below, causing an UnboundLocalError at the
            # "if tag_exclude_list:" check when internal_tag_exclude was
            # empty or unset.
            tag_exclude_list = []
            if config['admin']['internal_tag_exclude']:
                for tag_exclude in config['admin']['internal_tag_exclude']:
                    if os.path.exists(
                            "%s/ius/%s" %
                        (config['admin']['repo_base_path'], tag_exclude)):
                        log.info("appending %s to exclude list" % tag_exclude)
                        tag_exclude_list.append("--exclude '%s'" % tag_exclude)
            if tag_exclude_list:
                exclude_command = ' '.join(tag_exclude_list)
            else:
                exclude_command = ''

            # remove any excludes if configured
            if config['admin']['internal_remote_exclude']:
                for exclude in config['admin']['internal_remote_exclude']:
                    log.info("removing %s from %s" %
                             (exclude, config['admin']['repo_base_path']))
                    # os.walk yields (dirpath, dirnames, filenames) tuples
                    for dirs in os.walk('%s/ius/' % os.path.expanduser(
                            config['admin']['repo_base_path'])):
                        if exclude in ', '.join(dirs[2]):
                            for f in dirs[2]:
                                if exclude in f:
                                    os.remove('%s/%s' % (dirs[0], f))

                # rebuild our meta data now that
                # files have been removed
                repo = IUSRepo(config, self.mf)
                repo.build_metadata()

            log.info("pushing changes to %s" %
                     config['admin']['internal_remote_rsync_path'])
            if self.cli_opts.delete:
                status = os.system('%s -az %s --delete %s/ius/ %s/ >/dev/null' % \
                         (config['admin']['rsync_binpath'],
                          exclude_command,
                          config['admin']['repo_base_path'],
                          config['admin']['internal_remote_rsync_path']))
            else:
                status = os.system('%s -az %s %s/ius/ %s/ >/dev/null' % \
                         (config['admin']['rsync_binpath'],
                          exclude_command,
                          config['admin']['repo_base_path'],
                          config['admin']['internal_remote_rsync_path']))

            # handle errors on our rsync command
            if status != 0:
                log.error("rsync failed and returned status %s" % status)
                raise Exception("fatal error")
コード例 #32
0
ファイル: admin.py プロジェクト: iuscommunity/ius-tools
 def process_tags(self):
     """Run process_tag() for every tag the admin config marks as managed."""
     cfg = get_config()
     for label in cfg["admin"]["managed_tags"]:
         self.process_tag(label)
コード例 #33
0
ファイル: app_setup.py プロジェクト: YangXinNewlife/cement
    # Setup logging for console and file
    setup_logging(to_console=namespaces['root'].config['log_to_console'],
                  clear_loggers=clear_loggers)
    log = get_logger(__name__)
    log.debug('logging initialized')
    log.debug('setup app per the following configs: %s' % \
              namespaces['root'].config['config_files'])
    define_default_hooks()
    define_default_handler_types()
    
    register_default_handlers()
    
    __import__("%s.bootstrap" % namespaces['root'].config['app_module'], 
               globals(), locals(), ['root'])
    
    for res in run_hooks('post_bootstrap_hook'):
        pass
    
    # load all plugins
    load_all_plugins()
    
    # Allow plugins to add config validation for the global namespace
    for res in run_hooks('validate_config_hook', 
                         config=namespaces['root'].config):
        pass
    
    # Merge namespaces under root dict
    for nam in namespaces:
        if nam != 'root':
            namespaces['root'].config[nam] = get_config(nam)
コード例 #34
0
 def process_tags(self):
     """Run process_tag() for every tag the admin config marks as managed."""
     cfg = get_config()
     for label in cfg['admin']['managed_tags']:
         self.process_tag(label)