def _fetch_metadata(self, fetch_url):
    try:
        r = self.session.get(fetch_url)
        r.raise_for_status()
    except self._requests_impl.exceptions.RequestException as e:
        log.getLogger(__name__).warn(e)
        raise exc.Ec2MetadataNotAvailable
    content = r.text
    if fetch_url[-1] == '/':
        new_content = {}
        for subkey in content.split("\n"):
            if '=' in subkey:
                subkey = subkey[:subkey.index('=')] + '/'
            sub_fetch_url = fetch_url + subkey
            if subkey[-1] == '/':
                subkey = subkey[:-1]
            new_content[subkey] = self._fetch_metadata(sub_fetch_url)
        content = new_content
    return content
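
For orientation, a minimal standalone sketch of the same recursive walk, assuming the requests library and the standard EC2 metadata endpoint; the function name and URL constant are illustrative and not part of os-collect-config:

import requests

METADATA_ROOT = 'http://169.254.169.254/latest/meta-data/'  # assumed endpoint

def fetch_tree(url=METADATA_ROOT):
    """Recursively turn an EC2 metadata listing into a nested dict."""
    text = requests.get(url, timeout=2).text
    if not url.endswith('/'):
        # a URL without a trailing slash is a leaf value
        return text
    tree = {}
    for key in filter(None, text.splitlines()):
        if '=' in key:
            # listings such as public-keys return 'index=name' pairs;
            # fetch the sub-tree by index, as the snippet above does
            key = key.split('=', 1)[0] + '/'
        tree[key.rstrip('/')] = fetch_tree(url + key)
    return tree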
Example #3
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from heatclient import client as heatclient
from keystoneclient.v3 import client as keystoneclient
from oslo_config import cfg

from os_collect_config import exc
from os_collect_config import keystone
from os_collect_config import merger
from os_collect_config.openstack.common import log

CONF = cfg.CONF
logger = log.getLogger(__name__)

opts = [
    cfg.StrOpt('user-id',
               help='User ID for API authentication'),
    cfg.StrOpt('password',
               help='Password for API authentication'),
    cfg.StrOpt('project-id',
               help='ID of project for API authentication'),
    cfg.StrOpt('auth-url',
               help='URL for API authentication'),
    cfg.StrOpt('stack-id',
               help='ID of the stack this deployment belongs to'),
    cfg.StrOpt('resource-name',
               help='Name of resource in the stack to be polled'),
]
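
As a reminder of how options like these are consumed, the sketch below registers a small subset under a 'heat' group and reads one back; the group title and the sample command line are assumptions, not taken from the module above:

from oslo_config import cfg

CONF = cfg.CONF

# illustrative subset of the options listed above
sample_opts = [
    cfg.StrOpt('auth-url', help='URL for API authentication'),
    cfg.StrOpt('stack-id', help='ID of the stack this deployment belongs to'),
]

heat_group = cfg.OptGroup(name='heat', title='Heat options')
CONF.register_group(heat_group)
CONF.register_cli_opts(sample_opts, group=heat_group)

# grouped CLI options are exposed as --<group>-<name>; dashes in option
# names become underscores on attribute access
CONF(args=['--heat-auth-url', 'http://keystone.example.com:5000/v3'])
print(CONF.heat.auth_url)   # http://keystone.example.com:5000/v3
print(CONF.heat.stack_id)   # None until set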
Example #4
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from heatclient import client as heatclient
from keystoneclient.v3 import client as keystoneclient
from oslo_config import cfg

from os_collect_config import exc
from os_collect_config import keystone
from os_collect_config import merger
from os_collect_config.openstack.common import log

CONF = cfg.CONF
logger = log.getLogger(__name__)

opts = [
    cfg.StrOpt('user-id', help='User ID for API authentication'),
    cfg.StrOpt('password', help='Password for API authentication'),
    cfg.StrOpt('project-id', help='ID of project for API authentication'),
    cfg.StrOpt('auth-url', help='URL for API authentication'),
    cfg.StrOpt('stack-id', help='ID of the stack this deployment belongs to'),
    cfg.StrOpt('resource-name',
               help='Name of resource in the stack to be polled'),
]
name = 'heat'


class Collector(object):
    def __init__(self, keystoneclient=keystoneclient, heatclient=heatclient):
        # keep references to the injected client modules for later use
        self.keystoneclient = keystoneclient
        self.heatclient = heatclient
Example #5
def __main__(args=sys.argv, collector_kwargs_map=None):
    signal.signal(signal.SIGHUP, reexec_self)
    setup_conf()
    CONF(args=args[1:], prog="os-collect-config",
         version=version.version_info.version_string())

    # This resets the logging infrastructure, which prevents cleanly capturing
    # log output in tests, so it should only be called if no handlers are
    # already defined, i.e. not in unit tests.
    if not log.getLogger(None).logger.handlers:
        log.setup("os-collect-config")

    if CONF.print_cachedir:
        print(CONF.cachedir)
        return

    unknown_collectors = set(CONF.collectors) - set(COLLECTORS.keys())
    if unknown_collectors:
        raise exc.InvalidArguments(
            'Unknown collectors %s. Valid collectors are: %s' %
            (list(unknown_collectors), DEFAULT_COLLECTORS))

    if CONF.force:
        CONF.set_override('one_time', True)

    config_files = CONF.config_file
    config_hash = getfilehash(config_files)
    while True:
        store_and_run = bool(CONF.command and not CONF.print_only)
        (changed_keys, content) = collect_all(
            cfg.CONF.collectors,
            store=store_and_run,
            collector_kwargs_map=collector_kwargs_map)
        if store_and_run:
            if changed_keys or CONF.force:
                # ignore HUP now since we will reexec after commit anyway
                signal.signal(signal.SIGHUP, signal.SIG_IGN)
                try:
                    call_command(content, CONF.command)
                except subprocess.CalledProcessError as e:
                    logger.error('Command failed, will not cache new data. %s'
                                 % e)
                    if not CONF.one_time:
                        new_config_hash = getfilehash(config_files)
                        if config_hash == new_config_hash:
                            logger.warn(
                                'Sleeping %.2f seconds before re-exec.' %
                                CONF.polling_interval
                            )
                            time.sleep(CONF.polling_interval)
                        else:
                            # The command failed but the config file has
                            # changed; re-exec now, as the config file change
                            # may have fixed things.
                            logger.warn('Config changed, re-execing now')
                            config_hash = new_config_hash
                else:
                    for changed in changed_keys:
                        cache.commit(changed)
                if not CONF.one_time:
                    reexec_self()
            else:
                logger.debug("No changes detected.")
            if CONF.one_time:
                break
            else:
                logger.info("Sleeping %.2f seconds.", CONF.polling_interval)
                time.sleep(CONF.polling_interval)
        else:
            print(json.dumps(content, indent=1))
            break
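
When call_command fails, the loop above recomputes getfilehash over the configuration files and re-execs immediately if the hash changed. A hypothetical sketch of such a helper (the real implementation in os-collect-config may differ in detail): digest the combined contents of the given files.

import hashlib

def getfilehash(files):
    """Return a digest of the combined contents of the given config files."""
    digest = hashlib.md5()
    for name in files or []:
        try:
            with open(name, 'rb') as f:
                digest.update(f.read())
        except IOError:
            # files that cannot be read simply do not contribute to the hash
            pass
    return digest.hexdigest()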
Example #6
    cfg.BoolOpt('print-cachedir',
                default=False,
                help='Print out the value of cachedir and exit immediately.'),
    cfg.BoolOpt('force',
                default=False,
                help='Pass this to force running the command even if nothing'
                ' has changed. Implies --one-time.'),
    cfg.BoolOpt('print', dest='print_only',
                default=False,
                help='Query normally, print the resulting configs as a json'
                ' map, and exit immediately without running command if it is'
                ' configured.'),
]

CONF = cfg.CONF
logger = log.getLogger('os-collect-config')

COLLECTORS = {ec2.name: ec2,
              cfn.name: cfn,
              heat.name: heat,
              heat_local.name: heat_local,
              local.name: local}


def setup_conf():
    ec2_group = cfg.OptGroup(name='ec2',
                             title='EC2 Metadata options')

    cfn_group = cfg.OptGroup(name='cfn',
                             title='CloudFormation API Metadata options')
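
The excerpt of setup_conf above stops after creating the option groups. A sketch of how such a function typically continues, assuming each collector module (ec2, cfn, ...) exposes an opts list as in the earlier examples and that the global opts list is the one shown above:

def setup_conf():
    ec2_group = cfg.OptGroup(name='ec2',
                             title='EC2 Metadata options')
    cfn_group = cfg.OptGroup(name='cfn',
                             title='CloudFormation API Metadata options')

    CONF.register_group(ec2_group)
    CONF.register_group(cfn_group)

    # each collector's options live under its own group
    CONF.register_cli_opts(ec2.opts, group='ec2')
    CONF.register_cli_opts(cfn.opts, group='cfn')

    # the top-level options (command, cachedir, force, ...) are registered
    # directly on the global CONF object
    CONF.register_cli_opts(opts)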
Example #7
def __main__(args=sys.argv, collector_kwargs_map=None):
    signal.signal(signal.SIGHUP, reexec_self)
    setup_conf()
    CONF(args=args[1:],
         prog="os-collect-config",
         version=version.version_info.version_string())

    # This resets the logging infrastructure, which prevents cleanly capturing
    # log output in tests, so it should only be called if no handlers are
    # already defined, i.e. not in unit tests.
    if not log.getLogger(None).logger.handlers:
        log.setup("os-collect-config")

    if CONF.print_cachedir:
        print(CONF.cachedir)
        return

    unknown_collectors = set(CONF.collectors) - set(COLLECTORS.keys())
    if unknown_collectors:
        raise exc.InvalidArguments(
            'Unknown collectors %s. Valid collectors are: %s' %
            (list(unknown_collectors), DEFAULT_COLLECTORS))

    if CONF.force:
        CONF.set_override('one_time', True)

    exitval = 0
    config_files = CONF.config_file
    config_hash = getfilehash(config_files)
    sleep_time = 1
    while True:
        store_and_run = bool(CONF.command and not CONF.print_only)
        (changed_keys,
         content) = collect_all(cfg.CONF.collectors,
                                store=store_and_run,
                                collector_kwargs_map=collector_kwargs_map)
        if store_and_run:
            if changed_keys or CONF.force:
                # shorter sleeps while changes are detected allow for faster
                # software deployment dependency processing
                sleep_time = 1
                # ignore HUP now since we will reexec after commit anyway
                signal.signal(signal.SIGHUP, signal.SIG_IGN)
                try:
                    call_command(content, CONF.command)
                except subprocess.CalledProcessError as e:
                    exitval = e.returncode
                    logger.error(
                        'Command failed, will not cache new data. %s' % e)
                    if not CONF.one_time:
                        new_config_hash = getfilehash(config_files)
                        if config_hash == new_config_hash:
                            logger.warn(
                                'Sleeping %.2f seconds before re-exec.' %
                                sleep_time)
                            time.sleep(sleep_time)
                        else:
                            # The command failed but the config file has
                            # changed; re-exec now, as the config file change
                            # may have fixed things.
                            logger.warn('Config changed, re-execing now')
                            config_hash = new_config_hash
                else:
                    for changed in changed_keys:
                        cache.commit(changed)
                if not CONF.one_time:
                    reexec_self()
            else:
                logger.debug("No changes detected.")
            if CONF.one_time:
                break
            else:
                logger.info("Sleeping %.2f seconds.", sleep_time)
                time.sleep(sleep_time)

            sleep_time *= 2
            if sleep_time > CONF.polling_interval:
                sleep_time = CONF.polling_interval
        else:
            print(json.dumps(content, indent=1))
            break
    return exitval
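
The sleep handling above is a capped exponential backoff: the interval doubles after each idle pass, resets to one second when changes are seen, and never exceeds polling_interval. A tiny self-contained illustration, with an assumed cap of 30 seconds:

def next_sleep(current, polling_interval=30):
    """Double the idle sleep, but never exceed the configured polling interval."""
    return min(current * 2, polling_interval)

sleep_time = 1
for _ in range(6):
    print(sleep_time)            # 1, 2, 4, 8, 16, 30
    sleep_time = next_sleep(sleep_time)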
Example #8
    cfg.BoolOpt('print',
                dest='print_only',
                default=False,
                help='Query normally, print the resulting configs as a json'
                ' map, and exit immediately without running command if it is'
                ' configured.'),
    cfg.MultiStrOpt('deployment-key',
                    default=['deployments'],
                    help='Key(s) to explode into multiple collected outputs. '
                    'Parsed according to the expected Metadata created by '
                    'OS::Heat::StructuredDeployment. Only Exploded if seen at '
                    'the root of the Metadata.')
]

CONF = cfg.CONF
logger = log.getLogger('os-collect-config')

COLLECTORS = {
    ec2.name: ec2,
    cfn.name: cfn,
    heat.name: heat,
    heat_local.name: heat_local,
    local.name: local,
    request.name: request,
    zaqar.name: zaqar
}


def setup_conf():
    ec2_group = cfg.OptGroup(name='ec2', title='EC2 Metadata options')
Example #9
        help="Query normally, print the resulting configs as a json"
        " map, and exit immediately without running command if it is"
        " configured.",
    ),
    cfg.MultiStrOpt(
        "deployment-key",
        default=["deployments"],
        help="Key(s) to explode into multiple collected outputs. "
        "Parsed according to the expected Metadata created by "
        "OS::Heat::StructuredDeployment. Only Exploded if seen at "
        "the root of the Metadata.",
    ),
]

CONF = cfg.CONF
logger = log.getLogger("os-collect-config")

COLLECTORS = {
    ec2.name: ec2,
    cfn.name: cfn,
    heat.name: heat,
    heat_local.name: heat_local,
    local.name: local,
    request.name: request,
    zaqar.name: zaqar,
}


def setup_conf():
    ec2_group = cfg.OptGroup(name="ec2", title="EC2 Metadata options")