コード例 #1
0
def main():
    """Install the NetApp SDK on a target appliance (or the current one)."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument(
        '--address', default=None,
        help='hostname or ip address of target appliance')
    arg_parser.add_argument(
        '--sdk_url',
        default=cfme_data.get("basic_info", {}).get("netapp_sdk_url"),
        help='url to download sdk pkg')
    arg_parser.add_argument(
        '--restart', action="store_true",
        help='restart evmserverd after installation ' +
        '(required for proper operation)')

    opts = arg_parser.parse_args()
    # Without an explicit address, operate on the currently configured appliance.
    if opts.address:
        appliance = IPAppliance(address=opts.address)
    else:
        appliance = get_or_create_current_appliance()
    print('Address: {}'.format(appliance.address))
    print('SDK URL: {}'.format(opts.sdk_url))
    print('Restart: {}'.format(opts.restart))

    appliance.install_netapp_sdk(
        sdk_url=opts.sdk_url, reboot=opts.restart, log_callback=log)
コード例 #2
0
def main():
    """Install VDDK on a target appliance (or the current one)."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument(
        '--address', default=None,
        help='hostname or ip address of target appliance')
    arg_parser.add_argument('--vddk_url', help='url to download vddk pkg')
    arg_parser.add_argument(
        '--reboot', action="store_true",
        help='reboot after installation ' +
        '(required for proper operation)')
    arg_parser.add_argument(
        '--force', action="store_true",
        help='force installation if version detected')

    opts = arg_parser.parse_args()

    # Without an address, fall back to the currently configured appliance.
    if opts.address:
        appliance = IPAppliance(hostname=urlparse(opts.address).netloc)
    else:
        appliance = get_or_create_current_appliance()

    appliance.install_vddk(
        reboot=opts.reboot, force=opts.force,
        vddk_url=opts.vddk_url, log_callback=log)
コード例 #3
0
ファイル: update_rhel.py プロジェクト: apagac/cfme_tests
def main():
    """Update RHEL on the given appliance, optionally rebooting afterwards."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument(
        'address', help='hostname or ip address of target appliance')
    arg_parser.add_argument(
        "-u", "--url", dest="urls", action="append",
        help="url(s) to use for update")
    arg_parser.add_argument(
        "-c", "--cleanup", dest="cleanup", action="store_true",
        help="Whether to cleanup /etc/yum.repos.d before start")
    arg_parser.add_argument(
        "--no_wait_ui", dest="no_wait_ui", action="store_false",
        help="Whether to NOT wait for UI after reboot")
    arg_parser.add_argument(
        '--reboot', action="store_true", default=False,
        help='reboot after installation ' +
        '(required for proper operation)')

    opts = arg_parser.parse_args()
    target = IPAppliance(hostname=opts.address)
    # Don't reboot here, so we can print updates to the console when we do
    result = target.update_rhel(
        *opts.urls, reboot=False, streaming=True, cleanup=opts.cleanup)

    if result.rc == 0:
        if opts.reboot:
            print('Rebooting')
            target.reboot(wait_for_web_ui=opts.no_wait_ui)
        print('Appliance update complete')

    return result.rc
コード例 #4
0
def main():
    """Deploy Merkyl to a target appliance.

    FIX: the original declared ``start`` as a *positional* argument with
    ``action="store_true"``.  argparse rejects that combination when the
    argument is added (store_true forces nargs=0, and positionals must
    consume at least one token), so the script could never run.  The
    boolean is now exposed as the ``--start`` flag.
    """
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('hostname', help='hostname or ip address of target appliance')
    # A flag, not a positional: a positional cannot consume zero tokens.
    parser.add_argument('--start', action="store_true", default=False,
                        help='Start Merkyl?')
    args = parser.parse_args()

    ip_a = IPAppliance(hostname=args.hostname)

    ip_a.deploy_merkyl(args.start)
コード例 #5
0
def main():
    """Loosen PostgreSQL SSL restrictions on the target appliance."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument(
        'address', nargs='?', default=None,
        help='hostname or ip address of target appliance')
    # NOTE(review): --with_ssl is parsed but never used below -- confirm
    # whether loosen_pgssl() should receive it.
    arg_parser.add_argument(
        '--with_ssl', action="store_true", help='update for ssl connections')

    opts = arg_parser.parse_args()
    target = IPAppliance(opts.address)
    return target.loosen_pgssl()
コード例 #6
0
def main():
    """Loosen PostgreSQL SSL restrictions on the target appliance."""
    cli = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    cli.add_argument('address', nargs='?', default=None,
                     help='hostname or ip address of target appliance')
    # NOTE(review): --with_ssl is parsed but not used below -- confirm intent.
    cli.add_argument('--with_ssl', help='update for ssl connections',
                     action="store_true")

    parsed = cli.parse_args()
    appliance = IPAppliance(hostname=parsed.address)
    return appliance.loosen_pgssl()
コード例 #7
0
def main():
    """Synchronise the appliance clock via NTP and report success."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument(
        'address', nargs="?", default=None,
        help='hostname or ip address of target appliance')
    opts = arg_parser.parse_args()
    target = IPAppliance(hostname=opts.address)
    target.fix_ntp_clock()
    print("Time was set")
コード例 #8
0
def main():
    """Set the appliance clock via NTP."""
    cli = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    cli.add_argument('address', nargs="?", default=None,
                     help='hostname or ip address of target appliance')
    parsed = cli.parse_args()
    appliance = IPAppliance(hostname=parsed.address)
    appliance.fix_ntp_clock()
    print("Time was set")
コード例 #9
0
def main():
    """Precompile appliance assets; restart the EVM service on success."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument(
        'address', nargs='?', default=None,
        help='hostname or ip address of target appliance')

    opts = arg_parser.parse_args()
    target = IPAppliance(hostname=opts.address)

    rc = target.precompile_assets()
    if rc == 0:
        target.evmserverd.restart()
        print("EVM service restarted, UI should be available shortly")
    return rc
コード例 #10
0
ファイル: merkyl_deploy.py プロジェクト: lcouzens/cfme_tests
def main():
    """Deploy Merkyl to a target appliance.

    FIX: ``start`` was declared as a positional argument with
    ``action="store_true"``; argparse raises ValueError when such an
    argument is added (store_true forces nargs=0, which positionals may
    not have), so this script crashed before parsing anything.  The
    boolean is now the ``--start`` flag.
    """
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('hostname',
                        help='hostname or ip address of target appliance')
    # Must be an option: a positional cannot consume zero tokens.
    parser.add_argument('--start',
                        action="store_true",
                        default=False,
                        help='Start Merkyl?')
    args = parser.parse_args()

    ip_a = IPAppliance(hostname=args.hostname)

    ip_a.deploy_merkyl(args.start)
コード例 #11
0
def main():
    """Clone an automate domain (source -> dest) on the target appliance."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument(
        'hostname', nargs='?', default=None,
        help='hostname or ip address of target appliance')
    arg_parser.add_argument(
        'source', nargs='?', default='ManageIQ', help='Source Domain name')
    arg_parser.add_argument(
        'dest', nargs='?', default='Default', help='Destination Domain name')
    opts = arg_parser.parse_args()

    target = IPAppliance(hostname=opts.hostname)
    status, _ = target.clone_domain(opts.source, opts.dest)
    return status
コード例 #12
0
def main():
    """Clone an automate domain on the target appliance, returning its status."""
    cli = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    cli.add_argument('hostname', nargs='?', default=None,
                     help='hostname or ip address of target appliance')
    cli.add_argument('source', nargs='?', default='ManageIQ',
                     help='Source Domain name')
    cli.add_argument('dest', nargs='?', default='Default',
                     help='Destination Domain name')
    parsed = cli.parse_args()

    appliance = IPAppliance(hostname=parsed.hostname)
    status, _ = appliance.clone_domain(parsed.source, parsed.dest)
    return status
コード例 #13
0
def coverage_report_jenkins(jenkins_url, jenkins_jobs, jenkins_user, jenkins_token, appliance_ip,
        appliance_version, wave_size):
    """Aggregate coverage data from jenkins job(s) and upload to sonarqube.

    Exactly one of ``appliance_ip`` / ``appliance_version`` must be given;
    with a version, an appliance is leased from sprout and the pool is
    destroyed afterwards.

    Raises:
        ValueError: if neither or both of ``appliance_ip`` and
            ``appliance_version`` are provided.
    """
    import time  # local import: only needed for the sprout polling loop

    # BUG FIX: the original built these ValueErrors without raising them,
    # so the argument validation silently did nothing.
    if appliance_ip is None and appliance_version is None:
        raise ValueError('Must specify either --appliance-ip or --find-appliance')
    if appliance_ip is not None and appliance_version is not None:
        raise ValueError('--appliance-ip and --find-appliance are mutually exclusive options')

    # Find appliance using sprout if asked to do so:
    if appliance_version is not None:
        # TODO: Upstream support
        group = 'downstream-{}z'.format(''.join(appliance_version.split('.')[:2]))
        sprout = SproutClient.from_config()
        logger.info('requesting an appliance from sprout for %s/%s', group, appliance_version)
        pool_id = sprout.request_appliances(
            group,
            version=appliance_version,
            lease_time=env.sonarqube.scanner_lease)
        logger.info('Requested pool %s', pool_id)
        try:
            while True:
                result = sprout.request_check(pool_id)
                if result['fulfilled'] and result['finished']:
                    break
                # BUG FIX: the original polled in a tight busy-wait,
                # hammering the sprout API; back off between checks.
                time.sleep(10)
            appliance_ip = result['appliances'][0]['ip_address']
            logger.info('Received an appliance with IP address: %s', appliance_ip)

            with IPAppliance(hostname=appliance_ip) as appliance:
                exit(aggregate_coverage(
                    appliance,
                    jenkins_url,
                    jenkins_user,
                    jenkins_token,
                    jenkins_jobs,
                    wave_size))

        finally:
            # Always release the sprout lease, even on failure.
            with diaper:
                sprout.destroy_pool(pool_id)
    else:
        # Use an existing appliance.
        with IPAppliance(hostname=appliance_ip) as appliance:
            exit(aggregate_coverage(
                appliance,
                jenkins_url,
                jenkins_user,
                jenkins_token,
                jenkins_jobs,
                wave_size))
コード例 #14
0
def main():
    """Point the appliance at an external database and enable it."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument(
        'address', nargs='?', default=None,
        help='hostname or ip address of target appliance')
    arg_parser.add_argument(
        'db_address', help='hostname or ip address of external database')
    arg_parser.add_argument(
        '--database', default='vmdb_production',
        help='name of the external database')
    arg_parser.add_argument(
        '--region', default=0, type=int,
        help='region to assign to the new DB')
    arg_parser.add_argument(
        '--username', default=credentials['database']['username'],
        help='username for external database')
    arg_parser.add_argument(
        '--password', default=credentials['database']['password'],
        help='password for external database')
    opts = arg_parser.parse_args()

    print('Initializing Appliance External DB')
    target = IPAppliance(hostname=opts.address)
    status, out = target.db.enable_external(
        opts.db_address, opts.region, opts.database, opts.username,
        opts.password)

    if status != 0:
        print('Enabling DB failed with error:')
        print(out)
        sys.exit(1)
    else:
        print('DB Enabled, evm watchdog should start the UI shortly.')
コード例 #15
0
def call_appliance(ip_address, action, args, kwargs):
    """Resolve dotted ``action`` on an IPAppliance and call (or return) it.

    Given a provider class, find the named method and call it with *args.
    This could possibly be generalized for other CLI tools.

    Args:
        ip_address: appliance address used to build the IPAppliance.
        action: dotted path, e.g. ``db.enable_internal``; leading parts are
            traversed as attributes, the last part is looked up and called.
        args, kwargs: forwarded to the resolved callable.
    """
    target_obj = IPAppliance(hostname=ip_address)
    fields_to_traverse, action = action.split('.')[:-1], action.split('.')[-1]

    # Iterate over non-callables, such as appliance.db
    for field in fields_to_traverse:
        try:
            target_obj = getattr(target_obj, field)
        except AttributeError:
            raise Exception(
                'Field "{}" not found for object "{}"'.format(field, target_obj))

    try:
        call = getattr(target_obj, action)
    except AttributeError:
        raise Exception('Action "{}" not found'.format(action))

    # The final obj may or may not be a callable
    if not callable(call):
        return call
    else:
        # FIX: inspect.getargspec was deprecated and removed in Python 3.11;
        # getfullargspec is the drop-in replacement (**kwargs is reported as
        # .varkw instead of .keywords).
        try:
            argspec = inspect.getfullargspec(call)
        except TypeError:
            return call(*args, **kwargs)
        else:
            if argspec.varkw is not None or 'log_callback' in argspec.args:
                kwargs['log_callback'] = generate_log_callback(ip_address)
            return call(*args, **kwargs)
コード例 #16
0
def main():
    """Precompile assets; restart evmserverd when precompilation succeeds."""
    cli = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    cli.add_argument('address', nargs='?', default=None,
                     help='hostname or ip address of target appliance')

    parsed = cli.parse_args()
    appliance = IPAppliance(hostname=parsed.address)

    rc = appliance.precompile_assets()
    if rc == 0:
        appliance.evmserverd.restart()
        print("EVM service restarted, UI should be available shortly")
    return rc
コード例 #17
0
def test_ipappliance_use_baseurl():
    """A default-constructed IPAppliance must point at the env base_url."""
    default_app = IPAppliance()
    app_parts = urlparse(default_app.url)
    env_parts = urlparse(store.base_url)
    assert (app_parts.scheme, app_parts.netloc) == (env_parts.scheme,
                                                    env_parts.netloc)
    assert default_app.address in store.base_url
コード例 #18
0
def main():
    """Wait for the appliance web UI, returning 1 on timeout."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument(
        'url', nargs='?', default=None,
        help='URL of target appliance, e.g. "https://ip_or_host/"')
    arg_parser.add_argument(
        '--num-sec', default=600, type=int, dest='num_sec',
        help='Maximum number of seconds to wait before giving up, default 600 (10 minutes)')

    opts = arg_parser.parse_args()
    # Without a URL, fall back to the default (env-configured) appliance.
    target = IPAppliance.from_url(opts.url) if opts.url else IPAppliance()
    if not target.wait_for_web_ui(timeout=opts.num_sec):
        return 1
コード例 #19
0
 def collection_appliance(self):
     """Return the appliance that coverage results are collected on.

     NOTE(review): assumes ``conf['.ui-coverage']['collection_appliance']``
     was written during sessionstart by the parallelizer master -- confirm.
     """
     # if parallelized, this is decided in sessionstart and written to the conf
     if store.parallelizer_role == 'slave':
         from cfme.utils.appliance import IPAppliance
         return IPAppliance.from_url(conf['.ui-coverage']['collection_appliance'])
     else:
         # otherwise, coverage only happens on one appliance
         return store.current_appliance
コード例 #20
0
ファイル: ui_coverage.py プロジェクト: apagac/cfme_tests
 def collection_appliance(self):
     """Return the appliance that coverage results are collected on.

     NOTE(review): assumes ``conf['.ui-coverage']['collection_appliance']``
     was written during sessionstart by the parallelizer master -- confirm.
     """
     # if parallelized, this is decided in sessionstart and written to the conf
     if store.parallelizer_role == 'slave':
         from cfme.utils.appliance import IPAppliance
         return IPAppliance.from_url(conf['.ui-coverage']['collection_appliance'])
     else:
         # otherwise, coverage only happens on one appliance
         return store.current_appliance
コード例 #21
0
def get_appliance(appliance_ip):
    """Checks an appliance is not None and if so, loads the appropriate things"""
    from cfme.utils.appliance import IPAppliance, get_or_create_current_appliance
    if appliance_ip:
        return IPAppliance(hostname=appliance_ip)
    return get_or_create_current_appliance()
コード例 #22
0
def test_ipappliance_use_baseurl(appliance):
    """A default-constructed IPAppliance must match the environment base_url."""
    if isinstance(appliance, DummyAppliance):
        pytest.xfail("Dummy appliance cant provide base_url")
    default_app = IPAppliance()
    app_parts = urlparse(default_app.url)
    env_parts = urlparse(store.base_url)
    assert (app_parts.scheme, app_parts.netloc) == (env_parts.scheme,
                                                    env_parts.netloc)
    assert default_app.address in store.base_url
コード例 #23
0
def get_appliance(appliance_ip):
    """Checks an appliance is not None and if so, loads the appropriate things"""
    from cfme.utils.appliance import IPAppliance, load_appliances_from_config, stack
    if appliance_ip:
        app = IPAppliance(hostname=appliance_ip)
    else:
        app = load_appliances_from_config(env)[0]
    stack.push(app)  # ensure safety from bad code, phase out later
    return app
コード例 #24
0
def test_context_hack(monkeypatch):
    """An exception from the screenshot hook must not mask the original error."""
    app = IPAppliance.from_url('http://127.0.0.2/')

    def exploding_capture(*_args):
        raise RuntimeError()

    monkeypatch.setattr(app, '_screenshot_capture_at_context_leave', exploding_capture)

    # The ValueError raised inside the context must propagate unchanged.
    with pytest.raises(ValueError):
        with app:
            raise ValueError("test")
コード例 #25
0
def test_context_hack(monkeypatch):
    """The original exception wins even when the screenshot hook raises."""
    app = IPAppliance.from_url('http://127.0.0.2/')

    def broken_hook(*_args):
        raise RuntimeError()

    monkeypatch.setattr(app, '_screenshot_capture_at_context_leave', broken_hook)

    with pytest.raises(ValueError):
        with app:
            raise ValueError("test")
コード例 #26
0
ファイル: update_rhel.py プロジェクト: nachandr/cfme_tests
def main():
    """Update RHEL on the target appliance, optionally rebooting afterwards."""
    cli = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    cli.add_argument('address',
                     help='hostname or ip address of target appliance')
    cli.add_argument("-u", "--url", dest="urls", action="append",
                     help="url(s) to use for update")
    cli.add_argument("-c", "--cleanup", dest="cleanup", action="store_true",
                     help="Whether to cleanup /etc/yum.repos.d before start")
    cli.add_argument("--no_wait_ui", dest="no_wait_ui", action="store_false",
                     help="Whether to NOT wait for UI after reboot")
    cli.add_argument('--reboot', action="store_true", default=False,
                     help='reboot after installation ' +
                     '(required for proper operation)')

    parsed = cli.parse_args()
    appliance = IPAppliance(hostname=parsed.address)
    # Don't reboot here, so we can print updates to the console when we do
    result = appliance.update_rhel(*parsed.urls,
                                   reboot=False,
                                   streaming=True,
                                   cleanup=parsed.cleanup)

    if result.rc == 0:
        if parsed.reboot:
            print('Rebooting')
            appliance.reboot(wait_for_miq_ready=parsed.no_wait_ui)
        print('Appliance update complete')

    return result.rc
コード例 #27
0
ファイル: install_vddk.py プロジェクト: hhovsepy/cfme_tests
def main():
    """Install VDDK on a target appliance (or the current one)."""
    cli = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    cli.add_argument('--address', default=None,
                     help='hostname or ip address of target appliance')
    cli.add_argument('--vddk_url', help='url to download vddk pkg')
    cli.add_argument('--reboot', action="store_true",
                     help='reboot after installation ' +
                     '(required for proper operation)')
    cli.add_argument('--force', action="store_true",
                     help='force installation if version detected')

    parsed = cli.parse_args()

    # Without an address, fall back to the currently configured appliance.
    if parsed.address:
        appliance = IPAppliance(hostname=urlparse(parsed.address).netloc)
    else:
        appliance = get_or_create_current_appliance()

    appliance.install_vddk(reboot=parsed.reboot,
                           force=parsed.force,
                           vddk_url=parsed.vddk_url,
                           log_callback=log)
コード例 #28
0
def main():
    """Wait for the web UI of the appliance at the given URL; 1 on timeout."""
    cli = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    cli.add_argument('url',
                     help='URL of target appliance, e.g. "https://ip_or_host/"')
    cli.add_argument('--num-sec', default=600, type=int, dest='num_sec',
                     help='Maximum number of seconds to wait before giving up, default 600 (10 minutes)')

    parsed = cli.parse_args()
    appliance = IPAppliance.from_url(parsed.url)
    if not appliance.wait_for_web_ui(timeout=parsed.num_sec):
        return 1
コード例 #29
0
ファイル: setup_env.py プロジェクト: jhutar/integration_tests
def setup_replication_env(cfme_version, provider, lease, sprout_poolid, desc):
    """Multi appliance setup with multi region and replication from remote to global

    FIX: this docstring was originally the *second* statement of the body
    (after ``lease_time = tot_time(lease)``), making it an inert string
    literal rather than the function docstring (PEP 257 requires the
    docstring to be the first statement).
    """
    lease_time = tot_time(lease)
    required_app_count = 2
    sprout_client = SproutClient.from_config()
    if sprout_poolid:
        if sprout_client.call_method('pool_exists', sprout_poolid):
            sprout_pool = sprout_client.call_method('request_check',
                                                    sprout_poolid)
            if len(sprout_pool['appliances']) >= required_app_count:
                print("Processing pool...")
                apps = []
                for app in sprout_pool['appliances']:
                    apps.append(IPAppliance(app['ip_address']))
                sprout_client.set_pool_description(sprout_poolid, desc)
            else:
                sys.exit("Pool does not meet the minimum size requirements!")
        else:
            sys.exit("Pool not found!")

    else:
        print("Provisioning appliances")
        apps, request_id = provision_appliances(count=required_app_count,
                                                cfme_version=cfme_version,
                                                provider=provider,
                                                lease_time=lease_time)
        print("Appliance pool lease time is {}".format(lease))
        sprout_client.set_pool_description(request_id, desc)
    print("Configuring replicated environment")
    ip0 = apps[0].hostname
    ip1 = apps[1].hostname
    # The appliance_console menu option differs between CFME releases.
    opt = '5' if cfme_version >= "5.8" else '8'
    # Scripted answers for the appliance_console configuration dialogs.
    command_set0 = ('ap', '', opt, '1', '1', 'y', '1', 'n', '99', pwd,
                    TimedCommand(pwd, 360), '')
    apps[0].appliance_console.run_commands(command_set0)
    apps[0].wait_for_evm_service()
    apps[0].wait_for_web_ui()
    print("Global region appliance provisioned and configured {}".format(ip0))
    command_set1 = ('ap', '', opt, '2', ip0, '', pwd, '', '1', 'y', '1', 'n',
                    '1', pwd, TimedCommand(pwd, 360), '')
    apps[1].appliance_console.run_commands(command_set1)
    apps[1].wait_for_evm_service()
    apps[1].wait_for_web_ui()
    print("Remote region appliance provisioned and configured {}".format(ip1))
    print("Setup - Replication on remote appliance")
    apps[1].set_pglogical_replication(replication_type=':remote')
    print("Setup - Replication on global appliance")
    apps[0].set_pglogical_replication(replication_type=':global')
    apps[0].add_pglogical_replication_subscription(apps[1].hostname)
    print("Done!")
コード例 #30
0
 def provision_appliances(self,
                          count=1,
                          preconfigured=False,
                          version=None,
                          stream=None,
                          provider=None,
                          provider_type=None,
                          lease_time=60,
                          ram=None,
                          cpu=None,
                          **kwargs):
     """Request ``count`` appliances from sprout and wait for fulfilment.

     Returns:
         tuple: (list of IPAppliance built from the request data, sprout
         request id).  The caller is responsible for releasing the request.

     NOTE(review): ``wait_time`` may be supplied via kwargs (default 900s);
     since it is read with ``kwargs.get`` it is also forwarded to sprout in
     ``**kwargs`` -- confirm that is intended.
     """
     # provisioning may take more time than it is expected in some cases
     wait_time = kwargs.get('wait_time', 900)
     # If we specify version, stream is ignored because we will get that specific version
     if version:
         stream = get_stream(version)
     # If we specify stream but not version, sprout will give us latest version of that stream
     elif stream:
         pass
     # If we dont specify either, we will get the same version as current appliance
     else:
         stream = get_stream(current_appliance.version)
         version = current_appliance.version.vstring
     request_id = self.call_method('request_appliances',
                                   preconfigured=preconfigured,
                                   version=version,
                                   provider_type=provider_type,
                                   group=stream,
                                   provider=provider,
                                   lease_time=lease_time,
                                   ram=ram,
                                   cpu=cpu,
                                   count=count,
                                   **kwargs)
     # Poll sprout until the request reports 'finished'.
     wait_for(lambda: self.call_method('request_check', str(request_id))[
         'finished'],
              num_sec=wait_time,
              message='provision {} appliance(s) from sprout'.format(count))
     data = self.call_method('request_check', str(request_id))
     logger.debug(data)
     appliances = []
     for appliance in data['appliances']:
         app_args = {
             'hostname': appliance['ip_address'],
             'project': appliance['project'],
             'container': appliance['container'],
             'db_host': appliance['db_host']
         }
         appliances.append(IPAppliance(**app_args))
     return appliances, request_id
コード例 #31
0
def main():
    """Install the NetApp SDK on a target appliance (or the current one)."""
    cli = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    cli.add_argument('--address', default=None,
                     help='hostname or ip address of target appliance')
    cli.add_argument('--sdk_url',
                     default=cfme_data.get("basic_info", {}).get("netapp_sdk_url"),
                     help='url to download sdk pkg')
    cli.add_argument('--restart', action="store_true",
                     help='restart evmserverd after installation ' +
                     '(required for proper operation)')

    parsed = cli.parse_args()
    # Without an address, operate on the currently configured appliance.
    if parsed.address:
        appliance = IPAppliance(hostname=parsed.address)
    else:
        appliance = get_or_create_current_appliance()
    print('Address: {}'.format(appliance.hostname))
    print('SDK URL: {}'.format(parsed.sdk_url))
    print('Restart: {}'.format(parsed.restart))

    appliance.install_netapp_sdk(sdk_url=parsed.sdk_url,
                                 reboot=parsed.restart,
                                 log_callback=log)
コード例 #32
0
def temp_extdb_pod_appliance(appliance, provider, extdb_template,
                             template_tags, create_external_database,
                             appliance_data):
    """Deploy a pod appliance backed by an external DB, yield it, clean up.

    NOTE(review): this reads like a pytest yield-fixture body; the
    ``appliance`` parameter is shadowed by the IPAppliance constructed in
    the ``with`` block below -- confirm that is intended.
    """
    db_host, db_name = create_external_database
    # Unique project name so concurrent runs do not collide.
    project = 'test-pod-extdb-{t}'.format(
        t=fauxfactory.gen_alphanumeric().lower())
    provision_data = {
        'template': extdb_template['name'],
        'tags': template_tags,
        'vm_name': project,
        'template_params': {
            'DATABASE_IP': db_host,
            'DATABASE_NAME': db_name
        },
        # The postgresql pod is not needed: the DB lives externally.
        'running_pods':
        set(provider.mgmt.required_project_pods) - {'postgresql'}
    }
    try:
        data = provider.mgmt.deploy_template(**provision_data)
        params = appliance_data.copy()
        params['db_host'] = data['external_ip']
        params['project'] = project
        params['hostname'] = data['url']

        def is_api_available(appliance):
            # Success is "providers collection readable"; any error -> retry.
            try:
                return appliance.rest_api.collections.providers.all
            except Exception:
                pass

        with IPAppliance(**params) as appliance:
            # framework will try work with default appliance if browser restarts w/o this
            # workaround
            appliance.is_pod = True
            stack.push(appliance)
            holder = config.pluginmanager.get_plugin(PLUGIN_KEY)
            holder.held_appliance = appliance
            # workaround, appliance looks ready but api may return errors
            wait_for(is_api_available, func_args=[appliance], num_sec=30)
            yield appliance
            stack.pop()
    finally:
        # Delete the template VM even if provisioning failed part-way.
        if provider.mgmt.does_vm_exist(project):
            provider.mgmt.delete_vm(project)
コード例 #33
0
def main():
    """Initialize an internal database on the target appliance."""
    arg_parser = argparse.ArgumentParser(
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument(
        'address', help='hostname or ip address of target appliance')
    arg_parser.add_argument(
        '--region', default=0, type=int, help='region to assign to the new DB')
    opts = arg_parser.parse_args()

    print('Initializing Appliance Internal DB')
    target = IPAppliance(opts.address)
    status, out = target.db.enable_internal(opts.region)

    if status != 0:
        print('Enabling DB failed with error:')
        print(out)
        sys.exit(1)
    else:
        print('DB Enabled, evm watchdog should start the UI shortly.')
コード例 #34
0
def call_appliance(ip_address, action, args, kwargs):
    """Find ``action`` on an IPAppliance and call it (or return a property).

    Given a provider class, find the named method and call it with *args.
    This could possibly be generalized for other CLI tools.
    """
    appliance = IPAppliance(ip_address)

    try:
        call = getattr(appliance, action)
    except AttributeError:
        raise Exception('Action "{}" not found'.format(action))
    if isinstance(getattr(type(appliance), action), property):
        return call
    else:
        # FIX: inspect.getargspec was deprecated and removed in Python 3.11;
        # getfullargspec is the drop-in replacement (**kwargs is reported as
        # .varkw instead of .keywords).
        try:
            argspec = inspect.getfullargspec(call)
        except TypeError:
            return call(*args, **kwargs)
        else:
            if argspec.varkw is not None or 'log_callback' in argspec.args:
                kwargs['log_callback'] = generate_log_callback(ip_address)
            return call(*args, **kwargs)
コード例 #35
0
ファイル: client.py プロジェクト: LaVLaS/cfme_tests
 def provision_appliances(self,
                          count=1,
                          preconfigured=False,
                          version=None,
                          stream=None,
                          provider=None,
                          provider_type=None,
                          lease_time=120,
                          ram=None,
                          cpu=None):
     """Request ``count`` appliances from sprout and wait (up to 300s).

     Returns:
         tuple: (list of IPAppliance, sprout request id).  The caller is
         responsible for releasing the request when done.
     """
     # If we specify version, stream is ignored because we will get that specific version
     if version:
         stream = get_stream(version)
     # If we specify stream but not version, sprout will give us latest version of that stream
     elif stream:
         pass
     # If we dont specify either, we will get the same version as current appliance
     else:
         stream = get_stream(current_appliance.version)
         version = current_appliance.version.vstring
     request_id = self.call_method('request_appliances',
                                   preconfigured=preconfigured,
                                   version=version,
                                   provider_type=provider_type,
                                   group=stream,
                                   provider=provider,
                                   lease_time=lease_time,
                                   ram=ram,
                                   cpu=cpu,
                                   count=count)
     # Poll sprout until the request reports 'finished'.
     wait_for(lambda: self.call_method('request_check', str(request_id))[
         'finished'],
              num_sec=300,
              message='provision {} appliance(s) from sprout'.format(count))
     data = self.call_method('request_check', str(request_id))
     logger.debug(data)
     appliances = []
     for appliance in data['appliances']:
         appliances.append(IPAppliance(hostname=appliance['ip_address']))
     return appliances, request_id
コード例 #36
0
def pytest_configure(config):
    """Provision sprout appliances for the session unless explicit appliances
    were supplied or sprout usage was not requested."""
    if config.getoption("appliances"):
        return
    if not config.getoption('--use-sprout'):
        return

    provision_request = SproutProvisioningRequest.from_config(config)

    mgr = config._sprout_mgr = SproutManager()
    requested_appliances = mgr.request_appliances(provision_request)

    # Rebuild the appliance option list in place with sprout-provided URLs
    appliances = config.option.appliances
    appliances[:] = []
    # Push an appliance to the stack to have a proper reference for test
    # collection.
    # FIXME: this is a bad hack based on the need for control of collection
    # partitioning
    appliance_stack.push(
        IPAppliance(address=requested_appliances[0]["ip_address"]))
    log.info("Appliances were provided:")
    for provided in requested_appliances:
        provided_url = "https://{}/".format(provided["ip_address"])
        appliances.append(provided_url)
        log.info("- %s is %s", provided_url, provided['name'])

    mgr.reset_timer()
    # The first appliance serves as base_url for collection purposes
    conf.runtime["env"]["base_url"] = appliances[0]
    # Retrieve and print the template_name for Jenkins to pick up
    template_name = requested_appliances[0]["template_name"]
    conf.runtime["cfme_data"]["basic_info"]["appliance_template"] = template_name
    log.info("appliance_template: %s", template_name)
    with project_path.join('.appliance_template').open('w') as template_file:
        template_file.write(
            'export appliance_template="{}"'.format(template_name))
    log.info("Sprout setup finished.")

    config.pluginmanager.register(ShutdownPlugin())
コード例 #37
0
def claim_appliance_and_aggregate(jenkins_url, jenkins_jobs, version,
                                  jenkins_user, jenkins_token, wave_size):
    """Provision a sprout appliance for ``version``, run coverage aggregation
    on it, and always release the appliance pool afterwards.

    Exits the process with the return code of ``aggregate_coverage``.
    """
    import time
    # TODO: Upstream support
    # e.g. version '5.10.1.2' -> sprout group 'downstream-510z'
    group = 'downstream-' + ''.join(version.split('.')[:2]) + 'z'
    sprout = SproutClient.from_config()
    logger.info('requesting an appliance from sprout for %s/%s', group,
                version)
    pool_id = sprout.request_appliances(group,
                                        version=version,
                                        lease_time=env.sonarqube.scanner_lease)
    logger.info('Requested pool %s', pool_id)
    result = None
    try:
        while not result or not (result['fulfilled'] and result['finished']):
            if result is not None:
                # Pause between polls so we do not hammer the sprout API
                # in a tight busy-wait loop
                time.sleep(10)
            result = sprout.request_check(pool_id)
        appliance_ip = result['appliances'][0]['ip_address']
        logger.info('Received an appliance with IP address: %s', appliance_ip)
        with IPAppliance(hostname=appliance_ip) as appliance:
            exit(
                aggregate_coverage(appliance, jenkins_url, jenkins_user,
                                   jenkins_token, jenkins_jobs, wave_size))
    finally:
        # Best-effort release of the pool, even if aggregation blew up
        with diaper:
            sprout.destroy_pool(pool_id)
コード例 #38
0
def test_ipappliance_from_address():
    """An IPAppliance built from a bare address keeps it and derives the URL."""
    addr = '1.2.3.4'
    appliance = IPAppliance(addr)
    assert appliance.address == addr
    assert appliance.url == 'https://{}/'.format(addr)
コード例 #39
0
def test_ipappliance_managed_providers(infra_provider):
    """A default IPAppliance reports the set-up infra provider as managed."""
    appliance = IPAppliance()
    assert infra_provider in appliance.managed_known_providers
コード例 #40
0
ファイル: clone_template.py プロジェクト: akarol/cfme_tests
def main(**kwargs):
    """Clone an appliance template to a VM on the requested provider.

    Depending on the flags in ``kwargs`` this can instead destroy an existing
    VM (``destroy``), configure the freshly deployed appliance (``configure``
    / ``deploy``), and record the appliance IP to ``outfile`` and stdout.

    Returns POSIX-ish exit codes: falsy on success, 10 for appliance/IP
    problems, 12 for deployment failures.
    """
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get('deploy'):
        # Deploy mode drives everything off provider_data and always configures
        kwargs['configure'] = True
        kwargs['outfile'] = 'appliance_ip_address_1'
        providers = provider_data['management_systems']
        provider_dict = provider_data['management_systems'][kwargs['provider']]
        credentials =\
            {'username': provider_dict['username'],
             'password': provider_dict['password'],
             'tenant': provider_dict['template_upload'].get('tenant_admin', 'admin'),
             'auth_url': provider_dict.get('auth_url'),
             }
        provider = get_mgmt(kwargs['provider'], providers=providers, credentials=credentials)
        flavors = provider_dict['template_upload'].get('flavors', ['m1.medium'])
        provider_type = provider_data['management_systems'][kwargs['provider']]['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }
    else:
        provider = get_mgmt(kwargs['provider'])
        provider_dict = cfme_data['management_systems'][kwargs['provider']]
        provider_type = provider_dict['type']
        flavors = cfme_data['appliance_provisioning']['default_flavors'].get(provider_type, [])
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }

    logger.info('Connecting to {}'.format(kwargs['provider']))

    if kwargs.get('destroy'):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # This returns True if destroy fails to give POSIXy exit codes (0 is good, False is 0, etc)
        return not destroy_vm(provider, deploy_args['vm_name'])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', kwargs.get('cluster'))
        if cluster is None:
            raise Exception('--cluster is required for rhev instances and default is not set')
        deploy_args['cluster'] = cluster

        if kwargs.get('place_policy_host') and kwargs.get('place_policy_aff'):
            deploy_args['placement_policy_host'] = kwargs['place_policy_host']
            deploy_args['placement_policy_affinity'] = kwargs['place_policy_aff']
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so fall back to a
        # fixed default.  c3.xlarge has 4 CPU cores and 7.5GB RAM - minimal
        # requirements for a CFME Appliance.  (dict.get cannot raise
        # IndexError, so the previous try/except around this was dead code.)
        flavor = kwargs.get('flavor', 'c3.xlarge')
        deploy_args['instance_type'] = flavor
        deploy_args['key_name'] = "shared"
        # we want to override default cloud-init which disables root login and password login
        cloud_init_dict = {
            'chpasswd':
            {
                'expire': False,
                'list': '{}:{}\n'.format(cred['ssh']['username'], cred['ssh']['password'])
            },
            'disable_root': 0,
            'ssh_pwauth': 1
        }
        cloud_init = "#cloud-config\n{}".format(yaml.safe_dump(cloud_init_dict,
                                                               default_flow_style=False))
        deploy_args['user_data'] = cloud_init
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available; materialize the
        # result as a list so it can be indexed below (filter() is a lazy,
        # non-subscriptable iterator on Python 3)
        available_flavors = provider.list_flavor()
        flavors = [f for f in flavors if f in available_flavors]
        try:
            flavor = kwargs.get('flavor') or flavors[0]
        except IndexError:
            raise Exception('--flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        # flavour? Thanks, psav...
        deploy_args['flavour_name'] = flavor

        if 'network' in provider_dict:
            # support rhos4 network names
            deploy_args['network_name'] = provider_dict['network']

        provider_pools = [p.name for p in provider.api.floating_ip_pools.list()]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get('floating_ip_pool') or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict["allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']
    elif provider_type == 'gce':
        deploy_args['ssh_key'] = '{user_name}:{public_key}'.format(
            user_name=cred['ssh']['ssh-user'],
            public_key=cred['ssh']['public_key'])
    # Do it!
    try:
        logger.info('Cloning {} to {} on {}'.format(deploy_args['template'], deploy_args['vm_name'],
                                                    kwargs['provider']))
        provider.deploy_template(**deploy_args)
    except Exception as e:
        logger.exception(e)
        logger.error('provider.deploy_template failed')
        if kwargs.get('cleanup'):
            logger.info('attempting to destroy {}'.format(deploy_args['vm_name']))
            destroy_vm(provider, deploy_args['vm_name'])
        return 12

    if not provider.does_vm_exist(deploy_args['vm_name']):
        logger.error('provider.deploy_template failed without exception')
        return 12

    if provider.is_vm_running(deploy_args['vm_name']):
        logger.info("VM {} is running".format(deploy_args['vm_name']))
    else:
        logger.error("VM is not running")
        return 10

    try:
        # fail_condition=None: keep polling until a non-None IP appears
        ip, time_taken = wait_for(provider.get_ip_address, [deploy_args['vm_name']], num_sec=1200,
                                  fail_condition=None)
        logger.info('IP Address returned is {}'.format(ip))
    except Exception as e:
        logger.exception(e)
        logger.error('IP address not returned')
        return 10

    try:
        if kwargs.get('configure'):
            logger.info('Configuring appliance, this can take a while.')
            if kwargs.get('deploy'):
                app = IPAppliance(hostname=ip)
            else:
                app = Appliance.from_provider(kwargs['provider'], deploy_args['vm_name'])
            if provider_type == 'gce':
                with app as ipapp:
                    ipapp.configure_gce()
            else:
                app.configure()
            logger.info('Successfully Configured the appliance.')
    except Exception as e:
        logger.exception(e)
        logger.error('Appliance Configuration Failed')
        if not kwargs.get('deploy'):
            # Grab the anaconda log off the broken appliance for debugging
            app = Appliance.from_provider(kwargs['provider'], deploy_args['vm_name'])
            ssh_client = app.ssh_client()
            status, output = ssh_client.run_command('find /root/anaconda-post.log')
            if status == 0:
                ssh_client.get_file('/root/anaconda-post.log',
                                    log_path.join('anaconda-post.log').strpath)
            ssh_client.close()
        return 10

    if kwargs.get('outfile') or kwargs.get('deploy'):
        with open(kwargs['outfile'], 'w') as outfile:
            outfile.write("appliance_ip_address={}\n".format(ip))

    # In addition to the outfile, drop the ip address on stdout for easy parsing
    print(ip)
コード例 #41
0
def test_workload_capacity_and_utilization_rep(appliance, request, scenario, setup_perf_provider):
    """Runs through provider based scenarios enabling C&U and replication, run for a set period of
    time. Memory Monitor creates graphs and summary at the end of each scenario.

    Supports both pglogical (appliance-under-test :remote -> master :global)
    and rubyrep replication, selected via ``scenario['replication']``.

    Polarion:
        assignee: rhcf3_machine
        casecomponent: CandU
        initialEstimate: 1/4h
    """
    # Millisecond timestamp marking scenario start; used for dashboard URLs
    from_ts = int(time.time() * 1000)
    ssh_client = appliance.ssh_client()

    # SSH details of the replication master (the "global" side)
    ssh_master_args = {
        'hostname': scenario['replication_master']['ip_address'],
        'username': scenario['replication_master']['ssh']['username'],
        'password': scenario['replication_master']['ssh']['password']}
    master_appliance = IPAppliance(hostname=scenario['replication_master']['ip_address'],
                                   openshift_creds=ssh_master_args)

    ssh_client_master = SSHClient(**ssh_master_args)
    logger.debug('Scenario: {}'.format(scenario['name']))

    is_pglogical = True if scenario['replication'] == 'pglogical' else False

    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical scenario
    appliance.set_pglogical_replication(replication_type=':none')
    # Spawn tail beforehand to prevent unnecessary waiting on MiqServer starting since appliance
    # under test is cleaned first, followed by master appliance
    sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
    sshtail_evm.set_initial_file_end()
    logger.info('Clean appliance under test ({})'.format(ssh_client))
    appliance.clean_appliance()
    logger.info('Clean master appliance ({})'.format(ssh_client_master))
    master_appliance.clean_appliance()  # Clean Replication master appliance

    # Metadata recorded alongside the memory-monitor graphs and summary
    if is_pglogical:
        scenario_data = {'appliance_ip': appliance.hostname,
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (pgLogical)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario}
    else:
        scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (RubyRep)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario}
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        # Finalizer: stop the memory monitor thread and attach grafana URLs
        # plus collected quantifiers to the scenario results
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(evm_tail=sshtail_evm, poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Configure Replication
    if is_pglogical:
        # Setup appliance under test to :remote
        appliance.set_pglogical_replication(replication_type=':remote')
        # Setup master appliance to :global
        master_appliance.set_pglogical_replication(replication_type=':global')
        # Setup master to subscribe:
        master_appliance.add_pglogical_replication_subscription(ssh_client_master,
            appliance.hostname)
    else:
        # Setup local towards Master
        appliance.set_rubyrep_replication(scenario['replication_master']['ip_address'])
        # Force uninstall rubyrep for this region from master (Unsure if still needed)
        # ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
        # Turn on DB Sync role
        appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})

    # Variable amount of time for C&U collections/processing; log progress
    # at most every 5 minutes
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    # Turn off replication:
    if is_pglogical:
        appliance.set_pglogical_replication(replication_type=':none')
    else:
        appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
コード例 #42
0
def test_ipappliance_from_url():
    """from_url keeps the URL verbatim and parses the hostname out of it."""
    host = '1.2.3.4'
    source_url = 'http://{}/'.format(host)
    appliance = IPAppliance.from_url(source_url)
    assert appliance.url == source_url
    assert appliance.hostname == host
コード例 #43
0
def setup_multiregion_env(cfme_version, provider_type, provider, lease,
                          sprout_poolid, desc, remote_nodes, add_prov):
    """Multi appliance setup with multi region and replication from remote to global.

    Provisions (or reuses an existing sprout pool via ``sprout_poolid``) one
    global appliance plus ``remote_nodes`` remote appliances, configures each
    region's internal DB, wires pglogical replication remote -> global, and
    optionally spreads the providers in ``add_prov`` round-robin across the
    remote appliances.

    Note: this string was previously placed after the first statements, where
    it was a no-op expression instead of the function docstring.
    """
    lease_time = tot_time(lease)
    # A concrete provider overrides any provider-type filter
    provider_type = None if provider else provider_type

    sprout_client = SproutClient.from_config()

    required_app_count = 1  # global app
    required_app_count += remote_nodes

    if sprout_poolid:
        # Reuse an existing pool if it exists and is large enough
        if sprout_client.call_method('pool_exists', sprout_poolid):
            sprout_pool = sprout_client.call_method('request_check',
                                                    sprout_poolid)
            if len(sprout_pool['appliances']) >= required_app_count:
                print("Processing pool...")
                apps = []
                for app in sprout_pool['appliances']:
                    apps.append(IPAppliance(app['ip_address']))
                sprout_client.set_pool_description(sprout_poolid, desc)
            else:
                sys.exit("Pool does not meet the minimum size requirements!")
        else:
            sys.exit("Pool not found!")

    else:
        print("Provisioning appliances")
        apps, request_id = provision_appliances(count=required_app_count,
                                                cfme_version=cfme_version,
                                                provider_type=provider_type,
                                                provider=provider,
                                                lease_time=lease_time)
        print("Appliance pool lease time is {}".format(lease))
        sprout_client.set_pool_description(request_id, desc)
        print("Appliances Provisioned")
    print("Configuring Replicated Environment")
    # First appliance is the global region; the rest become remotes
    global_app = apps[0]
    gip = global_app.hostname

    remote_apps = apps[1:]

    print("Global Appliance Configuration")
    app_creds = {
        "username": credentials["database"]["username"],
        "password": credentials["database"]["password"],
        "sshlogin": credentials["ssh"]["username"],
        "sshpass": credentials["ssh"]["password"],
    }

    # Region 99 marks the global region; remotes get 10, 20, 30, ...
    app_params = dict(region=99,
                      dbhostname='localhost',
                      username=app_creds['username'],
                      password=app_creds['password'],
                      dbname='vmdb_production',
                      dbdisk=global_app.unpartitioned_disks[0])
    global_app.appliance_console_cli.configure_appliance_internal(**app_params)
    global_app.evmserverd.wait_for_running()
    global_app.wait_for_web_ui()

    print("Done: Global @ {}".format(gip))

    for num, app in enumerate(remote_apps):
        region_n = str((num + 1) * 10)
        print("Remote Appliance Configuration")
        app_params = dict(region=region_n,
                          dbhostname='localhost',
                          username=app_creds['username'],
                          password=app_creds['password'],
                          dbname='vmdb_production',
                          dbdisk=app.unpartitioned_disks[0],
                          fetch_key=gip,
                          sshlogin=app_creds['sshlogin'],
                          sshpass=app_creds['sshpass'])

        app.appliance_console_cli.configure_appliance_internal_fetch_key(
            **app_params)
        app.evmserverd.wait_for_running()
        app.wait_for_web_ui()
        print("Done: Remote @ {}, region: {}".format(app.hostname, region_n))

        print("Configuring Replication")
        print("Setup - Replication on remote appliance")
        app.set_pglogical_replication(replication_type=':remote')

    print("Setup - Replication on global appliance")
    global_app.set_pglogical_replication(replication_type=':global')
    for app in remote_apps:
        global_app.add_pglogical_replication_subscription(app.hostname)

    # Shuffle so provider load is not always biased towards the first remote
    random.shuffle(remote_apps)
    if add_prov:
        for app, prov_id in zip(cycle(remote_apps), add_prov):
            stack.push(app)
            prov = get_crud(prov_id)
            print("Adding provider {} to appliance {}".format(
                prov_id, app.hostname))
            prov.create_rest()
            stack.pop()

    print("Done!")
コード例 #44
0
def test_ipappliance_from_url():
    """from_url keeps the URL verbatim and exposes the parsed address."""
    host = '1.2.3.4'
    src = 'http://{}/'.format(host)
    appliance = IPAppliance.from_url(src)
    assert appliance.url == src
    assert appliance.address == host
コード例 #45
0
ファイル: clone_template.py プロジェクト: lcouzens/cfme_tests
def main(**kwargs):
    """Clone an appliance template to a VM/instance on the requested provider.

    Handles per-provider-type deploy arguments (rhevm, ec2, openstack,
    virtualcenter, scvmm, gce, openshift), can destroy an existing VM instead
    (``destroy``), optionally configures the deployed appliance, and writes
    the resulting appliance info to ``outfile`` (YAML) and stdout.

    Returns POSIX-ish exit codes: falsy on success, 10 for appliance/IP
    problems, 12 for deployment failures.
    """
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get('deploy'):
        # Deploy mode drives everything off provider_data and always configures
        kwargs['configure'] = True
        kwargs['outfile'] = 'appliance_ip_address_1'
        providers = provider_data['management_systems']
        provider_dict = provider_data['management_systems'][kwargs['provider']]
        credentials =\
            {'username': provider_dict['username'],
             'password': provider_dict['password'],
             'tenant': provider_dict['template_upload'].get('tenant_admin', 'admin'),
             'auth_url': provider_dict.get('auth_url'),
             }
        provider = get_mgmt(kwargs['provider'], providers=providers, credentials=credentials)
        flavors = provider_dict['template_upload'].get('flavors', ['m1.medium'])
        provider_type = provider_data['management_systems'][kwargs['provider']]['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }
    else:
        provider = get_mgmt(kwargs['provider'])
        provider_dict = cfme_data['management_systems'][kwargs['provider']]
        provider_type = provider_dict['type']
        flavors = cfme_data['appliance_provisioning']['default_flavors'].get(provider_type, [])
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }

    logger.info('Connecting to %s', kwargs['provider'])

    if kwargs.get('destroy'):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # This returns True if destroy fails to give POSIXy exit codes (0 is good, False is 0, etc)
        return not destroy_vm(provider, deploy_args['vm_name'])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', kwargs.get('cluster'))
        if cluster is None:
            raise Exception('--cluster is required for rhev instances and default is not set')
        deploy_args['cluster'] = cluster

        if kwargs.get('place_policy_host') and kwargs.get('place_policy_aff'):
            deploy_args['placement_policy_host'] = kwargs['place_policy_host']
            deploy_args['placement_policy_affinity'] = kwargs['place_policy_aff']
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so fall back to a
        # fixed default.  c3.xlarge has 4 CPU cores and 7.5GB RAM - minimal
        # requirements for a CFME Appliance.  (dict.get cannot raise
        # IndexError, so the previous try/except around this was dead code.)
        flavor = kwargs.get('flavor', 'c3.xlarge')
        deploy_args['instance_type'] = flavor
        deploy_args['key_name'] = "shared"
        # we want to override default cloud-init which disables root login and password login
        cloud_init_dict = {
            'chpasswd':
            {
                'expire': False,
                'list': '{}:{}\n'.format(cred['ssh']['username'], cred['ssh']['password'])
            },
            'disable_root': False,
            'ssh_pwauth': True
        }
        cloud_init = "#cloud-config\n{}".format(yaml.safe_dump(cloud_init_dict,
                                                               default_flow_style=False))
        deploy_args['user_data'] = cloud_init
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available; materialize the
        # result as a list so it can be indexed below (filter() is a lazy,
        # non-subscriptable iterator on Python 3)
        available_flavors = provider.list_flavor()
        logger.info("Available flavors on provider: %s", available_flavors)
        generic_flavors = [f for f in flavors if f in available_flavors]

        try:
            flavor = (kwargs.get('flavor') or
                      provider_dict.get('sprout', {}).get('flavor_name') or
                      generic_flavors[0])
        except IndexError:
            raise Exception('--flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        logger.info('Selected flavor: %s', flavor)

        deploy_args['flavor_name'] = flavor

        if 'network' in provider_dict:
            # support rhos4 network names
            deploy_args['network_name'] = provider_dict['network']

        provider_pools = [p.name for p in provider.api.floating_ip_pools.list()]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get('floating_ip_pool') or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict["allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']
    elif provider_type == 'gce':
        deploy_args['ssh_key'] = '{user_name}:{public_key}'.format(
            user_name=cred['ssh']['ssh-user'],
            public_key=cred['ssh']['public_key'])
    elif provider_type == 'openshift':
        # openshift templates carry their deployment tags in trackerbot
        # custom data; take the most recent matching template entry
        trackerbot = api()
        raw_tags = trackerbot.providertemplate().get(provider=kwargs['provider'],
                                                     template=deploy_args['template'])['objects']
        raw_tags = raw_tags[-1]['template'].get('custom_data', "{}")
        deploy_args["tags"] = yaml.safe_load(raw_tags)['TAGS']
    # Do it!
    try:
        logger.info(
            'Cloning %s to %s on %s',
            deploy_args['template'], deploy_args['vm_name'], kwargs['provider']
        )
        # TODO: change after openshift wrapanapi refactor
        output = None  # 'output' is only used for openshift providers
        if isinstance(provider, Openshift):
            output = provider.deploy_template(**deploy_args)
        else:
            template = provider.get_template(deploy_args['template'])
            template.deploy(**deploy_args)

    except Exception as e:
        logger.exception(e)
        logger.error('template deploy failed')
        if kwargs.get('cleanup'):
            logger.info('attempting to destroy %s', deploy_args['vm_name'])
            destroy_vm(provider, deploy_args['vm_name'])
        return 12

    if not provider.does_vm_exist(deploy_args['vm_name']):
        logger.error('provider.deploy_template failed without exception')
        return 12

    # TODO: change after openshift wrapanapi refactor
    if isinstance(provider, Openshift):
        if provider.is_vm_running(deploy_args['vm_name']):
            logger.info('VM %s is running', deploy_args['vm_name'])
        else:
            logger.error('VM %s is not running', deploy_args['vm_name'])
            return 10
    else:
        vm = provider.get_vm(deploy_args['vm_name'])
        vm.ensure_state(VmState.RUNNING, timeout='5m')
        if provider_type == 'gce':
            try:
                attach_gce_disk(vm)
            except Exception:
                logger.exception("Failed to attach db disk")
                destroy_vm(provider, deploy_args['vm_name'])
                return 10

    if provider_type == 'openshift':
        ip = output['url']
    else:
        try:
            # fail_condition=None: keep polling until a non-None IP appears
            ip, _ = wait_for(lambda: vm.ip, num_sec=1200, fail_condition=None)
            logger.info('IP Address returned is %s', ip)
        except Exception as e:
            logger.exception(e)
            logger.error('IP address not returned')
            return 10

    try:
        if kwargs.get('configure'):
            logger.info('Configuring appliance, this can take a while.')
            if kwargs.get('deploy'):
                app = IPAppliance(hostname=ip)
            else:
                app_args = (kwargs['provider'], deploy_args['vm_name'])
                app_kwargs = {}
                if provider_type == 'openshift':
                    ocp_creds = cred[provider_dict['credentials']]
                    ssh_creds = cred[provider_dict['ssh_creds']]
                    app_kwargs = {
                        'project': output['project'],
                        'db_host': output['external_ip'],
                        'container': 'cloudforms-0',
                        'hostname': ip,
                        'openshift_creds': {
                            'hostname': provider_dict['hostname'],
                            'username': ocp_creds['username'],
                            'password': ocp_creds['password'],
                            'ssh': {
                                'username': ssh_creds['username'],
                                'password': ssh_creds['password'],
                            },
                        }
                    }
                app = Appliance.from_provider(*app_args, **app_kwargs)

            if provider_type == 'ec2':
                # ec2 appliances need cloud-init to finish before configuration
                wait_for(
                    cloud_init_done, func_args=[app], num_sec=600, handle_exception=True, delay=5)
            if provider_type == 'gce':
                app.configure_gce()
            elif provider_type == 'openshift':
                # openshift appliances don't need any additional configuration
                pass
            else:
                app.configure()
            logger.info('Successfully Configured the appliance.')
    except Exception as e:
        logger.exception(e)
        logger.error('Appliance Configuration Failed')
        if not kwargs.get('deploy'):
            # Grab the anaconda log off the broken appliance for debugging
            app = Appliance.from_provider(kwargs['provider'], deploy_args['vm_name'])
            ssh_client = app.ssh_client()
            result = ssh_client.run_command('find /root/anaconda-post.log')
            if result.success:
                ssh_client.get_file('/root/anaconda-post.log',
                                    log_path.join('anaconda-post.log').strpath)
            ssh_client.close()
        destroy_vm(app.provider, deploy_args['vm_name'])
        return 10

    if kwargs.get('outfile') or kwargs.get('deploy'):
        # todo: to get rid of those scripts in jenkins or develop them from scratch
        with open(kwargs['outfile'], 'w') as outfile:
            if provider_type == 'openshift':
                output_data = {
                    'appliances':
                        [
                            {
                                'project': output['project'],
                                'db_host': output['external_ip'],
                                'hostname': ip,
                                'container': 'cloudforms-0',
                                'openshift_creds': {
                                    'hostname': provider_dict['hostname'],
                                    'username': ocp_creds['username'],
                                    'password': ocp_creds['password'],
                                    'ssh': {
                                        'username': ssh_creds['username'],
                                        'password': ssh_creds['password'],
                                    }
                                },
                            },
                        ],
                }
            else:
                output_data = {
                    'appliances':
                        [{'hostname': ip}]
                }
            yaml_data = yaml.safe_dump(output_data, default_flow_style=False)
            outfile.write(yaml_data)

        # In addition to the outfile, drop the ip address on stdout for easy parsing
        print(yaml_data)
コード例 #46
0
ファイル: remote.py プロジェクト: hhovsepy/cfme_tests
    config.pluginmanager.set_blocked('fixtures.parallelizer')
    for pluginarg in config.option.plugins:
        config.pluginmanager.consider_pluginarg(pluginarg)
    return config


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('slaveid', help='The name of this slave')
    parser.add_argument('appliance_json', help='The json data about the used appliance')
    parser.add_argument('ts', help='The timestap to use for collections')
    args = parser.parse_args()

    from cfme.utils.appliance import IPAppliance, stack
    appliance = IPAppliance.from_json(args.appliance_json)
    stack.push(appliance)

    # overwrite the default logger before anything else is imported,
    # to get our best chance at having everything import the replaced logger
    import cfme.utils.log
    cfme.utils.log.setup_for_worker(args.slaveid)

    from fixtures import terminalreporter
    from fixtures.pytest_store import store
    from cfme.utils import conf

    conf.runtime['env']['slaveid'] = args.slaveid
    conf.runtime['env']['ts'] = args.ts
    store.parallelizer_role = 'slave'