Code Example #1
def main():
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        'url',
        nargs='?',
        default=None,
        help='URL of target appliance, e.g. "https://ip_or_host/"')
    parser.add_argument(
        '--num-sec',
        default=600,
        type=int,
        dest='num_sec',
        help=
        'Maximum number of seconds to wait before giving up, default 600 (10 minutes)'
    )

    args = parser.parse_args()
    if args.url:
        ip_a = IPAppliance.from_url(args.url)
    else:
        ip_a = IPAppliance()
    result = ip_a.wait_for_web_ui(timeout=args.num_sec)

    if not result:
        return 1
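The main() above returns 1 when the web UI never comes up; that value only becomes the process exit status if the module forwards it through sys.exit(). That entry-point boilerplate is not part of the snippet, so here is a minimal sketch of the assumed wrapper:

import sys

if __name__ == '__main__':
    # Hypothetical wrapper, not shown in the snippet above: forward the return
    # value of main() so shells and CI jobs see a non-zero exit code when
    # wait_for_web_ui() times out.
    sys.exit(main())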
Code Example #2
def main():
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--address',
                        help='hostname or ip address of target appliance',
                        default=None)
    parser.add_argument('--vddk_url', help='url to download vddk pkg')
    parser.add_argument('--reboot',
                        help='reboot after installation ' +
                        '(required for proper operation)',
                        action="store_true")
    parser.add_argument('--force',
                        help='force installation if version detected',
                        action="store_true")

    args = parser.parse_args()

    if not args.address:
        appliance = get_or_create_current_appliance()
    else:
        appliance = IPAppliance(address=urlparse(args.address).netloc)

    appliance.install_vddk(reboot=args.reboot,
                           force=args.force,
                           vddk_url=args.vddk_url,
                           log_callback=log)
Code Example #3
def main():
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--address',
                        help='hostname or ip address of target appliance',
                        default=None)
    parser.add_argument('--sdk_url',
                        help='url to download sdk pkg',
                        default=cfme_data.get("basic_info",
                                              {}).get("netapp_sdk_url"))
    parser.add_argument('--restart',
                        help='restart evmserverd after installation ' +
                        '(required for proper operation)',
                        action="store_true")

    args = parser.parse_args()
    if not args.address:
        appliance = get_or_create_current_appliance()
    else:
        appliance = IPAppliance(address=args.address)
    print('Address: {}'.format(appliance.address))
    print('SDK URL: {}'.format(args.sdk_url))
    print('Restart: {}'.format(args.restart))

    appliance.install_netapp_sdk(sdk_url=args.sdk_url,
                                 reboot=args.restart,
                                 log_callback=log)
Code Example #4
File: ntp_clock_set.py Project: anewmanRH/cfme_tests
def main():
    parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("address", nargs="?", default=None, help="hostname or ip address of target appliance")
    args = parser.parse_args()
    ip_a = IPAppliance(args.address)
    ip_a.fix_ntp_clock()
    print("Time was set")
Code Example #5
File: update_rhel.py Project: FilipB/cfme_tests
def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    parser.add_argument("-u", "--url", help="url(s) to use for update",
        dest="urls", action="append")
    parser.add_argument("-c", "--cleanup", help="Whether to cleanup /etc/yum.repos.d before start",
        dest="cleanup", action="store_true")
    parser.add_argument("--no_wait_ui", help="Whether to NOT wait for UI after reboot",
        dest="no_wait_ui", action="store_false")
    parser.add_argument('--reboot', help='reboot after installation ' +
        '(required for proper operation)', action="store_true", default=False)

    args = parser.parse_args()
    ip_a = IPAppliance(args.address)
    # Don't reboot here, so we can print updates to the console when we do
    res = ip_a.update_rhel(*args.urls, reboot=False, streaming=True, cleanup=args.cleanup)

    if res.rc == 0:
        if args.reboot:
            print('Rebooting')
            ip_a.reboot(wait_for_web_ui=args.no_wait_ui)
        print('Appliance update complete')

    return res.rc
Code Example #6
File: update_rhel.py Project: vrutkovs/cfme_tests
def main():
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address',
                        help='hostname or ip address of target appliance')
    parser.add_argument("-u",
                        "--url",
                        help="url(s) to use for update",
                        dest="urls",
                        action="append")
    parser.add_argument('--reboot',
                        help='reboot after installation ' +
                        '(required for proper operation)',
                        action="store_true",
                        default=False)

    args = parser.parse_args()
    ip_a = IPAppliance(args.address)
    # Don't reboot here, so we can print updates to the console when we do
    res = ip_a.update_rhel(*args.urls, reboot=False, streaming=True)

    if res.rc == 0:
        if args.reboot:
            print('Rebooting')
            ip_a.reboot()
        print('Appliance update complete')

    return res.rc
Code Example #7
def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', nargs='?', default=None,
        help='hostname or ip address of target appliance')
    parser.add_argument('db_address',
        help='hostname or ip address of external database')
    parser.add_argument('--database', default='vmdb_production',
        help='name of the external database')
    parser.add_argument('--region', default=0, type=int,
        help='region to assign to the new DB')
    parser.add_argument('--username', default=credentials['database']['username'],
        help='username for external database')
    parser.add_argument('--password', default=credentials['database']['password'],
        help='password for external database')
    args = parser.parse_args()

    print('Initializing Appliance External DB')
    ip_a = IPAppliance(args.address)
    status, out = ip_a.enable_external_db(args.db_address, args.region, args.database,
        args.username, args.password)

    if status != 0:
        print('Enabling DB failed with error:')
        print(out)
        sys.exit(1)
    else:
        print('DB Enabled, evm watchdog should start the UI shortly.')
Code Example #8
def pytest_configure(config):
    global appliance
    global pool_id
    global sprout
    if not config.option.appliances and (config.option.use_sprout and
                                         config.option.sprout_appliances == 1):
        terminal = reporter()
        sprout = SproutClient.from_config()
        terminal.write("Requesting a single appliance from sprout...\n")
        pool_id = sprout.request_appliances(
            config.option.sprout_group,
            count=config.option.sprout_appliances,
            version=config.option.sprout_version,
            date=config.option.sprout_date,
            lease_time=config.option.sprout_timeout)
        terminal.write(
            "Appliance pool {}. Waiting for fulfillment ...\n".format(pool_id))
        at_exit(destroy_the_pool)
        if config.option.sprout_desc is not None:
            sprout.set_pool_description(pool_id,
                                        str(config.option.sprout_desc))
        try:
            result = wait_for(
                lambda: sprout.request_check(pool_id)["fulfilled"],
                num_sec=config.option.sprout_provision_timeout * 60,
                delay=5,
                message="requesting appliance was fulfilled")
        except:
            pool = sprout.request_check(pool_id)
            dump_pool_info(lambda x: terminal.write("{}\n".format(x)), pool)
            terminal.write("Destroying the pool on error.\n")
            sprout.destroy_pool(pool_id)
            raise
        terminal.write("Provisioning took {0:.1f} seconds\n".format(
            result.duration))
        request = sprout.request_check(pool_id)
        ip_address = request["appliances"][0]["ip_address"]
        terminal.write(
            "Appliance requested at address {} ...\n".format(ip_address))
        reset_timer(sprout, pool_id, config.option.sprout_timeout)
        terminal.write("Appliance lease timer is running ...\n")
        appliance = IPAppliance(address=ip_address)
        appliance.push()
        # Retrieve and print the template_name for Jenkins to pick up
        template_name = request["appliances"][0]["template_name"]
        conf.runtime["cfme_data"]["basic_info"][
            "appliance_template"] = template_name
        terminal.write("appliance_template=\"{}\";\n".format(template_name))
        with project_path.join('.appliance_template').open(
                'w') as template_file:
            template_file.write(
                'export appliance_template="{}"'.format(template_name))
        terminal.write("Single appliance Sprout setup finished.\n")
        # And set also the appliances_provider
        provider = request["appliances"][0]["provider"]
        terminal.write("appliance_provider=\"{}\";\n".format(provider))
        conf.runtime["cfme_data"]["basic_info"][
            "appliances_provider"] = provider
    yield
Code Example #9
def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance',
        nargs='?', default=None)
    parser.add_argument('--with_ssl', help='update for ssl connections', action="store_true")

    args = parser.parse_args()
    ip_a = IPAppliance(args.address)
    return ip_a.loosen_pgssl()
Code Example #10
def main():
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address',
                        nargs="?",
                        default=None,
                        help='hostname or ip address of target appliance')
    args = parser.parse_args()
    ip_a = IPAppliance(args.address)
    ip_a.fix_ntp_clock()
    print("Time was set")
Code Example #11
def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', nargs='?', default=None,
        help='hostname or ip address of target appliance')

    args = parser.parse_args()
    ip_a = IPAppliance(args.address)

    status = ip_a.precompile_assets()
    if status == 0:
        print("CFME UI worker restarted, UI should be available shortly")
    return status
Code Example #12
def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('hostname', nargs='?', default=None,
        help='hostname or ip address of target appliance')
    parser.add_argument('source', nargs='?', default='ManageIQ',
        help='Source Domain name')
    parser.add_argument('dest', nargs='?', default='Default',
        help='Destination Domain name')
    args = parser.parse_args()

    ip_a = IPAppliance(args.hostname)
    status, out = ip_a.clone_domain(args.source, args.dest)
    return status
Code Example #13
File: merkyl_deploy.py Project: FilipB/cfme_tests
def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('hostname', nargs='?', default=None,
        help='hostname or ip address of target appliance')
    parser.add_argument('start', action="store_true", default=False, help='Start Merkyl?')
    args = parser.parse_args()

    if args.hostname is not None:
        ip_a = IPAppliance(args.hostname)
    else:
        ip_a = IPAppliance.from_url()

    ip_a.deploy_merkyl(args.start)
Code Example #14
def main():
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address',
                        help='hostname or ip address of target appliance',
                        nargs='?',
                        default=None)
    parser.add_argument('--with_ssl',
                        help='update for ssl connections',
                        action="store_true")

    args = parser.parse_args()
    ip_a = IPAppliance(args.address)
    return ip_a.loosen_pgssl()
Code Example #15
def mark(api, provider_key, template, usable, diagnose):
    # set some defaults
    diagnosis = None
    build_number = None
    if not usable:
        build_number = os.environ.get('BUILD_NUMBER', None)
        if diagnose:
            # diagnose will return None on a usable appliance, so don't bother
            from utils.appliance import IPAppliance
            ipa = IPAppliance()
            diagnosis = ipa.diagnose_evm_failure()
            if diagnosis:
                logger.error('Appliance failed: {}'.format(diagnosis.split(os.linesep)[0]))
    trackerbot.mark_provider_template(api, provider_key, template, tested=True, usable=usable,
        diagnosis=diagnosis, build_number=build_number)
Code Example #16
File: plugin.py Project: jdemon519/cfme_tests
def pytest_configure(config):
    if config.getoption("appliances"):
        return
    if not config.getoption('--use-sprout'):
        return

    provision_request = SproutProvisioningRequest.from_config(config)

    mgr = config._sprout_mgr = SproutManager()
    requested_appliances = mgr.request_appliances(provision_request)
    config.option.appliances[:] = []
    appliances = config.option.appliances
    # Push an appliance to the stack to have proper reference for test collection
    # FIXME: this is a bad hack based on the need for control of collection partitioning
    appliance_stack.push(
        IPAppliance(address=requested_appliances[0]["ip_address"]))
    log.info("Appliances were provided:")
    for appliance in requested_appliances:
        url = "https://{}/".format(appliance["ip_address"])
        appliances.append(url)
        log.info("- %s is %s", url, appliance['name'])

    mgr.reset_timer()
    # Set the base_url for collection purposes on the first appliance
    conf.runtime["env"]["base_url"] = appliances[0]
    # Retrieve and print the template_name for Jenkins to pick up
    template_name = requested_appliances[0]["template_name"]
    conf.runtime["cfme_data"]["basic_info"]["appliance_template"] = template_name
    log.info("appliance_template: %s", template_name)
    with project_path.join('.appliance_template').open('w') as template_file:
        template_file.write('export appliance_template="{}"'.format(template_name))
    log.info("Sprout setup finished.")
Code Example #17
File: ipappliance.py Project: vprusa/cfme_tests
def call_appliance(order, result_dict, ip_address, action, args, kwargs):
    # Given a provider class, find the named method and call it with
    # *args. This could possibly be generalized for other CLI tools.
    appliance = IPAppliance(ip_address)
    result = None

    try:
        call = getattr(appliance, action)
    except AttributeError:
        raise Exception('Action "{}" not found'.format(action))
    if isinstance(getattr(type(appliance), action), property):
        result = call
    else:
        try:
            argspec = inspect.getargspec(call)
        except TypeError:
            try:
                result = call(*args, **kwargs)
            except Exception as e:
                result = e
        else:
            if argspec.keywords is not None or 'log_callback' in argspec.args:
                kwargs['log_callback'] = generate_log_callback(ip_address)
            try:
                result = call(*args, **kwargs)
            except Exception as e:
                result = e
    with lock:
        result_dict[order] = result
Code Example #18
def test_ipappliance_use_baseurl():
    ip_a = IPAppliance()
    ip_a_parsed = urlparse(ip_a.url)
    env_parsed = urlparse(store.base_url)
    assert (ip_a_parsed.scheme, ip_a_parsed.netloc) == (env_parsed.scheme,
                                                        env_parsed.netloc)
    assert ip_a.address in store.base_url
Code Example #19
def main():
    parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        "--address", help="hostname or ip address of target appliance", default=env.get("base_url", None)
    )
    parser.add_argument("--vddk_url", help="url to download vddk pkg")
    parser.add_argument(
        "--reboot", help="reboot after installation " + "(required for proper operation)", action="store_true"
    )
    parser.add_argument("--force", help="force installation if version detected", action="store_true")

    args = parser.parse_args()

    address = urlparse(args.address).netloc

    appliance = IPAppliance(address=address)
    appliance.install_vddk(reboot=args.reboot, force=args.force, vddk_url=args.vddk_url, log_callback=log)
Code Example #20
 def collection_appliance(self):
     # if parallelized, this is decided in sessionstart and written to the conf
     if store.parallelizer_role == 'slave':
         from utils.appliance import IPAppliance
         return IPAppliance(conf['.ui-coverage']['collection_appliance'])
     else:
         # otherwise, coverage only happens on one appliance
         return store.current_appliance
Code Example #21
def get_appliance(appliance_ip):
    """Checks an appliance is not None and if so, loads the appropriate things"""
    from utils.appliance import IPAppliance, get_or_create_current_appliance
    if not appliance_ip:
        app = get_or_create_current_appliance()
    else:
        app = IPAppliance(appliance_ip)
    return app
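A hypothetical call site for the helper above (the address is made up for illustration):

# Reuse whatever appliance the environment already points at
app = get_appliance(None)
# Or target a specific appliance by address
app = get_appliance('10.0.0.5')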
Code Example #22
def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('url', nargs='?', default=None,
        help='URL of target appliance, e.g. "https://ip_or_host/"')
    parser.add_argument('--num-sec', default=600, type=int, dest='num_sec',
        help='Maximum number of seconds to wait before giving up, default 600 (10 minutes)')

    args = parser.parse_args()
    if args.url:
        ip_a = IPAppliance.from_url(args.url)
    else:
        ip_a = IPAppliance()
    result = ip_a.wait_for_web_ui(timeout=args.num_sec)

    if not result:
        return 1
Code Example #23
def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address',
        help='hostname or ip address of target appliance')
    parser.add_argument('--region', default=0, type=int,
        help='region to assign to the new DB')
    args = parser.parse_args()

    print('Initializing Appliance Internal DB')
    ip_a = IPAppliance(args.address)
    status, out = ip_a.enable_internal_db(args.region)

    if status != 0:
        print('Enabling DB failed with error:')
        print(out)
        sys.exit(1)
    else:
        print('DB Enabled, evm watchdog should start the UI shortly.')
Code Example #24
 def current_appliance(self):
     if not self._current_appliance:
         from utils.appliance import IPAppliance
         base_url = conf.env['base_url']
         if base_url is None or str(base_url.lower()) == 'none':
             raise ValueError(
                 'No IP address specified! Specified: {}'.format(
                     repr(base_url)))
         self._current_appliance.append(IPAppliance(urlparse(base_url)))
     return self._current_appliance[-1]
Code Example #25
def main():
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('hostname',
                        nargs='?',
                        default=None,
                        help='hostname or ip address of target appliance')
    parser.add_argument('start',
                        action="store_true",
                        default=False,
                        help='Start Merkyl?')
    args = parser.parse_args()

    if args.hostname is not None:
        ip_a = IPAppliance(args.hostname)
    else:
        ip_a = IPAppliance.from_url()

    ip_a.deploy_merkyl(args.start)
Code Example #26
def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address',
        help='hostname or ip address of target appliance')
    parser.add_argument('--region', default=0, type=int,
        help='region to assign to the new DB')
    args = parser.parse_args()

    print('Initializing Appliance Internal DB')
    ip_a = IPAppliance(args.address)
    status, out = ip_a.enable_internal_db(args.region)

    if status != 0:
        print('Enabling DB failed with error:')
        print(out)
        sys.exit(1)
    else:
        print('DB Enabled, evm watchdog should start the UI shortly.')
Code Example #27
def fn(method, *args, **kwargs):
    """Helper to access the right properties"""
    app = IPAppliance(kwargs['appliance_ip'])
    descriptor = getattr(IPAppliance, method)
    if isinstance(descriptor, (cached_property, property)):
        out = getattr(app, method)
    else:
        out = getattr(app, method)(*args, **kwargs)
    if out is not None:
        print(out)
Code Example #28
def test_context_hack(monkeypatch):

    ip_a = IPAppliance.from_url('http://127.0.0.2/')

    def not_good(*k):
        raise RuntimeError()
    monkeypatch.setattr(ip_a, '_screenshot_capture_at_context_leave', not_good)

    with pytest.raises(ValueError):
        with ip_a:
            raise ValueError("test")
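This test (repeated verbatim as example #33) pins down the context-manager behaviour of IPAppliance: a failure in the screenshot hook at context exit must not mask the exception raised inside the block. A hedged illustration of the guarded pattern, where do_work is hypothetical:

ip_a = IPAppliance.from_url('http://127.0.0.2/')
with ip_a:
    do_work(ip_a)  # hypothetical work; any exception raised here propagates out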
Code Example #29
File: update_rhel.py Project: petrblaho/cfme_tests
def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    parser.add_argument("-u", "--url", help="url(s) to use for update",
        dest="urls", action="append")
    parser.add_argument('--reboot', help='reboot after installation ' +
        '(required for proper operation)', action="store_true")

    args = parser.parse_args()

    ip_a = IPAppliance(args.address)
    status, out = ip_a.update_rhel(*args.urls)

    if status == 0:
        print(out)
        print('Appliance update complete')
        if args.reboot:
            ip_a.reboot()

    return status
Code Example #30
File: install_vddk.py Project: dajohnso/cfme_tests
def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '--address',
        help='hostname or ip address of target appliance', default=None)
    parser.add_argument('--vddk_url', help='url to download vddk pkg')
    parser.add_argument('--reboot', help='reboot after installation ' +
                        '(required for proper operation)', action="store_true")
    parser.add_argument('--force',
                        help='force installation if version detected', action="store_true")

    args = parser.parse_args()

    if not args.address:
        appliance = get_or_create_current_appliance()
    else:
        appliance = IPAppliance(address=urlparse(args.address).netloc)

    appliance.install_vddk(
        reboot=args.reboot, force=args.force, vddk_url=args.vddk_url, log_callback=log)
Code Example #31
File: update_rhel.py Project: pombredanne/cfme_tests
def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address', help='hostname or ip address of target appliance')
    parser.add_argument("-u", "--url", help="url(s) to use for update",
        dest="urls", action="append")
    parser.add_argument('--reboot', help='reboot after installation ' +
        '(required for proper operation)', action="store_true", default=False)

    args = parser.parse_args()
    ip_a = IPAppliance(args.address)
    # Don't reboot here, so we can print updates to the console when we do
    res = ip_a.update_rhel(*args.urls, reboot=False, streaming=True)

    if res.rc == 0:
        if args.reboot:
            print('Rebooting')
            ip_a.reboot()
        print('Appliance update complete')

    return res.rc
Code Example #32
def main():
    parser = argparse.ArgumentParser(epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '--address',
        help='hostname or ip address of target appliance',
        default=parse_if_not_none(env.get("base_url", None)))
    parser.add_argument(
        '--sdk_url',
        help='url to download sdk pkg',
        default=cfme_data.get("basic_info", {}).get("netapp_sdk_url", None))
    parser.add_argument('--restart', help='restart evmserverd after installation ' +
        '(required for proper operation)', action="store_true")

    args = parser.parse_args()
    print('Address: {}'.format(args.address))
    print('SDK URL: {}'.format(args.sdk_url))
    print('Restart: {}'.format(args.restart))

    appliance = IPAppliance(address=args.address)
    appliance.install_netapp_sdk(sdk_url=args.sdk_url, reboot=args.restart, log_callback=log)
Code Example #33
def test_context_hack(monkeypatch):

    ip_a = IPAppliance.from_url('http://127.0.0.2/')

    def not_good(*k):
        raise RuntimeError()

    monkeypatch.setattr(ip_a, '_screenshot_capture_at_context_leave', not_good)

    with pytest.raises(ValueError):
        with ip_a:
            raise ValueError("test")
Code Example #34
def main():
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('address',
                        nargs='?',
                        default=None,
                        help='hostname or ip address of target appliance')
    parser.add_argument('-R',
                        '--reverse',
                        help='flag to indicate the patch should be undone',
                        action='store_true',
                        default=False,
                        dest='reverse')

    args = parser.parse_args()
    return IPAppliance(args.address).patch_ajax_wait(args.reverse)
Code Example #35
 def provision_appliances(self,
                          count=1,
                          preconfigured=False,
                          version=None,
                          stream=None,
                          provider=None,
                          lease_time=120,
                          ram=None,
                          cpu=None):
     # If we specify version, stream is ignored because we will get that specific version
     if version:
         stream = get_stream(version)
     # If we specify stream but not version, sprout will give us latest version of that stream
     elif stream:
         pass
     # If we dont specify either, we will get the same version as current appliance
     else:
         stream = get_stream(current_appliance.version)
         version = current_appliance.version.vstring
     request_id = self.call_method('request_appliances',
                                   preconfigured=preconfigured,
                                   version=version,
                                   group=stream,
                                   provider=provider,
                                   lease_time=lease_time,
                                   ram=ram,
                                   cpu=cpu,
                                   count=count)
     wait_for(lambda: self.call_method('request_check', str(request_id))[
         'finished'],
              num_sec=300,
              message='provision {} appliance(s) from sprout'.format(count))
     data = self.call_method('request_check', str(request_id))
     logger.debug(data)
     appliances = []
     for appliance in data['appliances']:
         appliances.append(IPAppliance(appliance['ip_address']))
     return appliances, request_id
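The method above drives the same Sprout API that examples #8 and #39 use through SproutClient, so it presumably lives on that client class. A hedged usage sketch; the import path and constructor are assumptions based on those examples:

from utils.sprout import SproutClient  # import path assumed

client = SproutClient.from_config()
# Request two preconfigured appliances on a two-hour lease; the method blocks
# until Sprout reports the request fulfilled, then wraps each IP in IPAppliance.
appliances, request_id = client.provision_appliances(
    count=2, preconfigured=True, lease_time=120)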
Code Example #36
 def ipapp(self):
     return IPAppliance(self.ip_address)
Code Example #37
def test_ipappliance_from_address():
    address = '1.2.3.4'
    ip_a = IPAppliance(address)
    assert ip_a.address == address
    assert ip_a.url == 'https://{}/'.format(address)
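Examples #18, #37, and #43 between them exercise the three construction patterns that the scripts on this page rely on. A condensed sketch, assuming the utils.appliance import path used in examples #15, #20, and #21:

from utils.appliance import IPAppliance

by_address = IPAppliance('1.2.3.4')                # explicit hostname or IP
by_url = IPAppliance.from_url('https://1.2.3.4/')  # parsed from a full URL
from_env = IPAppliance()                           # falls back to the configured base_url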
Code Example #38
def test_ipappliance_managed_providers(infra_provider):
    ip_a = IPAppliance()
    assert infra_provider in ip_a.managed_known_providers
Code Example #39
    def __init__(self, config):
        self.config = config
        self.session = None
        self.session_finished = False
        self.countfailures = 0
        self.collection = OrderedDict()
        self.sent_tests = 0
        self.log = create_sublogger('master')
        self.maxfail = config.getvalue("maxfail")
        self._failed_collection_errors = {}
        self.terminal = store.terminalreporter
        self.trdist = None
        self.slaves = SlaveDict()
        self.slave_urls = SlaveDict()
        self.slave_tests = defaultdict(set)
        self.test_groups = self._test_item_generator()
        self.failed_slave_test_groups = deque()
        self.slave_spawn_count = 0
        self.sprout_client = None
        self.sprout_timer = None
        self.sprout_pool = None
        if not self.config.option.use_sprout:
            # Without Sprout
            self.appliances = self.config.option.appliances
        else:
            # Using sprout
            self.sprout_client = SproutClient.from_config()
            self.terminal.write(
                "Requesting {} appliances from Sprout at {}\n".format(
                    self.config.option.sprout_appliances,
                    self.sprout_client.api_entry))
            pool_id = self.sprout_client.request_appliances(
                self.config.option.sprout_group,
                count=self.config.option.sprout_appliances,
                version=self.config.option.sprout_version,
                date=self.config.option.sprout_date,
                lease_time=self.config.option.sprout_timeout)
            self.terminal.write(
                "Pool {}. Waiting for fulfillment ...\n".format(pool_id))
            self.sprout_pool = pool_id
            at_exit(self.sprout_client.destroy_pool, self.sprout_pool)
            if self.config.option.sprout_desc is not None:
                self.sprout_client.set_pool_description(
                    pool_id, str(self.config.option.sprout_desc))
            try:
                result = wait_for(
                    lambda: self.sprout_client.request_check(self.sprout_pool)[
                        "fulfilled"],
                    num_sec=self.config.option.sprout_provision_timeout * 60,
                    delay=5,
                    message="requesting appliances was fulfilled")
            except:
                pool = self.sprout_client.request_check(self.sprout_pool)
                dump_pool_info(lambda x: self.terminal.write("{}\n".format(x)),
                               pool)
                self.terminal.write("Destroying the pool on error.\n")
                self.sprout_client.destroy_pool(pool_id)
                raise
            else:
                pool = self.sprout_client.request_check(self.sprout_pool)
                dump_pool_info(lambda x: self.terminal.write("{}\n".format(x)),
                               pool)
            self.terminal.write("Provisioning took {0:.1f} seconds\n".format(
                result.duration))
            request = self.sprout_client.request_check(self.sprout_pool)
            self.appliances = []
            # Push an appliance to the stack to have proper reference for test collection
            IPAppliance(address=request["appliances"][0]["ip_address"]).push()
            self.terminal.write("Appliances were provided:\n")
            for appliance in request["appliances"]:
                url = "https://{}/".format(appliance["ip_address"])
                self.appliances.append(url)
                self.terminal.write("- {} is {}\n".format(
                    url, appliance['name']))
            map(lambda a: "https://{}/".format(a["ip_address"]),
                request["appliances"])
            self._reset_timer()
            # Set the base_url for collection purposes on the first appliance
            conf.runtime["env"]["base_url"] = self.appliances[0]
            # Retrieve and print the template_name for Jenkins to pick up
            template_name = request["appliances"][0]["template_name"]
            conf.runtime["cfme_data"]["basic_info"][
                "appliance_template"] = template_name
            self.terminal.write(
                "appliance_template=\"{}\";\n".format(template_name))
            with project_path.join('.appliance_template').open(
                    'w') as template_file:
                template_file.write(
                    'export appliance_template="{}"'.format(template_name))
            self.terminal.write("Parallelized Sprout setup finished.\n")
            self.slave_appliances_data = {}
            for appliance in request["appliances"]:
                self.slave_appliances_data[appliance["ip_address"]] = (
                    appliance["template_name"], appliance["provider"])

        # set up the ipc socket
        zmq_endpoint = 'tcp://127.0.0.1:{}'.format(random_port())
        ctx = zmq.Context.instance()
        self.sock = ctx.socket(zmq.ROUTER)
        self.sock.bind('%s' % zmq_endpoint)

        # clean out old slave config if it exists
        slave_config = conf_path.join('slave_config.yaml')
        slave_config.check() and slave_config.remove()

        # write out the slave config
        conf.runtime['slave_config'] = {
            'args': self.config.args,
            'options': self.config.option.__dict__,
            'zmq_endpoint': zmq_endpoint,
            'sprout': self.sprout_client is not None and self.sprout_pool is not None,
        }
        if hasattr(self, "slave_appliances_data"):
            conf.runtime['slave_config'][
                "appliance_data"] = self.slave_appliances_data
        conf.runtime['slave_config']['options'][
            'use_sprout'] = False  # Slaves don't use sprout
        conf.save('slave_config')

        for i, base_url in enumerate(self.appliances):
            self.slave_urls.add(base_url)

        for slave in sorted(self.slave_urls):
            self.print_message("using appliance {}".format(
                self.slave_urls[slave]),
                               slave,
                               green=True)

        # Start the recv queue
        self._recv_queue = deque()
        recv_queuer = Thread(target=_recv_queue, args=(self, ))
        recv_queuer.daemon = True
        recv_queuer.start()
Code Example #40
File: pytest_store.py Project: vprusa/cfme_tests
 def current_appliance(self):
     if not self._current_appliance:
         from utils.appliance import IPAppliance
         self._current_appliance.append(IPAppliance(urlparse(conf.env['base_url'])))
     return self._current_appliance[-1]
Code Example #41
def test_workload_capacity_and_utilization_rep(appliance, request, scenario, setup_perf_provider):
    """Runs through provider based scenarios enabling C&U and replication, run for a set period of
    time. Memory Monitor creates graphs and summary at the end of each scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = appliance.ssh_client()

    ssh_master_args = {
        'hostname': scenario['replication_master']['ip_address'],
        'username': scenario['replication_master']['ssh']['username'],
        'password': scenario['replication_master']['ssh']['password']}
    master_appliance = IPAppliance(address=scenario['replication_master']['ip_address'],
                                   openshift_creds=ssh_master_args)

    ssh_client_master = SSHClient(**ssh_master_args)
    logger.debug('Scenario: {}'.format(scenario['name']))

    is_pglogical = True if scenario['replication'] == 'pglogical' else False

    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical scenario
    appliance.set_pglogical_replication(replication_type=':none')
    # Spawn tail beforehand to prevent unnecessary waiting on MiqServer starting since the appliance
    # under test is cleaned first, followed by the master appliance
    sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
    sshtail_evm.set_initial_file_end()
    logger.info('Clean appliance under test ({})'.format(ssh_client))
    appliance.clean_appliance()
    logger.info('Clean master appliance ({})'.format(ssh_client_master))
    master_appliance.clean_appliance()  # Clean Replication master appliance

    if is_pglogical:
        scenario_data = {'appliance_ip': appliance.hostname,
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (pgLogical)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario}
    else:
        scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (RubyRep)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario}
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(evm_tail=sshtail_evm, poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Configure Replication
    if is_pglogical:
        # Setup appliance under test to :remote
        appliance.set_pglogical_replication(replication_type=':remote')
        # Setup master appliance to :global
        master_appliance.set_pglogical_replication(replication_type=':global')
        # Setup master to subscribe:
        master_appliance.add_pglogical_replication_subscription(ssh_client_master,
            appliance.address)
    else:
        # Setup local towards Master
        appliance.set_rubyrep_replication(scenario['replication_master']['ip_address'])
        # Force uninstall rubyrep for this region from master (Unsure if still needed)
        # ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
        # Turn on DB Sync role
        appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    # Turn off replication:
    if is_pglogical:
        appliance.set_pglogical_replication(replication_type=':none')
    else:
        appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
Code Example #42
File: __init__.py Project: patchkez/cfme_tests
 def get(self, slave):
     with self.pool_lock:
         if not self._pool:
             for test_group in self.test_groups:
                 self._pool.append(test_group)
                 for test in test_group:
                     if '[' in test:
                         found_prov = []
                         for pv in self.provs:
                             if pv in test:
                                 found_prov.append(pv)
                                 break
                         provs = list(set(found_prov).intersection(self.provs))
                         if provs:
                             self.used_prov = self.used_prov.union(set(provs))
             if self.used_prov:
                 self.ratio = float(len(self.slaves)) / float(len(self.used_prov))
             else:
                 self.ratio = 0.0
         if not self._pool:
             raise StopIteration
         current_allocate = self.slave_allocation.get(slave, None)
         # num_provs_list = [len(v) for k, v in self.slave_allocation.iteritems()]
         # average_num_provs = sum(num_provs_list) / float(len(self.slaves))
         appliance_num_limit = 2
         for test_group in self._pool:
             for test in test_group:
                 # If the test is parametrized...
                 if '[' in test:
                     found_prov = []
                     for pv in self.provs:
                         if pv in test:
                             found_prov.append(pv)
                             break
                     # The line below can probably be removed now, since we compare
                     # providers in the loop above with self.provs, which is a list
                     # of all providers.
                     provs = list(set(found_prov).intersection(self.provs))
                     # If the parametrization contains a provider...
                     if provs:
                         prov = provs[0]
                         # num_slave_with_prov = len([sl for sl, provs_list
                         #    in self.slave_allocation.iteritems()
                         #    if prov in provs_list])
                         # If this slave/appliance already has providers then...
                         if current_allocate:
                             # If the slave has _our_ provider
                             if prov in current_allocate:
                                 # provider is already with the slave, so just return the tests
                                 self._pool.remove(test_group)
                                 return test_group
                             # If the slave doesn't have _our_ provider
                             else:
                                 # Check to see how many slaves there are with this provider
                                 if len(self.slave_allocation[slave]) >= appliance_num_limit:
                                     continue
                                 else:
                                     # Adding provider to slave since there are not too many
                                     self.slave_allocation[slave].append(prov)
                                     self._pool.remove(test_group)
                                     return test_group
                         # If this slave doesn't have any providers...
                         else:
                             # Adding provider to slave
                             self.slave_allocation[slave].append(prov)
                             self._pool.remove(test_group)
                             return test_group
                     else:
                         # No providers - ie, not a provider parametrized test
                         self._pool.remove(test_group)
                         return test_group
                 else:
                     # No params, so no need to think about providers
                     self._pool.remove(test_group)
                     return test_group
             # Here means no tests were able to be sent
         for test_group in self._pool:
             for test in test_group:
                 # If the test is parametrized...
                 if '[' in test:
                     found_prov = []
                     for pv in self.provs:
                         if pv in test:
                             found_prov.append(pv)
                             break
                     # The line below can probably be removed now, since we compare
                     # providers in the loop above with self.provs, which is a list
                     # of all providers.
                     provs = list(set(found_prov).intersection(self.provs))
                     # If the parametrization contains a provider...
                     if provs:
                         prov = provs[0]
                         # Already too many slaves with provider
                         app_url = self.slave_urls[slave]
                         app_ip = urlparse(app_url).netloc
                         app = IPAppliance(app_ip)
                         self.print_message('cleansing appliance', slave,
                             purple=True)
                         try:
                             app.delete_all_providers()
                         except:
                             self.print_message('could not cleanse', slave,
                             red=True)
                         self.slave_allocation[slave] = [prov]
                         self._pool.remove(test_group)
                         return test_group
         return []
Code Example #43
def test_ipappliance_from_url():
    address = '1.2.3.4'
    url = 'http://%s/' % address
    ip_a = IPAppliance.from_url(url)
    assert ip_a.url == url
    assert ip_a.address == address
Code Example #44
File: clone_template.py Project: psav/cfme_tests
def main(**kwargs):
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get("deploy", None):
        kwargs["configure"] = True
        kwargs["outfile"] = "appliance_ip_address_1"
        provider_data = utils.conf.provider_data
        providers = provider_data["management_systems"]
        provider_dict = provider_data["management_systems"][kwargs["provider"]]
        credentials = {
            "username": provider_dict["username"],
            "password": provider_dict["password"],
            "tenant": provider_dict["template_upload"].get("tenant_admin", "admin"),
            "auth_url": provider_dict.get("auth_url", None),
        }
        provider = get_mgmt(kwargs["provider"], providers=providers, credentials=credentials)
        flavors = provider_dict["template_upload"].get("flavors", ["m1.medium"])
        provider_type = provider_data["management_systems"][kwargs["provider"]]["type"]
        deploy_args = {"vm_name": kwargs["vm_name"], "template": kwargs["template"]}
    else:
        provider = get_mgmt(kwargs["provider"])
        provider_dict = cfme_data["management_systems"][kwargs["provider"]]
        provider_type = provider_dict["type"]
        flavors = cfme_data["appliance_provisioning"]["default_flavors"].get(provider_type, [])
        deploy_args = {"vm_name": kwargs["vm_name"], "template": kwargs["template"]}

    logger.info("Connecting to {}".format(kwargs["provider"]))

    if kwargs.get("destroy", None):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # This returns True if destroy fails to give POSIXy exit codes (0 is good, False is 0, etc)
        return not destroy_vm(provider, deploy_args["vm_name"])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == "rhevm":
        cluster = provider_dict.get("default_cluster", kwargs.get("cluster", None))
        if cluster is None:
            raise Exception("--cluster is required for rhev instances and default is not set")
        deploy_args["cluster"] = cluster

        if kwargs.get("place_policy_host", None) and kwargs.get("place_policy_aff", None):
            deploy_args["placement_policy_host"] = kwargs["place_policy_host"]
            deploy_args["placement_policy_affinity"] = kwargs["place_policy_aff"]
    elif provider_type == "ec2":
        # ec2 doesn't have an api to list available flavors, so the first flavor is the default
        try:
            flavor = kwargs.get("flavor", None) or flavors[0]
        except IndexError:
            raise Exception("--flavor is required for EC2 instances and default is not set")
        deploy_args["instance_type"] = flavor
    elif provider_type == "openstack":
        # filter openstack flavors based on what's available
        available_flavors = provider.list_flavor()
        flavors = filter(lambda f: f in available_flavors, flavors)
        try:
            flavor = kwargs.get("flavor", None) or flavors[0]
        except IndexError:
            raise Exception(
                "--flavor is required for RHOS instances and " "default is not set or unavailable on provider"
            )
        # flavour? Thanks, psav...
        deploy_args["flavour_name"] = flavor

        if "network" in provider_dict:
            # support rhos4 network names
            deploy_args["network_name"] = provider_dict["network"]

        provider_pools = [p.name for p in provider.api.floating_ip_pools.list()]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get("floating_ip_pool", None) or provider_pools[0]
        except IndexError:
            raise Exception("No floating IP pools available on provider")

        if floating_ip_pool is not None:
            deploy_args["floating_ip_pool"] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict["allowed_datastores"]
    elif provider_type == "scvmm":
        deploy_args["host_group"] = provider_dict["provisioning"]["host_group"]
    elif provider_type == "gce":
        deploy_args["ssh_key"] = "{user_name}:{public_key}".format(
            user_name=cred["ssh"]["ssh-user"], public_key=cred["ssh"]["public_key"]
        )
    # Do it!
    try:
        logger.info(
            "Cloning {} to {} on {}".format(deploy_args["template"], deploy_args["vm_name"], kwargs["provider"])
        )
        provider.deploy_template(**deploy_args)
    except Exception as e:
        logger.exception(e)
        logger.error("Clone failed")
        if kwargs.get("cleanup", None):
            logger.info("attempting to destroy {}".format(deploy_args["vm_name"]))
            destroy_vm(provider, deploy_args["vm_name"])
            return 12

    if provider.is_vm_running(deploy_args["vm_name"]):
        logger.info("VM {} is running".format(deploy_args["vm_name"]))
    else:
        logger.error("VM is not running")
        return 10

    try:
        ip, time_taken = wait_for(provider.get_ip_address, [deploy_args["vm_name"]], num_sec=1200, fail_condition=None)
        logger.info("IP Address returned is {}".format(ip))
    except Exception as e:
        logger.exception(e)
        logger.error("IP address not returned")
        return 10

    try:
        if kwargs.get("configure", None):
            logger.info("Configuring appliance, this can take a while.")
            if kwargs.get("deploy", None):
                app = IPAppliance(address=ip)
            else:
                app = Appliance(kwargs["provider"], deploy_args["vm_name"])
            if provider_type == "gce":
                app.configure_gce()
            else:
                app.configure()
            logger.info("Successfully Configured the appliance.")
    except Exception as e:
        logger.exception(e)
        logger.error("Appliance Configuration Failed")
        if not kwargs.get("deploy", None):
            app = Appliance(kwargs["provider"], deploy_args["vm_name"])
            ssh_client = app.ssh_client()
            status, output = ssh_client.run_command("find /root/anaconda-post.log")
            if status == 0:
                ssh_client.get_file("/root/anaconda-post.log", log_path.join("anaconda-post.log").strpath)
            ssh_client.close()
        return 10

    if kwargs.get("outfile", None) or kwargs.get("deploy", None):
        with open(kwargs["outfile"], "w") as outfile:
            outfile.write("appliance_ip_address={}\n".format(ip))

    # In addition to the outfile, drop the ip address on stdout for easy parsing
    print(ip)
Code Example #45
def main(**kwargs):
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get('deploy', None):
        kwargs['configure'] = True
        provider_data = utils.conf.provider_data
        providers = provider_data['management_systems']
        provider_dict = provider_data['management_systems'][kwargs['provider']]
        credentials =\
            {'username': provider_dict['username'],
             'password': provider_dict['password'],
             'tenant': provider_dict['template_upload'].get('tenant_admin', 'admin'),
             'auth_url': provider_dict.get('auth_url', None),
             }
        provider = get_mgmt(kwargs['provider'], providers=providers, credentials=credentials)
        flavors = provider_dict['template_upload'].get('flavors', ['m1.medium'])
        provider_type = provider_data['management_systems'][kwargs['provider']]['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }
    else:
        provider = get_mgmt(kwargs['provider'])
        provider_dict = cfme_data['management_systems'][kwargs['provider']]
        provider_type = provider_dict['type']
        flavors = cfme_data['appliance_provisioning']['default_flavors'].get(provider_type, [])
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }

    logger.info('Connecting to {}'.format(kwargs['provider']))

    if kwargs.get('destroy', None):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # This returns True if destroy fails to give POSIXy exit codes (0 is good, False is 0, etc)
        return not destroy_vm(provider, deploy_args['vm_name'])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', kwargs.get('cluster', None))
        if cluster is None:
            raise Exception('--cluster is required for rhev instances and default is not set')
        deploy_args['cluster'] = cluster

        if kwargs.get('place_policy_host', None) and kwargs.get('place_policy_aff', None):
            deploy_args['placement_policy_host'] = kwargs['place_policy_host']
            deploy_args['placement_policy_affinity'] = kwargs['place_policy_aff']
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so the first flavor is the default
        try:
            flavor = kwargs.get('flavor', None) or flavors[0]
        except IndexError:
            raise Exception('--flavor is required for EC2 instances and default is not set')
        deploy_args['instance_type'] = flavor
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available
        available_flavors = provider.list_flavor()
        flavors = filter(lambda f: f in available_flavors, flavors)
        try:
            flavor = kwargs.get('flavor', None) or flavors[0]
        except IndexError:
            raise Exception('--flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        # flavour? Thanks, psav...
        deploy_args['flavour_name'] = flavor

        if 'network' in provider_dict:
            # support rhos4 network names
            deploy_args['network_name'] = provider_dict['network']

        provider_pools = [p.name for p in provider.api.floating_ip_pools.list()]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get('floating_ip_pool', None) or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict["allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']

    # Do it!
    try:
        logger.info('Cloning {} to {} on {}'.format(deploy_args['template'], deploy_args['vm_name'],
                                                    kwargs['provider']))
        provider.deploy_template(**deploy_args)
    except Exception as e:
        logger.exception(e)
        logger.error('Clone failed')
        if kwargs.get('cleanup', None):
            logger.info('attempting to destroy {}'.format(deploy_args['vm_name']))
            destroy_vm(provider, deploy_args['vm_name'])
        return 12

    if provider.is_vm_running(deploy_args['vm_name']):
        logger.info("VM {} is running".format(deploy_args['vm_name']))
    else:
        logger.error("VM is not running")
        return 10

    try:
        ip, time_taken = wait_for(provider.get_ip_address, [deploy_args['vm_name']], num_sec=1200,
                                  fail_condition=None)
        logger.info('IP Address returned is {}'.format(ip))
    except Exception as e:
        logger.exception(e)
        logger.error('IP address not returned')
        return 10

    try:
        if kwargs.get('configure', None):
            logger.info('Configuring appliance, this can take a while.')
            if kwargs.get('deploy', None):
                app = IPAppliance(address=ip)
            else:
                app = Appliance(kwargs['provider'], deploy_args['vm_name'])
            app.configure()
            logger.info('Successfully Configured the appliance.')
    except Exception as e:
        logger.exception(e)
        logger.error('Appliance Configuration Failed')
        if not kwargs.get('deploy', None):
            app = Appliance(kwargs['provider'], deploy_args['vm_name'])
            ssh_client = app.ssh_client()
            status, output = ssh_client.run_command('find /root/anaconda-post.log')
            if status == 0:
                ssh_client.get_file('/root/anaconda-post.log',
                                    log_path.join('anaconda-post.log').strpath)
            ssh_client.close()
        return 10

    if kwargs.get('outfile', None):
        with open(kwargs['outfile'], 'w') as outfile:
            outfile.write("appliance_ip_address={}\n".format(ip))

    # In addition to the outfile, drop the ip address on stdout for easy parsing
    print(ip)
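main() above only takes keyword arguments; the snippet does not show the parser that feeds it. As a hedged sketch (the flag names simply mirror the kwargs the function reads and are otherwise assumptions), a thin argparse wrapper could look like this:

if __name__ == '__main__':
    import argparse
    import sys

    # Sketch only: these flags mirror the kwargs read by main() above.
    parser = argparse.ArgumentParser()
    parser.add_argument('--provider', required=True, help='provider key from cfme_data')
    parser.add_argument('--vm_name', required=True, help='name of the VM to create')
    parser.add_argument('--template', required=True, help='template to clone')
    parser.add_argument('--flavor', default=None, help='flavor / instance type')
    parser.add_argument('--cluster', default=None, help='cluster (rhevm only)')
    parser.add_argument('--configure', action='store_true', help='configure the appliance after deploy')
    parser.add_argument('--outfile', default=None, help='file to record the appliance IP in')
    parser.add_argument('--cleanup', action='store_true', help='destroy the VM if the clone fails')
    args = parser.parse_args()
    sys.exit(main(**vars(args)) or 0)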
Code example #46
def test_ipappliance_from_url():
    address = '1.2.3.4'
    url = 'http://%s/' % address
    ip_a = IPAppliance.from_url(url)
    assert ip_a.url == url
    assert ip_a.address == address
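The assertions above hinge on from_url() keeping the original URL and exposing the host part as .address. A quick standard-library sketch of the parsing step it presumably relies on:

from urlparse import urlparse  # urllib.parse on Python 3

url = 'http://1.2.3.4/'
# netloc is the host part that from_url() presumably stores as .address
assert urlparse(url).netloc == '1.2.3.4'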
Code example #47
File: remote.py Project: dajohnso/cfme_tests
    for pluginarg in config.option.plugins:
        config.pluginmanager.consider_pluginarg(pluginarg)
    config.pluginmanager.consider_pluginarg('no:fixtures.parallelizer')
    return config


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('slaveid', help='The name of this slave')
    parser.add_argument('appliance_json', help='The json data about the used appliance')
    parser.add_argument('ts', help='The timestamp to use for collections')
    args = parser.parse_args()

    from utils.appliance import IPAppliance, stack
    appliance = IPAppliance.from_json(args.appliance_json)
    stack.push(appliance)

    # overwrite the default logger before anything else is imported,
    # to get our best chance at having everything import the replaced logger
    import utils.log
    utils.log.setup_for_worker(args.slaveid)

    from fixtures import terminalreporter
    from fixtures.pytest_store import store
    from utils import conf

    conf.runtime['env']['slaveid'] = args.slaveid
    conf.runtime['env']['ts'] = args.ts
    store.parallelizer_role = 'slave'
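The block above is the slave-side entry point; how the JSON payload is produced is not shown. A hedged sketch of a master process launching such a slave follows (the payload keys are an assumption for illustration, since the shape IPAppliance.from_json() expects is not visible in this snippet):

import json
import subprocess
import sys
import time

# Sketch only: the payload shape is assumed, not taken from the snippet above.
appliance_json = json.dumps({'base_url': 'https://10.0.0.1/'})
slave = subprocess.Popen(
    [sys.executable, 'remote.py', 'slave1', appliance_json, str(int(time.time()))])
slave.wait()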
Code example #48
def test_ipappliance_from_url():
    address = '1.2.3.4'
    url = 'http://{}/'.format(address)
    ip_a = IPAppliance.from_url(url)
    assert ip_a.url == url
    assert ip_a.address == address
Code example #50
def main(**kwargs):
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get('deploy', None):
        kwargs['configure'] = True
        kwargs['outfile'] = 'appliance_ip_address_1'
        provider_data = utils.conf.provider_data
        providers = provider_data['management_systems']
        provider_dict = provider_data['management_systems'][kwargs['provider']]
        credentials = {
            'username': provider_dict['username'],
            'password': provider_dict['password'],
            'tenant': provider_dict['template_upload'].get('tenant_admin', 'admin'),
            'auth_url': provider_dict.get('auth_url', None),
        }
        provider = get_mgmt(kwargs['provider'], providers=providers, credentials=credentials)
        flavors = provider_dict['template_upload'].get('flavors', ['m1.medium'])
        provider_type = provider_data['management_systems'][kwargs['provider']]['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }
    else:
        provider = get_mgmt(kwargs['provider'])
        provider_dict = cfme_data['management_systems'][kwargs['provider']]
        provider_type = provider_dict['type']
        flavors = cfme_data['appliance_provisioning']['default_flavors'].get(provider_type, [])
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }

    logger.info('Connecting to {}'.format(kwargs['provider']))

    if kwargs.get('destroy', None):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # destroy_vm() returns True on success, so invert it for a POSIX-y
        # exit code (False -> 0 -> success, True -> 1 -> failure)
        return not destroy_vm(provider, deploy_args['vm_name'])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', kwargs.get('cluster', None))
        if cluster is None:
            raise Exception('--cluster is required for rhev instances and default is not set')
        deploy_args['cluster'] = cluster

        if kwargs.get('place_policy_host', None) and kwargs.get('place_policy_aff', None):
            deploy_args['placement_policy_host'] = kwargs['place_policy_host']
            deploy_args['placement_policy_affinity'] = kwargs['place_policy_aff']
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so the first flavor is the default
        try:
            flavor = kwargs.get('flavor', None) or flavors[0]
        except IndexError:
            raise Exception('--flavor is required for EC2 instances and default is not set')
        deploy_args['instance_type'] = flavor
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available
        available_flavors = provider.list_flavor()
        flavors = [f for f in flavors if f in available_flavors]
        try:
            flavor = kwargs.get('flavor', None) or flavors[0]
        except IndexError:
            raise Exception('--flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        # flavour? Thanks, psav...
        deploy_args['flavour_name'] = flavor

        if 'network' in provider_dict:
            # support rhos4 network names
            deploy_args['network_name'] = provider_dict['network']

        provider_pools = [p.name for p in provider.api.floating_ip_pools.list()]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get('floating_ip_pool', None) or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict["allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']
    elif provider_type == 'gce':
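        # 'cred' is expected to be the ssh credentials mapping loaded from the
        # credentials config elsewhere in this module (not shown in this snippet)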
        deploy_args['ssh_key'] = '{user_name}:{public_key}'.format(
            user_name=cred['ssh']['ssh-user'],
            public_key=cred['ssh']['public_key'])
    # Do it!
    try:
        logger.info('Cloning {} to {} on {}'.format(deploy_args['template'], deploy_args['vm_name'],
                                                    kwargs['provider']))
        provider.deploy_template(**deploy_args)
    except Exception as e:
        logger.exception(e)
        logger.error('Clone failed')
        if kwargs.get('cleanup', None):
            logger.info('attempting to destroy {}'.format(deploy_args['vm_name']))
            destroy_vm(provider, deploy_args['vm_name'])
        return 12

    if provider.is_vm_running(deploy_args['vm_name']):
        logger.info("VM {} is running".format(deploy_args['vm_name']))
    else:
        logger.error("VM is not running")
        return 10

    try:
        ip, time_taken = wait_for(provider.get_ip_address, [deploy_args['vm_name']], num_sec=1200,
                                  fail_condition=None)
        logger.info('IP Address returned is {}'.format(ip))
    except Exception as e:
        logger.exception(e)
        logger.error('IP address not returned')
        return 10

    try:
        if kwargs.get('configure', None):
            logger.info('Configuring appliance, this can take a while.')
            if kwargs.get('deploy', None):
                app = IPAppliance(address=ip)
            else:
                app = Appliance(kwargs['provider'], deploy_args['vm_name'])
            if provider_type == 'gce':
                with app as ipapp:
                    ipapp.configure_gce()
            else:
                app.configure()
            logger.info('Successfully Configured the appliance.')
    except Exception as e:
        logger.exception(e)
        logger.error('Appliance Configuration Failed')
        if not kwargs.get('deploy', None):
            app = Appliance(kwargs['provider'], deploy_args['vm_name'])
            ssh_client = app.ssh_client()
            status, output = ssh_client.run_command('find /root/anaconda-post.log')
            if status == 0:
                ssh_client.get_file('/root/anaconda-post.log',
                                    log_path.join('anaconda-post.log').strpath)
            ssh_client.close()
        return 10

    if kwargs.get('outfile', None) or kwargs.get('deploy', None):
        with open(kwargs['outfile'], 'w') as outfile:
            outfile.write("appliance_ip_address={}\n".format(ip))

    # In addition to the outfile, drop the ip address on stdout for easy parsing
    print(ip)
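Neither snippet defines destroy_vm(); the `return not destroy_vm(...)` lines only make sense if it returns True on success. A hedged sketch of such a helper, assuming the provider backend exposes does_vm_exist() and delete_vm() alongside the is_vm_running()/get_ip_address() calls used above:

def destroy_vm(provider_mgmt, vm_name):
    # Sketch only, not the project's actual helper: return True on success so
    # that 'return not destroy_vm(...)' above yields POSIX-y exit codes.
    try:
        if provider_mgmt.does_vm_exist(vm_name):
            logger.info('Destroying VM {}'.format(vm_name))
            return bool(provider_mgmt.delete_vm(vm_name))
        logger.info('VM {} does not exist, nothing to destroy'.format(vm_name))
        return True
    except Exception as e:
        logger.exception(e)
        return False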