Example #1
0
def get_mount_table(cloud: Cloud, server: Server) -> tp.Dict[str, str]:
    """Collect the live mount table from the server.

    Returns a mapping of mount point -> device, parsed from `mount` output.
    """
    LOG.info('Save mount table from server "%s"' % server.id)
    remote = cloud.get_node(server)
    table = {}
    for line in remote.run('mount').std_out.splitlines():
        if not line:
            continue
        fields = line.split()
        # `mount` prints: <device> on <mount point> type <fs> (<options>)
        table[fields[2]] = fields[0]
    LOG.debug('Mount table:\n %s' % table)
    return table
Example #2
0
 def test_webhooks(self, context: dict, cloud: Cloud, farm: Farm, servers: dict, testenv, ssl_verify: bool, webhooks: list, expected_results: list):
     """Fire a Scalr event and verify webhook delivery results.

     Reconfigures the Scalr webhook subsystem with the ssl_verify value
     under test, deploys a flask app on the server to receive webhooks,
     fires AccountEvent via szradm and asserts the expected outcomes.
     """
     server = servers.get('F1')
     # Enable the workflow engine and apply the ssl_verify value under test.
     params = {
         "scalr.system.webhooks.scalr_labs_workflow_engine": True,
         "scalr.system.webhooks.ssl_verify": ssl_verify,
         "scalr.system.webhooks.retry_interval": 5,
         "scalr.system.webhooks.use_proxy": False}
     lib_scalr.update_scalr_config(testenv, params)
     # Restart the services that consume webhook configuration.
     testenv.restart_service("workflow-engine")
     testenv.restart_service("zmq_service")
     node = cloud.get_node(server)
     # Upload nginx proxy config, the preparation script and the flask app.
     node.put_file("/tmp/default",
                   resources('configs/nginx_to_flask_proxy.conf').get().decode("utf-8"))
     node.put_file("/tmp/prepare_flask.sh",
                   resources('scripts/prepare_flask.sh').get().decode("utf-8"))
     node.put_file('webhooks.py',
                   resources('scripts/webhooks.py').get().decode("utf-8"))  # Put flask script
     with node.remote_connection() as conn:
         # Run preparation script
         conn.run("sudo bash /tmp/prepare_flask.sh")
         # Run flask in background process
         conn.run("gunicorn -D -w 1 --bind localhost:5000 webhooks:app")
     # Sanity check the endpoint, register the webhooks and fire the event.
     lib_apache.assert_check_http_get_answer(server)
     lib_webhooks.configure_webhooks(webhooks, server, farm, context)
     result = lib_node.execute_command(cloud, server, 'szradm --fire-event AccountEvent')
     assert not result.std_err, "Command szradm --fire-event AccountEvent failed with %s" % result.std_err
     lib_webhooks.assert_webhooks(context['test_webhooks'], expected_results, server_id=server.id)
     assert not testenv.check_service_log("workflow-engine", "Traceback"), "Found Traceback in workflow-engine service log!"
def check_redis_instances(step, instances_count, instance_type, serv_as):
    """Try to connect to every known redis instance on the server.

    NOTE(review): this snippet is truncated — the ``finally:`` suite at the
    end is missing — and ``except redis.ConnectionError, e`` is Python 2
    syntax; the block is not valid Python 3 as-is.
    """
    server = getattr(world, serv_as)
    # presumably maps redis port -> password — TODO confirm against the step
    # that populates world.redis_instances.
    instances = getattr(world, 'redis_instances', {})
    count = 0
    for instance in instances:
        if CONF.feature.platform in ['cloudstack', 'idcf']:
            # cloudstack/idcf servers sit behind NAT: open a port-forwarding
            # rule so the redis port is reachable from the test runner.
            cloud = Cloud()
            node = cloud.get_node(server)
            ip = filter(lambda x: x.address == server.public_ip, node.driver.ex_list_public_ip())[0]
            try:
                rule = node.driver.ex_add_port_forwarding_rule(node, ip, 'TCP', instance, instance)
            except:
                # The rule may already exist; look it up instead of failing.
                rules = node.driver.ex_list_port_forwarding_rule()
                rule = filter(lambda x: x.public_port == instance and x.address == ip, rules)[0]
            LOG.info('Rule for open port add: %s %s - %s' % (ip, instance, instance))
        info = {}
        try:
            LOG.debug('Try connect to redis instance: %s:%s:%s' % (server.public_ip, instance, instances[instance]))
            r = redis.Redis(host=server.public_ip, port=instance, password=instances[instance], socket_timeout=5)
            info = r.info()
        except redis.ConnectionError, e:
            LOG.error('Connection to redis: %s:%s with password %s is FAILED' % (server.public_ip, instance,
                                                                                 instances[instance]))
            raise redis.ConnectionError('Connection to redis: %s:%s with password %s is FAILED' % (server.public_ip, instance,
                                                                                                   instances[instance]))
        finally:
def action_on_redis(step, action, instance_number, serv_as):
    """Perform a read or write action against the N-th redis instance on a server.

    :param action: 'write' sets a test key, 'read' verifies it.
    :param instance_number: 1-based index into the port-sorted instance list.
    :raises AssertionError: if the read-back value is wrong.

    For cloudstack/idcf platforms a temporary port-forwarding rule is opened
    first and removed afterwards.
    """
    server = getattr(world, serv_as)
    instances = getattr(world, 'redis_instances', {})
    # instances maps port -> password; pick the N-th pair by port order.
    instance = sorted(instances.items())[int(instance_number)-1]
    if CONF.feature.platform in ['cloudstack', 'idcf']:
        cloud = Cloud()
        node = cloud.get_node(server)
        # List comprehension instead of filter(...)[0]: works on both
        # Python 2 and 3 (filter is a lazy iterator on Python 3).
        ip = [x for x in node.driver.ex_list_public_ip() if x.address == server.public_ip][0]
        try:
            rule = node.driver.ex_add_port_forwarding_rule(node, ip, 'TCP', instance[0], instance[0])
        except Exception:
            # Narrowed from a bare `except:`; the rule may already exist,
            # so reuse it instead of failing.
            rules = node.driver.ex_list_port_forwarding_rule()
            rule = [x for x in rules if x.public_port == instance[0] and x.address == ip][0]
        LOG.info('Rule for open port add: %s %s - %s' % (ip, instance[0], instance[0]))
    r = redis.Redis(host=server.public_ip, port=instance[0], password=instance[1], socket_timeout=5, db=0)
    if action == 'write':
        LOG.info('Insert test key to %s:%s' % (server.public_ip, instance[0]))
        r.set('test_key', 'test_value')
    elif action == 'read':
        LOG.info('Read test key from %s:%s' % (server.public_ip, instance[0]))
        data = r.get('test_key')
        if not data == 'test_value':
            LOG.error('Receive bad key value from redis instance: %s:%s' % (server.public_ip, instance[0]))
            raise AssertionError('Receive bad key value from redis instance: %s:%s' % (server.public_ip, instance[0]))
    if CONF.feature.platform in ['cloudstack', 'idcf']:
        node.driver.ex_delete_port_forwarding_rule(node, rule)
        LOG.info('Rule for open port was delete')
Example #5
0
def assert_at_user_on_server(cloud: Cloud, server: api.Server, expected_user: str):
    """Assert that *expected_user* exists in the server's account list."""
    node = cloud.get_node(server)
    # Pick the platform-appropriate account-listing command.
    if CONF.feature.dist.is_windows:
        cmd = 'net user'
    else:
        cmd = 'cut -d : -f 1 /etc/passwd'
    with node.remote_connection() as conn:
        user_list = conn.run(cmd).std_out.split()
        assert expected_user in user_list, \
            f'User {expected_user} was not found on the server! User list output: {user_list}'
 def test_checking_config_changes(self, cloud: Cloud, servers: dict):
     """Checking config changes: INTERVAL"""
     interval = 15
     target = servers['M1']
     chef_node = cloud.get_node(target)
     # Apply the new chef-client interval, then verify both the config value
     # and the actual run cadence reflect it.
     provision.change_chef_client_interval_value(chef_node, interval)
     provision.assert_chef_client_interval_value(chef_node, interval)
     provision.assert_chef_runs_time(chef_node, interval)
Example #7
0
 def test_chef_deployment_linux(self, context: dict, cloud: Cloud, servers: dict):
     """Verify chef executed fine"""
     target = servers['M1']
     chef_node = cloud.get_node(target)
     # Remember when chef bootstrapped so later tests can detect re-runs.
     context['chef_deployment_time'] = provision.get_chef_bootstrap_stat(chef_node)
     # Recipes must have started memcached and a daemonized chef-client.
     lib_node.assert_process_has_options(cloud, target, process='memcached', options='-m 1024')
     lib_node.assert_process_has_options(cloud, target, process='chef-client', options='--daemonize')
     provision.assert_chef_node_name_equal_hostname(cloud, target)
Example #8
0
def check_count_redis_instances(step, instances_count, serv_as):
    """Assert the number of running redis-server processes on the server."""
    server = getattr(world, serv_as)
    node = Cloud().get_node(server)
    out = node.run('ps -A | grep redis-server')
    running = len(out[0].splitlines())
    if running != int(instances_count):
        LOG.error('Redis processes: %s' % out[0])
        raise AssertionError('Invalid redis processes count, must be: %s but %s' % (instances_count, running))
Example #9
0
def assert_scalarizr_log_errors(cloud: Cloud, server: Server, log_type: str = None):
    """Check scalarizr log for errors"""
    log_type = log_type or 'debug'
    node = cloud.get_node(server)
    kwargs = {'log_type': log_type}
    if CONF.feature.dist.is_windows:
        # Windows validation additionally needs the server handle.
        kwargs.update(windows=True, server=server)
    validate_scalarizr_log_errors(cloud, node, **kwargs)
Example #10
0
 def test_chef_after_resume_linux(self, context: dict, cloud: Cloud, servers: dict, farm: Farm):
     """Verify chef not executed after resume"""
     target = servers['M1']
     chef_node = cloud.get_node(target)
     # Bootstrap stat must be unchanged, otherwise chef re-ran on resume.
     assert context['chef_deployment_time'] == provision.get_chef_bootstrap_stat(chef_node), \
         'Chef was started after resume!'
     provision.check_process_status(chef_node, 'memcached', False)
     lib_node.assert_process_has_options(cloud, target, process='chef-client', options='--daemonize')
     provision.assert_chef_node_name_equal_hostname(cloud, target)
Example #11
0
def validate_string_in_file(cloud: Cloud, server: Server, file_path: str, value: str, invert: bool = False):
    """Assert *file_path* on the server contains *value* (or does not, with invert)."""
    LOG.info('Verify file "%s" in %s %s "%s"' % (file_path, server.id,
                                                 'does not contain' if invert else 'contains',
                                                 value))
    grep_out = cloud.get_node(server).run('cat %s | grep %s' % (file_path, value)).std_out.strip()
    found = bool(grep_out)
    # Must find the value unless invert is set, in which case must not.
    assert found != invert, \
        'File %s %s: %s. Result of grep: %s' % (file_path,
                                                'contains' if invert else 'does not contain',
                                                value, grep_out)
Example #12
0
def assert_szr_terminated_in_log(cloud: Cloud, server: Server):
    """Check the Windows scalarizr debug log records a termination."""
    # TODO: PP > consolidate win/linux methods
    node = cloud.get_node(server)
    # The install path differs between buildbot and release builds.
    if CONF.feature.ci_repo == 'buildbot':
        cmd = "findstr /c:\"Scalarizr terminated\" \"C:\Program Files\Scalarizr\\var\log\scalarizr_debug.log\""
    else:
        cmd = "findstr /c:\"Scalarizr terminated\" \"C:\opt\scalarizr\\var\log\scalarizr_debug.log\""
    out = node.run(cmd)
    if 'Scalarizr terminated' not in out.std_out:
        raise AssertionError("Not see 'Scalarizr terminated' in debug log")
    return True
 def test_chef_solo_bootstrapping(self, context: dict, cloud: Cloud, farm: Farm, role_options: str):
     """Bootstrapping role with chef-solo"""
     # Start from a clean, stopped farm so only the chef-solo role exists.
     lib_farm.clear(farm)
     farm.terminate()
     lib_farm.add_role_to_farm(context, farm, role_options=[role_options])
     farm.launch()
     running_server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
     # The chef-solo recipe drops a marker file named after the role option.
     lib_server.assert_file_exist(cloud.get_node(running_server), f'C:\{role_options}')
     provision.assert_script_data_deleted(cloud, running_server)
Example #14
0
def szradm_execute_command(command: str, cloud: Cloud, server: Server, format_output: bool=True):
    """Run a szradm command on the server and return its output.

    :param command: szradm command line to execute.
    :param format_output: when True, append --format=json and return the
        parsed JSON; otherwise return raw stdout.
    :raises AssertionError: if szradm exits with a non-zero status code.
    """
    if format_output:
        command = f"{command} --format=json"
    if CONF.feature.dist.id == 'coreos':
        # CoreOS keeps szradm under /opt/bin, which is not on the default PATH.
        command = f'PATH=$PATH:/opt/bin; {command}'
    LOG.info(f'Execute the command: {command} a remote host: {server.id}')
    node = cloud.get_node(server)
    with node.remote_connection() as conn:
        result = conn.run(command)
        if result.status_code:
            # BUG FIX: previously referenced the undefined name `out`,
            # raising NameError instead of the intended AssertionError.
            raise AssertionError(f"An error has occurred while execute szradm:\n {result.std_err}")
        # Proper ternary: the old `and/or` form fell through to raw stdout
        # whenever the parsed JSON was falsy (e.g. `{}` or `[]`).
        return json.loads(result.std_out) if format_output else result.std_out
Example #15
0
def assert_chef_node_name_equal_hostname(cloud: Cloud, server: api.Server):
    """Assert the chef node_name in client.rb matches the server hostname.

    :raises AssertionError: if the configured node_name differs.
    """
    hostname = lib_server.get_hostname_by_server_format(server)
    node = cloud.get_node(server)
    # Proper conditional expression instead of the error-prone `and/or` idiom.
    command = 'findstr node_name c:\\chef\\client.rb' \
        if CONF.feature.dist.is_windows \
        else 'cat /etc/chef/client.rb | grep node_name'
    with node.remote_connection() as conn:
        result = conn.run(command).std_out
        # client.rb line looks like: node_name "some-name" -> strip the quotes.
        node_name = result.strip().split()[1][1:-1]
        if not node_name == hostname:
            raise AssertionError(f'Chef node_name "{node_name}" != hostname on server "{hostname}"')
Example #16
0
def check_processes(step, count, serv_as):
    """Count running scalarizr processes on the server and compare to *count*."""
    # Give the agent time to settle before counting.
    time.sleep(60)
    serv = getattr(world, serv_as)
    node = Cloud().get_node(serv)
    list_proc = node.run("ps aux | grep scalarizr")[0]
    # Only real daemon processes, not the grep itself, mention bin/scalarizr.
    proc_count = sum(1 for line in list_proc.splitlines() if "bin/scalarizr" in line)
    LOG.info("Scalarizr count of processes %s" % proc_count)
    world.assert_not_equal(proc_count, int(count), "Scalarizr processes is: %s but processes \n%s" % (proc_count, list_proc))
Example #17
0
def set_iptables_rule(cloud: Cloud, server: Server, port: tp.Union[int, tuple, list]):
    """Insert iptables rule in the top of the list"""
    LOG.info('Insert iptables rule to server %s for opening port %s' % (server, port))
    node = cloud.get_node(server)
    my_ip = get_external_local_ip()
    LOG.info('My IP address: %s' % my_ip)
    if isinstance(port, (tuple, list)):
        # Exactly two values form a range (a:b); more form a list (a,b,c).
        separator = ':' if len(port) == 2 else ','
        port = separator.join(str(x) for x in port)
    node.run('iptables -I INPUT -p tcp -s %s --dport %s -j ACCEPT' % (my_ip, port))
Example #18
0
def agent_restart(cloud: Cloud, server: Server):
    """Restart the Scalarizr Windows service over a remote connection."""
    # TODO: PP > consolidate win/linux methods
    node = cloud.get_node(server)
    with node.remote_connection() as conn:
        LOG.info('Restart scalarizr via winrm')
        LOG.debug('Stop scalarizr')
        stop_out = conn.run('net stop Scalarizr')
        time.sleep(3)
        LOG.debug(stop_out)
        start_out = conn.run('net start Scalarizr')
        LOG.debug(start_out)
        # Give the service time to come up before callers continue.
        time.sleep(15)
 def test_bootstrapping_form_chef_role(self, context: dict, cloud: Cloud, farm: Farm):
     """Bootstrapping from chef role"""
     # Reset the farm, then add the windows chef role and start it.
     lib_farm.clear(farm)
     farm.terminate()
     lib_farm.add_role_to_farm(context, farm, role_options=['winchef-role'])
     farm.launch()
     running_server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
     # The chef run must have left its marker file and a clean agent log.
     lib_server.assert_file_exist(cloud.get_node(running_server), 'C:\chef_result_file')
     lib_server.assert_scalarizr_log_errors(cloud, running_server)
     lifecycle.assert_szr_version_last(running_server)
     lifecycle.assert_hostname(running_server)
Example #20
0
def assert_path_exist(cloud: Cloud, server: Server, path: str):
    """Validate path exist in server"""
    LOG.info(f'Verify path {path} exist in server {server.id}')
    node = cloud.get_node(server)
    with node.remote_connection() as conn:
        # Retry up to 5 times with a 15s pause — the path may appear late.
        attempts_left = 5
        while attempts_left:
            if conn.run('/bin/ls %s' % path).status_code == 0:
                return
            attempts_left -= 1
            time.sleep(15)
        LOG.error(f'Path {path} does not exist in server {server.id}')
        raise AssertionError(f'Path {path} does not exist in server {server.id}')
Example #21
0
def get_config_from_message(cloud: Cloud, server: Server, config_group: str, message: str) -> dict:
    """Fetch a szradm message body from the server and return one config section."""
    node = cloud.get_node(server)
    LOG.info(f'Get messages from server {server.id}')
    messages = lib_server.get_szr_messages(node)
    # First message whose name matches; raises StopIteration if absent.
    msg_id = next(m['id'] for m in messages if m['name'] == message)
    LOG.info(f'Message id for {message} is {msg_id}')
    cmd = f'szradm message-details {msg_id} --json'
    if CONF.feature.dist.id == 'coreos':
        cmd = "/opt/bin/" + cmd
    message_details = json.loads(node.run(cmd).std_out)['body']
    LOG.info(f'Message details is {message_details}')
    LOG.info(f'Returning message part {config_group}')
    return message_details[config_group]
Example #22
0
def assert_chef_bootstrap_failed(cloud: Cloud, server: api.Server):
    """Assert that scalarizr_debug.log records a failed chef-client run."""
    node = cloud.get_node(server)
    assertion_msg = "Chef bootstrap marker not found in scalarizr_debug.log"
    if not CONF.feature.dist.is_windows:
        # Older and newer agents quote the chef-client path differently.
        failure_markers = [
            'Command "/usr/bin/chef-client" exited with code 1',
            'Command /usr/bin/chef-client exited with code 1']
        found = any(node.run(f'grep {m} /var/log/scalarizr_debug.log').std_out.strip() for m in failure_markers)
        assert found, assertion_msg
    else:
        failure_marker = 'chef-client" exited with code 1'
        cmd = 'findstr /C:"Command \\"C:\opscode\chef\\bin\chef-client\\" exited with code 1"' \
              ' "C:\opt\scalarizr\\var\log\scalarizr_debug.log"'
        assert failure_marker in node.run(cmd).std_out, assertion_msg
 def test_bootstrapping_with_chef(self, context: dict, cloud: Cloud, farm: Farm, servers: dict):
     """Bootstrapping with chef"""
     lib_farm.add_role_to_farm(context, farm, role_options=['winchef'])
     farm.launch()
     running_server = lib_server.wait_server_status(context, cloud, farm, status=ServerStatus.RUNNING)
     # Keep the server for the following tests in this class.
     servers['M1'] = running_server
     lib_server.assert_scalarizr_log_errors(cloud, running_server)
     lifecycle.assert_szr_version_last(running_server)
     provision.assert_node_exists_on_chef_server(running_server)
     orchestration.assert_recipes_in_runlist(running_server, ['windows_file_create', 'revizorenv', 'revizor_chef_multi'])
     # Both recipe marker files must be present on the node.
     chef_node = cloud.get_node(running_server)
     lib_server.assert_file_exist(chef_node, 'C:\chef_result_file')
     lib_server.assert_file_exist(chef_node, 'C:\changed_result')
     provision.assert_chef_node_name_equal_hostname(cloud, running_server)
     provision.assert_chef_log_contains_text(running_server, "revizor_chef_variable=REVIZOR_CHEF_VARIABLE_VALUE_WORK")
Example #24
0
def create_partitions_on_volume(cloud: Cloud, server: Server, mnt_point: str):
    """Create a partition table on the volume mounted at *mnt_point*."""
    script_name = 'create_partitions.sh'
    remote_path = Path('/tmp', script_name)
    script_body = resources(Path('scripts', script_name)).get().decode()
    node = cloud.get_node(server)

    LOG.info('Creating partitions table for volume on %s' % mnt_point)
    # The script template takes the mount point via %-interpolation.
    node.put_file(str(remote_path), script_body % mnt_point)
    out = node.run(f'source {remote_path}')

    # The script prints the resulting table last; keep the final 4 lines.
    partition_table = out.std_out.strip('\n').splitlines()[-4:]
    LOG.debug('Created partitions table for volume:\n%s' % '\n'.join(partition_table))
    assert all(line.startswith('/dev/') for line in partition_table), \
        'Create volume partitions failed: %s' % out.std_err
    LOG.info('Partitions table for volume was successfully created')
Example #25
0
def assert_script_data_deleted(cloud: Cloud, server: api.Server):
    """Verify executed-script working folders were removed from the server."""
    node = cloud.get_node(server)
    server.scriptlogs.reload()
    LOG.info('Check script executed data was deleted')
    if not server.scriptlogs:
        raise AssertionError("No orchestration logs found on %s" % server.id)
    # Task folders are named after the execution id without dashes.
    task_dir = server.scriptlogs[0].execution_id.replace('-', '')
    if CONF.feature.dist.is_windows:
        cmd = f'dir c:\\opt\\scalarizr\\var\\lib\\tasks\\{task_dir} /b /s /ad | findstr /e "\\bin \\data"'
    else:
        cmd = f'find /var/lib/scalarizr/tasks/{task_dir} -type d -regex ".*/\\(bin\\|data\\)"'
    with node.remote_connection() as conn:
        result = conn.run(cmd)
        assert not result.std_err, f"Command: {cmd} execution error:\n {result.std_err}"
        # Any surviving bin/data folder means cleanup did not happen.
        leftover = [line for line in result.std_out.splitlines() if line.strip()]
        assert not leftover, f"Find script data {leftover} on {server.id}"
Example #26
0
def assert_attached_disks_size(cloud: Cloud, server: Server, disks: tp.List[tp.Tuple[str, str, int]]):
    """Verify attached Windows volumes match expected caption, label and size.

    :param disks: expected (caption, label, size-in-GB) tuples.
    :raises AssertionError: on size/label mismatch or missing disk.
    """
    node = cloud.get_node(server)
    # wmic appears to emit columns alphabetically (Capacity, Caption, Label) —
    # inferred from the indices used below; confirm on a live server.
    out = node.run('wmic volume get Caption,Capacity,Label').std_out
    # Drop the header row; each entry becomes [capacity, caption, label?...].
    server_disks = [line.split() for line in out.splitlines() if line.strip()][1:]
    for d, label, size in disks:
        for disk in server_disks:
            if disk[1] == d:
                # Capacity is in bytes; convert to decimal GB.
                server_size = int(disk[0]) // 1000000000
                if server_size != size:
                    raise AssertionError("Disk %s is of wrong size  - %s " % (disk[1], server_size))
                # NOTE(review): this tests that the server's label is a
                # substring of the expected one — looks inverted (expected
                # `label not in disk[2]`?); confirm against callers before
                # changing.
                if len(disk) > 2 and disk[2] not in label:
                    raise AssertionError("Disk %s has incorrect or no label '%s'. Should be '%s'." % (
                        disk[1], disk[2], label))
                break
        else:
            raise AssertionError("Disk not found! All server disks %s" % server_disks)
Example #27
0
def assert_errors_in_szr_logs(cloud: Cloud, server: Server):
    """Fail if today's entries in the Windows scalarizr_debug.log contain ERRORs.

    :raises AssertionError: listing every ERROR line dated today.
    """
    node = cloud.get_node(server)
    out = node.run("findstr /c:\"ERROR\" \"C:\\opt\\scalarizr\\var\\log\\scalarizr_debug.log\"").std_out
    errors = []
    if 'ERROR' in out:
        # Hoisted out of the loop: `now` was recomputed for every log line.
        today = datetime.now().date()
        for line in out.splitlines():
            try:
                # First token is the record date, fourth is the log level;
                # lines that don't parse as a date are continuations — skip.
                record_date = datetime.strptime(line.split()[0], '%Y-%m-%d')
                log_level = line.strip().split()[3]
            except ValueError:
                continue
            if record_date.date() == today and log_level == 'ERROR':
                errors.append(line)
    if errors:
        raise AssertionError('ERROR in log: %s' % errors)
Example #28
0
def wait_unstored_message(cloud: Cloud, servers: tp.Union[Server, tp.List[Server]],
                          message_name: str, message_type: str = 'out',
                          find_in_all: bool = False,
                          timeout: int = 1000):
    """Wait until a szradm message appears on a server (or on all of them).

    :param servers: one server or a list of servers to poll.
    :param message_type: 'sends'/'out' for outgoing, anything else for incoming.
    :param find_in_all: when True, require the message on every server;
        otherwise return the first server where it is found.
    :raises MessageNotFounded: if the message is not seen within *timeout* seconds.
    """
    if not isinstance(servers, (list, tuple)):
        servers = [servers]
    delivered_to = []
    server_messages = {}
    message_type = 'in' if message_type.strip() not in ('sends', 'out') else 'out'
    start_time = time.time()
    while time.time() - start_time < timeout:
        # BUG FIX: compare counts, not lists — `delivered_to == servers` only
        # matched when messages were found in the same order as `servers`,
        # so out-of-order delivery raised a spurious timeout.
        if len(delivered_to) == len(servers):
            LOG.info('All servers has message: %s / %s' % (message_type, message_name))
            break
        for server in servers:
            if server in delivered_to:
                continue
            LOG.info('Searching message "%s/%s" on %s node' % (message_type, message_name, server.id))
            node = cloud.get_node(server)
            # Track already-seen message ids per server to spot only new ones.
            lookup_messages = server_messages.setdefault(server.id, [])
            node_messages = reversed(get_szr_messages(node, convert=True))
            message = list(filter(lambda m:
                                  m.name == message_name
                                  and m.direction == message_type
                                  and m.id not in lookup_messages
                                  and strtobool(m.handled), node_messages))

            if message:
                LOG.info('Message found: %s' % message[0].id)
                lookup_messages.append(message[0].id)
                if find_in_all:
                    LOG.info('Message %s delivered to the server %s' % (message_name, server.id))
                    delivered_to.append(server)
                    continue
                return server
        time.sleep(30)
    else:
        raise MessageNotFounded('%s/%s was not finding' % (message_type, message_name))
Example #29
0
def assert_mount_point_in_fstab(cloud: Cloud, server: Server, mount_table: tp.Dict[str, str], mount_point: str):
    """Verify the device mounted at *mount_point* is also configured in fstab.

    Resolves /dev/disk/by-uuid symlinks so the fstab entry and the live
    mount can be compared by real device path.
    """
    LOG.info('Verify disk from mount point "%s" exist in fstab on server "%s"' %
             (mount_point, server.id))
    node = cloud.get_node(server)
    with node.remote_connection() as conn:
        # Retry a few times: fstab occasionally reads back empty (see FIXME).
        for i in range(3):
            fstab = conn.run('cat /etc/fstab').std_out
            if not fstab:  # FIXME: on openstack this trouble was, fix this
                LOG.warning('cat /etc/fstab return nothing')
                time.sleep(15)
                continue
            break
        # Keep only device-like lines, mapping mount point -> device.
        fstab = fstab.splitlines()
        fstab = {x.split()[1]: x.split()[0] for x in fstab if x if re.match(r"^[\/\d]", x)}
        LOG.debug('Fstab on server "%s" contains:\n %s' % (server.id, fstab))
        if mount_point not in mount_table:
            raise AssertionError('Mount point "%s" not exist in mount table:\n%s' %
                                 (mount_point, mount_table))
        if mount_point not in fstab:
            raise AssertionError('Mount point "%s" not exist in fstab:\n%s' %
                                 (mount_point, fstab))

        fstab_real_path_disk = fstab[mount_point]
        mount_real_path_disk = mount_table[mount_point]

        LOG.debug(f'Fstab and mount table state for {mount_point}: {fstab[mount_point]} {mount_table[mount_point]}')

        if 'by-uuid' in fstab[mount_point]:
            LOG.debug(f'Fstab mount point has by-uuid, get real path for {fstab[mount_point]}')
            fstab_real_path_disk = conn.run(f'readlink -f {fstab[mount_point]}').std_out.strip()
            LOG.debug(f'Fstab real path for disk: {fstab_real_path_disk}')
        elif 'by-uuid' in mount_table[mount_point]:
            # NOTE(review): this branch resolves the mount-table path, but the
            # debug message below mentions the fstab path — looks like a
            # copy/paste slip in the log text only; confirm before changing.
            LOG.debug(f'Mount point has by-uuid, get real path for {fstab[mount_point]}')
            mount_real_path_disk = conn.run(f'readlink -f {mount_table[mount_point]}').std_out.strip()
            LOG.debug(f'Mount point real path for disk: {mount_real_path_disk}')
        assert fstab_real_path_disk == mount_real_path_disk, (
                'Disk from mount != disk in fstab: "%s" != "%s"' % (mount_real_path_disk, fstab_real_path_disk))
Example #30
0
def install_docker(cloud: Cloud, server: Server) -> docker.APIClient:
    """Install docker on the server and expose its API on TCP port 3389.

    :returns: a docker.APIClient connected to the server's public IP.
    """
    node = cloud.get_node(server)
    conf_folder = ''
    restart_cmd = 'service docker restart'
    if CONF.feature.dist.is_systemd:
        # systemd distros: override ExecStart via a drop-in unit so dockerd
        # listens on both the unix socket and TCP 3389.
        conf_file = '/etc/systemd/system/docker.service.d/docker.conf'
        echo_line = '''"[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H unix:///var/run/docker.sock -H tcp://0.0.0.0:3389"'''
        conf_folder = 'mkdir /etc/systemd/system/docker.service.d;'
        restart_cmd = 'systemctl daemon-reload; systemctl restart docker'
    else:
        # sysvinit distros configure the daemon via /etc/default/docker.
        conf_file = '/etc/default/docker'
        echo_line = """'DOCKER_OPTS="-H unix:///var/run/docker.sock -H tcp://0.0.0.0:3389"'"""
    # Install via get.docker.com, write the listener config, restart the
    # daemon and pre-pull the images used by the tests.
    command = '''curl -fsSL https://get.docker.com/ | sh; {}\
        echo -e {} >> {};\
        {}; \
        docker pull ubuntu; \
        docker pull nginx; \
        docker pull alpine'''.format(conf_folder, echo_line, conf_file, restart_cmd)
    with node.remote_connection() as conn:
        conn.run(command)
        # Make sure the API port is reachable through the firewall.
        conn.run("iptables -I INPUT 1 -p tcp --dport 3389 -j ACCEPT")
        # presumably SCRIPT keeps the node busy so it is not recycled — TODO confirm.
        conn.run('echo "sleep 1d" >> /home/scalr/{}'.format(SCRIPT))
        assert conn.run('docker --version')
    return docker.APIClient(base_url='http://%s:3389' % server.public_ip, version='auto')