def get_admin_token(RANCHER_SERVER_URL):
    """Log in as the default local admin and return the admin API token.

    Also changes the admin password to ADMIN_PASSWORD and sets the
    "server-url" setting so the server knows its own external address.

    :param RANCHER_SERVER_URL: base https URL of the Rancher server
    :return: the admin bearer token string
    """
    CATTLE_AUTH_URL = \
        RANCHER_SERVER_URL + "/v3-public/localproviders/local?action=login"
    r = requests.post(CATTLE_AUTH_URL, json={
        'username': '******',
        'password': '******',
        'responseType': 'json',
    }, verify=False)
    # BUG FIX: do not print the login response or token — it is a live
    # admin credential and would leak into CI logs.
    token = r.json()['token']

    # Change admin password
    client = rancher.Client(url=RANCHER_SERVER_URL + "/v3",
                            token=token, verify=False)
    admin_user = client.list_user(username="******").data
    admin_user[0].setpassword(newPassword=ADMIN_PASSWORD)

    # Set server-url settings
    serverurl = client.list_setting(name="server-url").data
    client.update(serverurl[0], value=RANCHER_SERVER_URL)
    return token
def create_custom_cluster(admin_client):
    """Provision a 5-node custom RKE cluster as a newly created user and
    validate it comes up."""
    auth_url = RANCHER_SERVER_URL + \
        "/v3-public/localproviders/local?action=login"
    user, user_token = create_user(admin_client, auth_url)
    aws_nodes = AmazonWebServices().create_multiple_nodes(
        5, random_test_name(resource_prefix + "-custom"))
    node_roles = [["controlplane"], ["etcd"],
                  ["worker"], ["worker"], ["worker"]]
    client = rancher.Client(url=RANCHER_SERVER_URL + "/v3",
                            token=user_token, verify=False)
    cluster = client.create_cluster(
        name=random_name(),
        driver="rancherKubernetesEngine",
        rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "provisioning"
    # Register each EC2 node with its assigned role.
    for roles, aws_node in zip(node_roles, aws_nodes):
        docker_run_cmd = get_custom_host_registration_cmd(
            client, cluster, roles, aws_node)
        aws_node.execute_command(docker_run_cmd)
    validate_cluster(client, cluster, userToken=user_token)
def admin_cc(admin_mc):
    """Returns a ClusterContext for the local cluster for the default
    global admin user."""
    local_cluster = admin_mc.client.by_id_cluster('local')
    schemas_url = local_cluster.links['self'] + '/schemas'
    cluster_client = rancher.Client(url=schemas_url,
                                    token=admin_mc.client.token,
                                    verify=False)
    return ClusterContext(admin_mc, local_cluster, cluster_client)
def cluster_and_client(cluster_id, mgmt_client):
    """Look up a cluster by id and return (cluster, cluster-scoped client)."""
    cluster = mgmt_client.by_id_cluster(cluster_id)
    schema_url = cluster.links.self + '/schemas'
    scoped_client = rancher.Client(url=schema_url,
                                   token=mgmt_client.token,
                                   verify=False)
    return cluster, scoped_client
def get_cluster_client_for_token_v1(cluster_id=None, token=None):
    """Return a v1 (steve) API client scoped to a cluster.

    Defaults to the cluster named CLUSTER_NAME and the standard USER_TOKEN
    when the corresponding argument is omitted.
    """
    if cluster_id is None:
        cluster_id = get_cluster_by_name(get_admin_client_v1(),
                                         CLUSTER_NAME)["id"]
    token = USER_TOKEN if token is None else token
    url = "{}/k8s/clusters/{}/v1/schemas".format(CATTLE_TEST_URL, cluster_id)
    return rancher.Client(url=url, token=token, verify=False)
def admin_system_pc(admin_mc):
    """Returns a ProjectContext for the system project in the local cluster
    for the default global admin user."""
    admin = admin_mc.client
    # Exactly one project named "System" is expected in the local cluster.
    plist = admin.list_project(name='System', clusterId='local')
    assert len(plist) == 1
    p = plist.data[0]
    url = p.links.self + '/schemas'
    # NOTE(review): 'admin_cc' is not defined in this function's scope; it
    # appears to resolve to a module-level fixture function rather than a
    # ClusterContext instance — confirm this is intentional.
    return ProjectContext(
        admin_cc, p,
        rancher.Client(url=url, verify=False, token=admin.token))
def pc(request, cc):
    """Create a throwaway project in cc's cluster and return its
    ProjectContext; the project is deleted when the fixture is torn down."""
    mgmt = cc.management.client
    project = mgmt.create_project(name='test-' + random_str(),
                                  clusterId=cc.cluster.id)
    project = mgmt.wait_success(project)
    wait_for_condition("BackingNamespaceCreated", "True", mgmt, project)
    assert project.state == 'active'
    # clean the project up at fixture teardown
    request.addfinalizer(lambda: cc.management.client.delete(project))
    schemas_url = project.links['self'] + '/schemas'
    project_client = rancher.Client(url=schemas_url, verify=False,
                                    token=cc.client._token)
    return ProjectContext(cc, project, project_client)
def set_url_and_password():
    """Bootstrap the server URL/admin password, create a standard user, and
    write the server URL plus both tokens to the test config file."""
    admin_token = set_url_password_token(RANCHER_SERVER_URL)
    admin_client = rancher.Client(url=RANCHER_SERVER_URL + "/v3",
                                  token=admin_token, verify=False)
    auth_url = (RANCHER_SERVER_URL +
                "/v3-public/localproviders/local?action=login")
    user, user_token = create_user(admin_client, auth_url)
    lines = [
        "env.CATTLE_TEST_URL='" + RANCHER_SERVER_URL + "'",
        "env.ADMIN_TOKEN='" + admin_token + "'",
        "env.USER_TOKEN='" + user_token + "'",
    ]
    create_config_file("\n".join(lines) + "\n")
def admin_mc():
    """Returns a ManagementContext for the default global admin user."""
    # First-boot password set, then a normal login.
    requests.post(CHNG_PWD_URL, json={'newPassword': '******'}, verify=False)
    login = requests.post(AUTH_URL, json={
        'username': '******',
        'password': '******',
        'responseType': 'json',
    }, verify=False)
    admin_client = rancher.Client(url=BASE_URL,
                                  token=login.json()['token'],
                                  verify=False)
    return ManagementContext(admin_client)
def admin_mc():
    """Returns a ManagementContext for the default global admin user."""
    credentials = {
        'username': '******',
        'password': '******',
        'responseType': 'json',
    }
    login = requests.post(AUTH_URL, json=credentials, verify=False)
    protect_response(login)  # fail fast on a non-success login response
    mgmt = rancher.Client(url=BASE_URL, token=login.json()['token'],
                          verify=False)
    k8s = kubernetes_api_client(mgmt, 'local')
    admin_user = mgmt.list_user(username='******').data[0]
    return ManagementContext(mgmt, k8s, user=admin_user)
def mc(url, auth_url, chngpwd):
    """Set the initial admin password via *chngpwd*, log in via *auth_url*,
    and return a ManagementContext for the API at *url*."""
    requests.post(chngpwd, json={'newPassword': '******'}, verify=False)
    payload = {
        'username': '******',
        'password': '******',
        'responseType': 'json',
    }
    login = requests.post(auth_url, json=payload, verify=False)
    api = rancher.Client(url=url, token=login.json()['token'], verify=False)
    return ManagementContext(api)
def test_websocket(admin_mc):
    """A cross-origin websocket upgrade request must be rejected with 403."""
    client = rancher.Client(url=BASE_URL, token=admin_mc.client.token,
                            verify=False)
    # make a request that looks like a websocket from a disallowed origin
    session_headers = client._session.headers
    session_headers["Connection"] = "upgrade"
    session_headers["Upgrade"] = "websocket"
    session_headers["Origin"] = "badStuff"
    # do something with client now that we have a "websocket"
    with pytest.raises(rancher.ApiError) as e:
        client.list_cluster()
    assert e.value.error.Code.Status == 403
def _admin_pc():
    """Create a test project in admin_cc's cluster and return its
    ProjectContext (cleanup is registered via remove_resource)."""
    mgmt = admin_cc.management.client
    project = mgmt.create_project(name='test-' + random_str(),
                                  clusterId=admin_cc.cluster.id)
    project = mgmt.wait_success(project)
    wait_for_condition("BackingNamespaceCreated", "True",
                       admin_cc.management.client, project)
    assert project.state == 'active'
    remove_resource(project)
    project = mgmt.reload(project)
    schema_url = project.links.self + '/schemas'
    proj_client = rancher.Client(url=schema_url, verify=False,
                                 token=mgmt.token)
    return ProjectContext(admin_cc, project, proj_client)
def test_group_grbs():
    """Binding a global role to an AD group grants admin rights to its
    members; deleting the binding revokes them."""
    groups = search_ad_groups(RANCHER_AUTH_GROUP, ADMIN_TOKEN)
    admin_client = get_admin_client()
    r = requests.post(CATTLE_AUTH_URL, json={
        'username': RANCHER_AUTH_USERNAME,
        'password': RANCHER_AUTH_PASSWORD,
        'responseType': 'json',
    }, verify=False)
    token = r.json()["token"]
    testuser3_client = rancher.Client(url=CATTLE_TEST_URL + "/v3",
                                      token=token, verify=False)
    # A plain user must not be able to create role templates.
    with pytest.raises(ApiError) as e:
        rt = testuser3_client.create_role_template(name="rt-" + random_str())
    assert e.value.error.status == 403
    assert e.value.error.code == 'Forbidden'
    # Grant admin to the whole group.
    gr = admin_client.create_global_role_binding(
        globalRoleId="admin",
        groupPrincipalId=groups[0]["id"])

    def try_create_role_template():
        # Poll until the group binding takes effect.
        try:
            return testuser3_client.create_role_template(
                name="rt-" + random_str())
        except ApiError as e:
            assert e.error.status == 403
            # BUG FIX: in a plain except handler the exception itself is
            # bound to 'e'; there is no pytest '.value' wrapper, so
            # 'e.value.error.code' would raise AttributeError.
            assert e.error.code == 'Forbidden'
            return False

    rt = wait_for(try_create_role_template)
    admin_client.delete(gr)
    # once user is no longer admin, they will be unable to see local cluster
    # this is less wasteful than attempting to create roletemplates until
    # unauthorized error is encountered
    wait_for(lambda: len(testuser3_client.list_cluster()) == 0)
    try:
        testuser3_client.create_role_template(name="rt-" + random_str())
    except ApiError as e:
        assert e.error.status == 403
        assert e.error.code == 'Forbidden'
def user_mc(admin_mc):
    """Returns a ManagementContext for a newly created standard user"""
    admin = admin_mc.client
    username, password = random_str(), random_str()
    new_user = admin.create_user(username=username, password=password)
    admin.create_global_role_binding(userId=new_user.id, globalRoleId='user')
    login = requests.post(AUTH_URL, json={
        'username': username,
        'password': password,
        'responseType': 'json',
    }, verify=False)
    user_client = rancher.Client(url=BASE_URL, token=login.json()['token'],
                                 verify=False)
    return ManagementContext(user_client)
def admin_pc(request, admin_cc):
    """Returns a ProjectContext for a newly created project in the local
    cluster for the default global admin user. The project will be deleted
    when this fixture is cleaned up."""
    mgmt = admin_cc.management.client
    project = mgmt.create_project(name='test-' + random_str(),
                                  clusterId=admin_cc.cluster.id)
    project = mgmt.wait_success(project)
    wait_for_condition("BackingNamespaceCreated", "True",
                       admin_cc.management.client, project)
    assert project.state == 'active'
    # remove the project at fixture teardown
    request.addfinalizer(lambda: admin_cc.management.client.delete(project))
    proj_client = rancher.Client(url=project.links.self + '/schemas',
                                 verify=False, token=mgmt.token)
    return ProjectContext(admin_cc, project, proj_client)
def _create_user(globalRoleId='user'):
    """Create a user bound to *globalRoleId* and return a ManagementContext
    logged in as that user (user and binding are cleaned up afterwards)."""
    admin = admin_mc.client
    username, password = random_str(), random_str()
    user = admin.create_user(username=username, password=password)
    remove_resource(user)
    grb = admin.create_global_role_binding(userId=user.id,
                                           globalRoleId=globalRoleId)
    remove_resource(grb)
    login = requests.post(AUTH_URL, json={
        'username': username,
        'password': password,
        'responseType': 'json',
    }, verify=False)
    protect_response(login)
    user_client = rancher.Client(url=BASE_URL, token=login.json()['token'],
                                 verify=False)
    return ManagementContext(user_client, user=user)
def test_deploy_rancher_server():
    """Deploy a Rancher server on a fresh EC2 node, create a 5-node custom
    cluster against it, and write the server URL and admin token to
    env_file."""
    RANCHER_SERVER_CMD = \
        "docker run -d --restart=unless-stopped -p 80:80 -p 443:443 " + \
        "rancher/rancher"
    RANCHER_SERVER_CMD += ":" + RANCHER_SERVER_VERSION
    aws_nodes = AmazonWebServices().create_multiple_nodes(
        1, random_test_name("testsa" + HOST_NAME))
    aws_nodes[0].execute_command(RANCHER_SERVER_CMD)
    time.sleep(120)  # give the container time to start before polling
    RANCHER_SERVER_URL = "https://" + aws_nodes[0].public_ip_address
    print(RANCHER_SERVER_URL)
    wait_until_active(RANCHER_SERVER_URL)
    token = get_admin_token(RANCHER_SERVER_URL)
    aws_nodes = AmazonWebServices().create_multiple_nodes(
        5, random_test_name("testcustom"))
    node_roles = [["controlplane"], ["etcd"],
                  ["worker"], ["worker"], ["worker"]]
    client = rancher.Client(url=RANCHER_SERVER_URL + "/v3",
                            token=token, verify=False)
    cluster = client.create_cluster(
        name=random_name(),
        driver="rancherKubernetesEngine",
        rancherKubernetesEngineConfig=rke_config)
    # NOTE(review): clusters are normally "provisioning" immediately after
    # creation (cf. create_custom_cluster) — confirm "active" is intended.
    assert cluster.state == "active"
    i = 0
    for aws_node in aws_nodes:
        docker_run_cmd = get_custom_host_registration_cmd(
            client, cluster, node_roles[i], aws_node)
        aws_node.execute_command(docker_run_cmd)
        i += 1
    validate_cluster_state(client, cluster)
    env_details = "env.CATTLE_TEST_URL='" + RANCHER_SERVER_URL + "'\n"
    env_details += "env.ADMIN_TOKEN='" + token + "'\n"
    # BUG FIX: use a context manager so the handle is closed even on error,
    # and stop shadowing the 'file' builtin.
    with open(env_file, "w") as env_fh:
        env_fh.write(env_details)
def _create_user(globalRoleId='user'):
    """Create a user (credentials padded past the password minimum), bind
    the given global role, and return a ManagementContext for that user."""
    admin = admin_mc.client
    # User creation will fail if password < minimum (default: 12) or
    # username == password. Since random_str concatenates a random number
    # plus seconds since epoch, this ensures no collisions
    username = random_str() + "username"
    password = random_str() + "password"
    user = admin.create_user(username=username, password=password)
    remove_resource(user)
    grb = admin.create_global_role_binding(userId=user.id,
                                           globalRoleId=globalRoleId)
    remove_resource(grb)
    login = requests.post(AUTH_URL, json={
        'username': username,
        'password': password,
        'responseType': 'json',
    }, verify=False)
    protect_response(login)
    user_client = rancher.Client(url=BASE_URL, token=login.json()['token'],
                                 verify=False)
    return ManagementContext(user_client, user=user)
def get_admin_client():
    """Return a v3 management API client authenticated as the admin."""
    admin_client = rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN,
                                  verify=False)
    return admin_client
def get_admin_client_v1():
    """Return a v1 API client for the server authenticated as the admin.

    In fact this is the cluster client for the local cluster.
    """
    endpoint = CATTLE_TEST_URL + "/v1"
    return rancher.Client(url=endpoint, token=ADMIN_TOKEN, verify=False)
import rancher
import json

# Scratch script poking a v2-beta Rancher API.
r = rancher.Client(url="http://127.0.0.1:8080/v2-beta",
                   access_key="16AE439773F86224BB9D",
                   secret_key="ProvbtkakdHCMJwU12fpXoiW9wSiXUGUcPw9Ned4")

# project: '1a5'
# stack: '1st5'
# service: '1s7'
a = r.by_id_project('1a5')
st = r.by_id_stack('1st5')
# BUG FIX: the comments above declare service '1s7', but the code fetched
# '1s5'; the project and stack ids both match their comments, so '1s5' was
# the typo.
s = r.by_id_service('1s7')

ss = a.services()
ss.create_service(name="s1")
print(a.services())
GL = gitlab.Gitlab(os.getenv('GITLAB_URL', 'https://gitlab.company.com'), private_token=os.getenv('GITLAB_TOKEN', 'GITLAB_TOKEN')) projects_regexp = re.compile(r"-([0-9]{1,})-[\s\S-]{1,24}$") RANCHER_ACCESS_KEY = os.getenv('RANCHER_ACCESS_KEY', 'RANCHER_ACCESS_KEY') RANCHER_SECRET_KEY = os.getenv('RANCHER_SECRET_KEY', 'RANCHER_SECRET_KEY') RANCHER_URL = os.getenv('RANCHER_URL', 'https://rancher.company.com/v3') RANCHER_SSL_VERIFY = bool(int(os.getenv('RANCHER_SSL_VERIFY', '1'))) RANCHER_CLUSTER_ID = os.getenv('RANCHER_CLUSTER_ID', 'c-xxxxx') RANCHER_DEFAULT_PROJECT_ID = os.getenv('RANCHER_DEFAULT_PROJECT_ID', 'p-yyyyy') RANCHER_PROJECT_PSP = os.getenv('RANCHER_PROJECT_PSP', None) rancher_client = rancher.Client(url=RANCHER_URL, access_key=RANCHER_ACCESS_KEY, secret_key=RANCHER_SECRET_KEY, verify=RANCHER_SSL_VERIFY) @app.route('/healthz', methods=['GET']) def healthz(): return jsonify({"response": "OK"}) @app.route('/autostop/workloads', methods=['POST']) def workload_records(): request_info = request.get_json() # ignored Namespaces if request_info['request']["namespace"] in IGNORE_NAMESPACE: return admission_response(True, "Workload in ignore namespaces") try:
def cc(mc):
    """ClusterContext for the 'local' cluster using mc's credentials."""
    local_cluster = mc.client.by_id_cluster('local')
    schemas_url = local_cluster.links['self'] + '/schemas'
    cluster_client = rancher.Client(url=schemas_url, verify=False,
                                    token=mc.client._token)
    return ClusterContext(mc, local_cluster, cluster_client)
def get_user_client():
    """Return a v3 management API client authenticated as the test user."""
    user_client = rancher.Client(url=CATTLE_API_URL, token=USER_TOKEN,
                                 verify=False)
    return user_client
def get_project_client_for_token(project, token):
    """Return a project-scoped API client for *project* using *token*."""
    schemas_url = project.links['self'] + '/schemas'
    return rancher.Client(url=schemas_url, token=token, verify=False)
def get_client_for_token(token):
    """Return a v3 management API client authenticated with *token*."""
    api = rancher.Client(url=CATTLE_API_URL, token=token, verify=False)
    return api
import sys
import random
import string

import rancher


def random_str():
    """Return an 8-character random alphanumeric string."""
    return ''.join(
        random.choice(string.ascii_letters + string.digits)
        for _ in range(8))


# usage: script.py <cluster_id> <access_key> <secret_key> <url>
cluster_id = sys.argv[1]
access = sys.argv[2]
secret = sys.argv[3]
url = sys.argv[4]

project_name = "p-" + random_str()
# BUG FIX: use the URL given on the command line; it was read into 'url'
# but then ignored in favor of a hard-coded ngrok tunnel address.
client = rancher.Client(url=url, access_key=access, secret_key=secret)
# BUG FIX: use the generated project name — it was computed but unused,
# and the project was hard-named "testing".
p = client.create_project(name=project_name, clusterId=cluster_id)
print('project id: ' + p.id)
client.delete(p)
def up(cluster, token):
    """Return a cluster-scoped API client for *cluster* using *token*."""
    endpoint = cluster.links['self'] + '/schemas'
    cluster_client = rancher.Client(url=endpoint, token=token, verify=False)
    return cluster_client
def user_project_client(user, project):
    """Returns a project level client for the user"""
    schemas_url = project.links.self + '/schemas'
    return rancher.Client(url=schemas_url,
                          token=user.client.token,
                          verify=False)