def internal_haproxy(self):
    """Build the creation payload for an internal redis service (tcp/6379)."""
    # One shared-ip tcp endpoint, container and service both on 6379.
    redis_endpoint = {
        "ipaddress": "sharedip",
        "service_port": 6379,
        "endpoint_type": "internal-endpoint",
        "protocol": "tcp",
        "container_port": 6379,
    }
    image = "{}/{}".format(self.get_image_registry(),
                           get_repo().get("service_repo", "library/redis"))
    return {
        "target_num_instances": 1,
        "service_name": "redis1",
        "linked_to_apps": "{}",
        "region_name": self.region_name,
        "instance_size": "XXS",
        "scaling_mode": "MANUAL",
        "namespace": self.namespace,
        "network_mode": "BRIDGE",
        "image_name": image,
        "image_tag": settings.IMAGE_TAG,
        "target_state": "STARTED",
        "instance_ports": [redis_endpoint],
        "space_name": settings.SPACE_NAME,
    }
def config_service(self):
    """Build the creation payload for a service that mounts a config entry."""
    # Mount a single config item (key "name2") at a fixed container path.
    config_mount = {
        "path": "/home/config_path",
        "type": "config",
        "value": {"name": self.config_name, "key": "name2"},
    }
    image = "{}/{}".format(self.get_image_registry(),
                           get_repo().get("service_repo", "alauda/hello-world"))
    payload = {
        "service_name": self.service_name,
        "health_checks": [],
        "image_name": image,
        "image_tag": settings.IMAGE_TAG,
        "instance_size": "XXS",
        "linked_to_apps": "{}",
        "load_balancer_choice": "ENABLE",
        "load_balancers": [],
        "namespace": self.namespace,
        "network_mode": "BRIDGE",
        "region_name": self.region_name,
        "scaling_mode": "MANUAL",
        "service_mode": "SINGLE",
        "target_num_instances": 1,
        "target_state": "STARTED",
        "mount_points": [config_mount],
        "space_name": settings.SPACE_NAME,
    }
    return payload
def haproxy_bridge_service(self):
    """Build the creation payload for a BRIDGE-mode service behind haproxy."""
    http_port = {
        "container_port": 80,
        "protocol": "tcp",
        "endpoint_type": "http-endpoint",
    }
    image = "{}/{}".format(self.get_image_registry(),
                           get_repo().get("service_repo", "vipertest/e2e"))
    return {
        "service_name": "haproxybridge" + self.service_name,
        "health_checks": [],
        "image_name": image,
        "image_tag": settings.IMAGE_TAG,
        "instance_envvars": {"ENV": "TestEnv"},
        "instance_ports": [http_port],
        "instance_size": "XXS",
        "linked_to_apps": "{}",
        "load_balancer_choice": "ENABLE",
        "load_balancers": [],
        "namespace": self.namespace,
        "network_mode": "BRIDGE",
        "region_name": self.region_name,
        "scaling_mode": "MANUAL",
        "service_mode": "SINGLE",
        "target_num_instances": 1,
        "target_state": "STARTED",
        "space_name": settings.SPACE_NAME,
    }
def k8s_innerelb_service(self):
    """Build a v2 FLANNEL service payload bound to an internal load balancer."""
    # Single http listener forwarding 80 -> 80 on the configured LB.
    balancer = {
        "listeners": [
            {"container_port": 80, "protocol": "http", "listener_port": 80},
        ],
        "type": self.lb_type,
        "name": self.alb_name,
        "load_balancer_id": self.lb_id,
    }
    image = "{}/{}".format(self.get_image_registry(),
                           get_repo().get("service_repo", "alauda/hello-world"))
    return {
        "region_name": self.region_name,
        "service_name": self.service_name,
        "instance_size": "XXS",
        "scaling_mode": "MANUAL",
        "namespace": settings.CLAAS_NAMESPACE,
        "image_name": image,
        "image_tag": settings.IMAGE_TAG,
        "network_mode": "FLANNEL",
        "target_state": "STARTED",
        "instance_envvars": {},
        "mipn_enabled": self.mipn_enabled,
        "version": "v2",
        "load_balancers": [balancer],
        "target_num_instances": 1,
        "ports": [80],
        "space_name": settings.SPACE_NAME,
    }
def node_tag_service(self):
    """Build a BRIDGE service payload pinned to nodes carrying self.node_tag."""
    web_endpoint = {
        "ipaddress": "sharedip",
        "service_port": 80,
        "endpoint_type": "http-endpoint",
        "protocol": "tcp",
        "container_port": 80,
    }
    image = "{}/{}".format(self.get_image_registry(),
                           get_repo().get("service_repo", "alauda/hello-world"))
    return {
        "region_name": self.region_name,
        "service_name": "test-node",
        "instance_size": "XXS",
        "scaling_mode": "MANUAL",
        "namespace": self.namespace,
        "image_name": image,
        "image_tag": settings.IMAGE_TAG,
        "network_mode": "BRIDGE",
        "target_state": "STARTED",
        "instance_ports": [web_endpoint],
        "target_num_instances": 1,
        "node_tag": self.node_tag,
        "space_name": settings.SPACE_NAME,
    }
def elb_bridge_external_service(self):
    """Build a BRIDGE service payload exposed through an external ELB."""
    external_lb = {
        "type": self.mode,
        "is_internal": False,
        "listeners": [
            {"protocol": "HTTP", "lb_port": 80, "container_port": 80},
        ],
    }
    image = "{}/{}".format(self.get_image_registry(),
                           get_repo().get("service_repo", "library/nginx"))
    return {
        "service_name": "elbbridge" + self.service_name,
        "namespace": self.namespace,
        "image_name": image,
        "image_tag": "latest",
        "region_name": self.region_name,
        "instance_size": "XXS",
        "network_mode": "BRIDGE",
        "scaling_mode": "MANUAL",
        "target_state": "STARTED",
        "target_num_instances": 1,
        "instance_envvars": {"ENV": "TestEnv"},
        "load_balancers": [external_lb],
        "instance_ports": [],
        "space_name": settings.SPACE_NAME,
    }
def elb_host_internal_service(self):
    """Build a HOST-mode service payload behind an internal ELB on 8080.

    Fix: "is_internal" was the string "true"; the sibling builder
    elb_bridge_external_service uses a real boolean (False), so this now
    uses boolean True for consistency and correct JSON serialization.
    """
    internal_lb = {
        "type": self.mode,
        "is_internal": True,
        "listeners": [
            {"protocol": "HTTP", "lb_port": 8080, "container_port": 8080},
        ],
    }
    image = "{}/{}".format(self.get_image_registry(),
                           get_repo().get("service_repo", "tutum/tomcat"))
    return {
        "service_name": "ehi" + self.service_name,
        "namespace": self.namespace,
        "image_name": image,
        "image_tag": "latest",
        "region_name": self.region_name,
        "instance_size": "XS",
        "network_mode": "HOST",
        "scaling_mode": "MANUAL",
        "target_state": "STARTED",
        "target_num_instances": 1,
        # HOST networking: the container claims port 8080 on the node directly.
        "required_ports": [8080],
        "load_balancers": [internal_lb],
        "instance_ports": [],
        "space_name": settings.SPACE_NAME,
    }
def trigger(pipeline_name):
    """Trigger *pipeline_name* via a repository event and verify it started.

    Looks up the configured repository, posts a "repository" trigger to the
    pipeline history endpoint, then checks that a "create" event was recorded.

    Returns {"success": True, "uuid": <history uuid>} on success, otherwise
    {"success": False, "total": <error description>}.
    """
    repo_name = get_repo().get("service_repo", "houchao-test")
    get_repo_url = "{}registries/{}/{}/repositories/{}".format(
        settings.API_URL, settings.CLAAS_NAMESPACE, settings.PRIVATE_REGISTRY,
        repo_name)
    r = requests.get(get_repo_url, headers=headers)
    if r.status_code != 200:
        return {"success": False,
                "total": "get repositories return code error: {}, error text:{}".format(
                    r.status_code, r.text)}
    # r.json() parses the body directly instead of json.loads(r.text).
    repo_detail = r.json()
    registry_uuid = repo_detail['registry']['uuid']
    repo_uuid = repo_detail['uuid']

    url = "{}pipelines/{}/history/{}".format(
        settings.API_URL, settings.CLAAS_NAMESPACE, pipeline_name)
    payload = {
        "namespace": settings.CLAAS_NAMESPACE,
        "pipeline": pipeline_name,
        "trigger": "repository",
        "data": {
            "image_tag": "latest",
            "registry_uuid": registry_uuid,
            "registry": settings.PRIVATE_REGISTRY,
            "repository": "{}/{}".format(settings.CLAAS_NAMESPACE, repo_name),
            "repository_uuid": repo_uuid,
            "type": "repository",
        },
    }
    r = requests.post(url, data=json.dumps(payload), headers=headers)
    if r.status_code != 200:
        return {"success": False,
                "total": "trigger pipeline return code error: {}, error text:{}".format(
                    r.status_code, r.text)}
    uuid = r.json()["uuid"]
    if not get_events(uuid, "create"):
        return {"success": False, "total": "this action do not have events"}
    return {"success": True, "uuid": uuid}
def ha_heathy_check_service(self):
    """Build a BRIDGE service payload with an HTTP health check on port 80."""
    http_check = {
        "protocol": "HTTP",
        "ignore_http1xx": False,
        "timeout_seconds": 20,
        "interval_seconds": 20,
        "max_consecutive_failures": 10,
        "path": "/",
        "port": 80,
        "grace_period_seconds": 100,
    }
    web_endpoint = {
        "ipaddress": "sharedip",
        "service_port": 80,
        "endpoint_type": "http-endpoint",
        "protocol": "tcp",
        "container_port": 80,
    }
    image = "{}/{}".format(self.get_image_registry(),
                           get_repo().get("service_repo", "alauda/hello-world"))
    return {
        "image_tag": "latest",
        "region_name": self.region_name,
        "service_name": self.service_name,
        "instance_size": "XXS",
        "scaling_mode": "MANUAL",
        "namespace": self.namespace,
        "image_name": image,
        "network_mode": "BRIDGE",
        "target_state": "STARTED",
        "health_checks": [http_check],
        "instance_envvars": {},
        "instance_ports": [web_endpoint],
        "target_num_instances": 1,
        # NOTE(review): string "True" here, while sibling builders pass
        # self.mipn_enabled / a boolean — confirm the API accepts the string.
        "mipn_enabled": "True",
        "space_name": settings.SPACE_NAME,
    }
def k8s_flannel_service(self):
    """Build a full-featured v2 FLANNEL service payload: LB, config mount,
    node selector and a host-path log volume."""
    balancer = {
        "type": self.lb_type,
        "version": 1,
        "name": self.alb_name,
        "load_balancer_id": self.lb_id,
        "listeners": [
            {"protocol": "http", "listener_port": 80,
             "container_port": 80, "rules": []},
        ],
    }
    raw_config_mount = {"path": "/home/abc", "type": "raw", "value": "config"}
    log_volume = {
        "app_volume_dir": "/var/log/",
        "volume_id": "host_path",
        "volume_name": "/var/log",
    }
    image = "{}/{}".format(self.get_image_registry(),
                           get_repo().get("service_repo",
                                          self.namespace + "/hello-world"))
    return {
        "service_name": self.service_name,
        "namespace": self.namespace,
        "image_name": image,
        "image_tag": settings.IMAGE_TAG,
        "region_name": self.region_name,
        "instance_size": "XXS",
        "network_mode": "FLANNEL",
        "scaling_mode": "MANUAL",
        "target_state": "STARTED",
        "target_num_instances": 1,
        "mipn_enabled": self.mipn_enabled,
        "ports": [80],
        "instance_envvars": {
            "k8s_key": "k8s_value",
            "__ALAUDA_FILE_LOG_PATH__": "/home/*.txt",
        },
        "load_balancers": [balancer],
        "mount_points": [raw_config_mount],
        # Pin instances to the node whose ip matches self.node_tag.
        "node_selector": {"ip": self.node_tag},
        "volumes": [log_volume],
        "instance_ports": [],
        "space_name": self.space_name,
        "version": "v2",
    }
def k8s_healthycheck_service(self):
    """Build a v2 FLANNEL service payload with an LB, a data volume and an
    HTTP health check on port 80."""
    balancer = {
        "type": self.lb_type,
        "name": self.alb_name,
        "load_balancer_id": self.lb_id,
        "listeners": [
            {"protocol": "http", "lb_port": 80,
             "container_port": 80, "domains": []},
        ],
    }
    data_volume = {
        "app_volume_dir": "/var/log/",
        "volume_id": self.volume_id,
        "volume_name": self.volume_name,
    }
    http_check = {
        "protocol": "HTTP",
        "ignore_http1xx": False,
        "timeout_seconds": 20,
        "interval_seconds": 20,
        "max_consecutive_failures": 10,
        "path": "/",
        "port": 80,
        "grace_period_seconds": 100,
    }
    image = "{}/{}".format(self.get_image_registry(),
                           get_repo().get("service_repo", "alauda/hello-world"))
    return {
        "service_name": self.service_name,
        "namespace": self.namespace,
        "image_name": image,
        "image_tag": settings.IMAGE_TAG,
        "region_name": self.region_name,
        "instance_size": "XXS",
        "network_mode": "FLANNEL",
        "scaling_mode": "MANUAL",
        "target_state": "STARTED",
        "target_num_instances": 1,
        "ports": [80],
        "instance_envvars": {},
        "load_balancers": [balancer],
        "volumes": [data_volume],
        "health_checks": [http_check],
        "instance_ports": [],
        "space_name": self.space_name,
        "version": "v2",
    }
def static_ip_service(self):
    """Build a STATIC_IP service payload bound to self.static_ip."""
    image = "{}/{}".format(self.get_image_registry(),
                           get_repo().get("service_repo", "library/redis"))
    payload = {
        "region_name": self.region_name,
        "service_name": self.service_name,
        "instance_size": "XXS",
        "scaling_mode": "MANUAL",
        "namespace": self.namespace,
        "image_name": image,
        "image_tag": settings.IMAGE_TAG,
        "network_mode": "STATIC_IP",
        "target_state": "STARTED",
        "instance_envvars": {},
        "private_ip": self.static_ip,
        "target_num_instances": 1,
        "space_name": settings.SPACE_NAME,
    }
    return payload
def review(author, branch):
    """Fetch *branch* from *author*'s remote and check it out for review.

    Aborts if the working tree has uncommitted changes. On first review of
    a branch a local tracking branch is created; otherwise the existing
    local branch is checked out and hard-reset to the remote tip.
    """
    repo = get_repo()
    if repo.is_dirty():
        print('The current branch has unstash changes')
        return
    remote = get_or_create_remote(repo, author)
    print('Fetching for new changes')
    remote.fetch(branch)
    print('Checkout to %s' % branch)
    # Idiom fix: "branch not in" instead of "not branch in".
    if branch not in repo.branches:
        repo.git.checkout('-t', '%s/%s' % (author, branch))
    else:
        repo.git.checkout(branch)
        # Discard any local divergence so the review matches the remote exactly.
        repo.git.reset('--hard', '%s/%s' % (author, branch))
def rawcontainer_bridge_service(self):
    """Build a BRIDGE service payload for a raw container needing port 22."""
    image = "{}/{}".format(self.get_image_registry(),
                           get_repo().get("service_repo", "tutum/ubuntu"))
    payload = {
        "service_name": "rb" + self.service_name,
        "namespace": self.namespace,
        "image_name": image,
        "image_tag": settings.IMAGE_TAG,
        "region_name": self.region_name,
        "instance_size": "XXS",
        "network_mode": "BRIDGE",
        "scaling_mode": "MANUAL",
        "target_state": "STARTED",
        "target_num_instances": 1,
        "required_ports": [22],
        "load_balancers": [],
        "instance_ports": [],
        "space_name": settings.SPACE_NAME,
    }
    return payload
def k8s_host_service(self):
    """Build a v2 HOST-network service payload with an envfile, an LB and a
    data volume."""
    balancer = {
        "type": self.lb_type,
        "name": self.alb_name,
        "load_balancer_id": self.lb_id,
        "listeners": [
            {"protocol": "http", "lb_port": 80,
             "container_port": 80, "domains": []},
        ],
    }
    data_volume = {
        "app_volume_dir": "/var/log/",
        "volume_id": self.volume_id,
        "volume_name": self.volume_name,
    }
    image = "{}/{}".format(self.get_image_registry(),
                           get_repo().get("service_repo", "alauda/hello-world"))
    return {
        "service_name": self.service_name,
        "namespace": self.namespace,
        "image_name": image,
        "image_tag": settings.IMAGE_TAG,
        "region_name": self.region_name,
        "instance_size": "XXS",
        "network_mode": "HOST",
        "scaling_mode": "MANUAL",
        "target_state": "STARTED",
        "target_num_instances": 1,
        "envfiles": [{"name": self.envfile}],
        "ports": [80],
        "instance_envvars": {},
        "load_balancers": [balancer],
        "volumes": [data_volume],
        "instance_ports": [],
        "space_name": self.space_name,
        "version": "v2",
    }
def https_service(self):
    """Build a FLANNEL service payload on port 80.

    NOTE(review): the original definition was syntactically invalid — the
    dict literal was never closed, the line ended with a stray "} ]," and
    nothing was returned; it appears truncated (likely losing a
    load_balancers section). Reconstructed minimally from the keys that
    survived — confirm against the intended spec.
    """
    new_service = {
        "service_name": self.service_name,
        "namespace": self.namespace,
        "image_name": "{}/{}".format(self.get_image_registry(),
                                     get_repo().get("service_repo",
                                                    "alauda/hello-world")),
        "image_tag": settings.IMAGE_TAG,
        "region_name": self.region_name,
        "instance_size": "XXS",
        "network_mode": "FLANNEL",
        "scaling_mode": "MANUAL",
        "target_state": "STARTED",
        "target_num_instances": 1,
        "ports": [80],
        "instance_envvars": {},
        "version": 1,
    }
    return new_service
def quota_service(self):
    """Build a minimal FLANNEL service payload used for quota testing."""
    image = "{}/{}".format(self.get_image_registry(),
                           get_repo().get("service_repo", "alauda/hello-world"))
    payload = {
        "service_name": self.service_name,
        "health_checks": [],
        "image_name": image,
        "image_tag": settings.IMAGE_TAG,
        "instance_ports": [],
        "instance_size": "XXS",
        "linked_to_apps": "{}",
        "load_balancer_choice": "ENABLE",
        "load_balancers": [],
        "namespace": self.namespace,
        "network_mode": "FLANNEL",
        "region_name": self.region_name,
        "scaling_mode": "MANUAL",
        "service_mode": "SINGLE",
        "target_num_instances": 1,
        "target_state": "STARTED",
        "space_name": self.space_name,
    }
    return payload
def rebase():
    """Rebase the current branch onto newscred/master and force-push it.

    Fix: the final progress message read "Force pusing"; corrected to
    "Force pushing".
    """
    repo = get_repo()
    if repo.is_dirty():
        print('The current branch has unstash changes')
        return
    print('Pulling changes from master')
    repo.remote('newscred').fetch('master')
    # NOTE(review): this dirty check runs *before* the rebase, so it seems
    # unlikely to ever fire here — presumably it was meant to catch rebase
    # conflicts; confirm the intended ordering before moving it.
    if repo.is_dirty():
        print('Please fix the rebase')
        return
    repo.git.rebase('newscred/master')
    print('Rebased to master')
    tracking_remote = get_remote_tracking_branch(repo)
    branch_name = repo.active_branch.name
    print('Force pushing to %s/%s' % (tracking_remote, branch_name))
    repo.git.push(tracking_remote, branch_name, force=True)
def merge(author, branch):
    """Fast-forward-merge *author*'s *branch* into an up-to-date master.

    Fetches the contributor's branch, updates master from newscred, then
    attempts a --ff-only merge; on failure the contributor is asked to
    rebase. Fix: success message read "Ready to be merge"; corrected to
    "Ready to be merged".
    """
    repo = get_repo()
    if repo.is_dirty():
        print('The current branch has unstash changes')
        return
    remote = get_or_create_remote(repo, author)
    print('Fetching for new changes')
    remote.fetch(branch)
    repo.git.checkout('master')
    print('Pulling changes from master')
    repo.remote('newscred').pull('master')
    try:
        # --ff-only guarantees master's history stays linear.
        repo.git.merge('--ff-only', '%s/%s' % (author, branch))
        print('Ready to be merged')
    except GitCommandError as error:
        print(error.stderr)
        print('Please make sure the branch %s/%s, is rebased' % (author, branch))
def heathy_check_service(self):
    """Build a BRIDGE service payload with an HTTP health check and an
    external load balancer on port 80."""
    http_check = {
        "protocol": "HTTP",
        "ignore_http1xx": False,
        "timeout_seconds": 20,
        "interval_seconds": 20,
        "max_consecutive_failures": 10,
        "path": "/",
        "port": 80,
        "grace_period_seconds": 100,
    }
    external_lb = {
        "listeners": [
            {"container_port": 80, "protocol": "HTTP", "lb_port": 80},
        ],
        "is_internal": False,
        "type": self.mode,
        "_network_type": "external",
    }
    image = "{}/{}".format(self.get_image_registry(),
                           get_repo().get("service_repo", "alauda/hello-world"))
    return {
        "image_tag": "latest",
        "region_name": self.region_name,
        "service_name": "heathy-check" + self.service_name,
        "instance_size": "XXS",
        "scaling_mode": "MANUAL",
        "namespace": self.namespace,
        "image_name": image,
        "network_mode": "BRIDGE",
        "target_state": "STARTED",
        "health_checks": [http_check],
        "instance_envvars": {},
        "load_balancers": [external_lb],
        "target_num_instances": 1,
        "space_name": settings.SPACE_NAME,
    }
def haproxy_host_service(self):
    """Build a HOST-network service payload fronted by haproxy on port 80."""
    http_port = {
        "container_port": 80,
        "protocol": "tcp",
        "endpoint_type": "http-endpoint",
    }
    image = "{}/{}".format(self.get_image_registry(),
                           get_repo().get("service_repo", "vipertest/e2e"))
    return {
        "service_name": "proxyhost" + self.service_name,
        "image_name": image,
        "image_tag": settings.IMAGE_TAG,
        "region_name": self.region_name,
        "instance_size": "XXS",
        "network_mode": "HOST",
        "scaling_mode": "MANUAL",
        "target_state": "STARTED",
        "target_num_instances": 1,
        # HOST networking: the container claims port 80 on the node directly.
        "required_ports": [80],
        "envfiles": [{"name": "notDelete"}],
        "load_balancers": [],
        "instance_ports": [http_port],
        "space_name": settings.SPACE_NAME,
    }
def fromPHID(phid):
    """Return the Repo for *phid*, memoized in phid_cache.

    Fix: the original checked phid_cache but never stored into it, so every
    cache miss re-fetched the repository. The constructed Repo is now cached
    before being returned.
    """
    if phid in phid_cache:
        return phid_cache[phid]
    raw = utils.get_repo(phid)
    repo = Repo(raw)
    phid_cache[phid] = repo
    return repo