def _parse_instance_port(_port): result = _port.split('/') if len(result) > 2: raise AlaudaInputError('Invalid port description. (Example of valid description: 80/tcp)') try: if len(result) == 1 and _port.find(':') > -1: result = _port.split(':') result = [result[0]] port = int(result[0]) except: raise AlaudaInputError('Invalid port description. (Example of valid description: 80/tcp)') if len(result) == 2: protocol = result[1] else: if port == 80: protocol = 'http' else: protocol = 'tcp' if protocol not in ['tcp', 'http', 'internal']: raise AlaudaInputError('Invalid port protocal. Supported protocols: {tcp}') if port < 0 or port > 65535: raise AlaudaInputError('Invalid port number') return port, 'tcp', protocol
def _check_image_tag(self, repo_name, namespace, image_tag):
    """Validate *image_tag* against the repository's configured tags.

    When no tag is supplied and the repo has exactly one configured tag,
    that tag is adopted.  Raises AlaudaInputError when the tag is
    ambiguous (several tags, none chosen) or unknown.
    """
    print('[alauda] Checking if the image tag is valid')
    url = '{}repositories/{}/{}'.format(self.api_endpoint, namespace, repo_name)
    response = requests.get(url, headers=self.headers)
    util.check_response(response)
    data = json.loads(response.text)
    tags = [cfg['docker_repo_tag']
            for cfg in data['build_config']['tag_configs']]
    if not image_tag:
        if len(tags) == 1:
            print('[alauda] using {} as the image tag.'.format(tags[0]))
            return tags[0]
        if len(tags) > 1:
            raise AlaudaInputError(
                'please specify an image tag using -t, here is the '
                'tag list for your repo: {}'.format(tags))
        return image_tag
    if image_tag not in tags:
        raise AlaudaInputError(
            '"{}" is not a valid tag, here is the tag list for your repo: '
            '{}'.format(image_tag, tags))
    return image_tag
def parse_volume(_volume):
    """Parse a volume description 'path[:size[:backup_id]]'.

    The size defaults to 10 and must lie within
    [VOLUME_MIN_SIZE, VOLUME_MAX_SIZE].  Returns (path, size, backup_id)
    with backup_id None when absent.

    Raises AlaudaInputError on malformed input.
    """
    if not isinstance(_volume, str):
        raise AlaudaInputError('Invalid volume description. (Example of valid description: /var/lib/data1:10:[backup_id])')
    result = _volume.split(':')
    if len(result) == 1:
        result.append('10')  # default volume size
    if len(result) != 2 and len(result) != 3:
        raise AlaudaInputError('Invalid volume description. (Example of valid description: /var/lib/data1:10:[backup_id])')
    path = result[0]
    try:
        size = int(result[1])
        if size < VOLUME_MIN_SIZE or size > VOLUME_MAX_SIZE:
            raise AlaudaInputError(
                'Invalid volume size {0}. Volume size must be between {1} and {2}'.format(
                    size, VOLUME_MIN_SIZE, VOLUME_MAX_SIZE))
        backup_id = None
        if len(result) == 3:
            backup_id = result[2]
    except AlaudaInputError:
        raise
    except ValueError:
        # Non-numeric size.  Removed a leftover py2 debug statement
        # (print "except") and narrowed the bare except to ValueError.
        raise AlaudaInputError('Invalid volume description. (Example of valid description: /var/lib/data1:10:[backup_id])')
    return path, size, backup_id
def _parse_link(_link): if not isinstance(_link, str): raise AlaudaInputError('Invalid link description. (Example of valid description: mysql:db)') result = _link.split(':') if len(result) > 2: raise AlaudaInputError('Invalid link description. (Example of valid description: mysql:db)') if len(result) == 1 or len(result[1]) == 0: return result[0], result[0] return result[0], result[1]
def _parse_scale(_name_number): result = _name_number.split('=') if len(result) != 2: raise AlaudaInputError('Invalid scale description. (Example of valid description: mysql=3)') name = result[0] try: number = int(result[1]) except: raise AlaudaInputError('Invalid scale description. (Example of valid description: mysql=3)') return name, number
def load_service(service_name, service_data, namespace, region):
    """Build a Service object from a single compose-file service entry.

    Raises AlaudaInputError when the entry does not specify an image.
    """
    image = service_data.get('image')
    if not image:
        raise AlaudaInputError('Compose file must specify image')
    image_name, image_tag = util.parse_image_name_tag(image)
    # Gather every optional section of the service description.
    ports = load_ports(service_data)
    run_command = load_command(service_data)
    links = load_links(service_data)
    volumes = load_volumes(service_data)
    envvars = load_envvars(service_data)
    domain = load_domain(service_data)
    instance_num, instance_size = load_instance(service_data)
    scaling_mode, autoscaling_config = load_scaling_info(service_data)
    return Service(
        name=service_name, image_name=image_name, image_tag=image_tag,
        run_command=run_command, instance_envvars=envvars,
        instance_ports=ports, volumes=volumes, links=links,
        target_num_instances=instance_num, instance_size=instance_size,
        namespace=namespace, scaling_mode=scaling_mode,
        autoscaling_config=autoscaling_config, custom_domain_name=domain,
        region_name=region)
def parse_envvar(_envvar):
    """Parse an environment variable given as a one-entry dict or a string.

    Accepts {'FOO': 'foo'}, 'FOO=foo' or 'FOO:foo' and returns a
    (key, value) pair of strings; a None dict value maps to ''.

    Raises AlaudaInputError for any other shape.
    """
    def _parse_envvar_dict(_envvar):
        # Must be a single-entry mapping.
        if len(_envvar) != 1:
            raise AlaudaInputError('Invalid environment variable. (Example of valid description: FOO=foo)')
        # next(iter(...)) works on both py2 and py3; keys()[0] breaks on py3.
        key = next(iter(_envvar))
        value = _envvar[key]
        if value is None:
            value = ''
        return key, str(value)

    def _parse_envvar_str(_envvar):
        # '=' takes precedence over ':' as the separator.
        pos = _envvar.find('=')
        if pos == -1:
            pos = _envvar.find(':')
        if pos == -1:
            raise AlaudaInputError('Invalid environment variable. (Example of valid description: FOO=foo)')
        return _envvar[:pos], _envvar[pos + 1:]

    if isinstance(_envvar, dict):
        return _parse_envvar_dict(_envvar)
    elif isinstance(_envvar, str):
        return _parse_envvar_str(_envvar)
    else:
        raise AlaudaInputError('Invalid environment variable. (Example of valid description: FOO=foo)')
def _load_yaml(filepath):
    """Load and parse the compose yaml file at *filepath*.

    Raises AlaudaInputError when the file is missing, unreadable, or not
    valid yaml.
    """
    try:
        with open(filepath, 'r') as f:
            return yaml.safe_load(f)
    except (IOError, OSError, yaml.YAMLError):
        # Narrowed from a bare except: file errors or bad yaml only.
        raise AlaudaInputError(
            'Missing or invalid compose yaml file at {}.'.format(filepath))
def patch_argv(argv):
    """Normalise raw argv for the command dispatcher.

    Bare service sub-commands get a 'service' prefix inserted, and '-h'
    is appended whenever a (sub)command is given without arguments.
    Returns argv with the program name stripped.

    Raises AlaudaInputError when *argv* is empty.
    """
    service_commands = [
        'create', 'run', 'scale', 'inspect', 'start', 'stop', 'rm',
        'enable-autoscaling', 'disable-autoscaling', 'logs', 'ps',
        'instances', 'instance', 'instance-logs', 'exec']
    # Sub-commands (per top-level command) that get '-h' when given bare.
    # Note: 'service ps' deliberately absent, matching the dispatcher.
    bare_subcommands = {
        'service': [c for c in service_commands if c != 'ps'],
        'compose': ['scale'],
        'backup': ['create', 'inspect', 'rm'],
        'organization': ['create', 'inspect', 'update'],
        'build': ['create'],
        'app': ['create'],
    }
    args = copy.copy(argv)
    if not args:
        raise AlaudaInputError('Arguments cannot be empty')
    if len(args) >= 2 and args[1] in service_commands:
        args.insert(1, 'service')
    if len(args) == 1:
        args.append('-h')
    elif len(args) == 2 and args[1] in bare_subcommands:
        args.append('-h')
    elif len(args) == 3 and args[2] in bare_subcommands.get(args[1], []):
        args.append('-h')
    return args[1:]
def parse_image_name_tag(image):
    """Split 'name[:tag]' into (name, tag); tag defaults to 'latest'.

    Raises AlaudaInputError when more than one ':' is present.
    """
    parts = image.split(':')
    if len(parts) == 1:
        return image, 'latest'
    if len(parts) == 2:
        name, tag = parts
        return name, tag
    raise AlaudaInputError('Invalid image name')
def _parse_envvar_dict(_envvar): if len(_envvar) != 1: raise AlaudaInputError('Invalid environment variable. (Example of valid description: FOO=foo)') key = _envvar.keys()[0] value = _envvar[key] if value is None: value = '' return key, str(value)
def load_token():
    """Load (api_endpoint, token, username) from the alauda config file.

    Raises AlaudaInputError when the config file is missing, unreadable,
    not valid json, or lacks one of the expected keys.
    """
    try:
        with open(settings.ALAUDACFG, 'r') as f:
            config = json.load(f)
        api_endpoint = config['auth']['endpoint']
        token = config['auth']['token']
        username = config['username']
        return api_endpoint, token, username
    except (IOError, OSError, ValueError, KeyError, TypeError):
        # Narrowed from a bare except: file errors, bad json
        # (JSONDecodeError subclasses ValueError), or missing keys.
        raise AlaudaInputError('Please login first')
def parse_autoscale_info(info):
    """Parse the (mode, cfg_file) auto-scaling CLI option.

    Returns ('AUTO', <json string>) when a truthy mode and a readable
    json config file are given, otherwise ('MANUAL', {}).

    Raises AlaudaInputError when the config file cannot be opened or is
    not valid json.
    """
    if info is None:
        return 'MANUAL', {}
    mode, cfg_file = info[0], info[1]
    if not mode:
        return 'MANUAL', {}
    try:
        # open() replaces the py2-only file() builtin.
        fp = open(cfg_file)
    except (IOError, OSError):
        raise AlaudaInputError('can not open auto-scaling config file-> {}.'.format(cfg_file))
    try:
        cfg_json = json.load(fp)
    except ValueError:
        raise AlaudaInputError('Parse {} fail! The format refer to ./auto-scaling.cfg'.format(cfg_file))
    finally:
        # The original closed the file on both paths; finally covers both.
        fp.close()
    return 'AUTO', json.dumps(cfg_json)
def toposort_services(compose_data):
    """Topologically sort compose services by their 'links' dependencies.

    Builds a (vertex, edge) graph where an edge (link, service) means
    *link* must start before *service*, then delegates to util.topoSort.

    Raises AlaudaInputError on an unknown link name or a dependency cycle.
    """
    vertices = []
    edges = []
    known = set(compose_data.keys())
    for name, data in compose_data.items():
        linked = _get_linked_services(data.get('links'))
        if name not in vertices:
            vertices.append(name)
        if not set(linked).issubset(known):
            raise AlaudaInputError("{} has invalid link name".format(linked))
        for dep in linked:
            if dep not in vertices:
                vertices.append(dep)
            edges.append((dep, name))
    ordered = util.topoSort(vertices, edges)
    # NOTE(review): a cycle is detected here by comparing against -1,
    # while resolve_extends checks topoSort's result against None --
    # confirm which sentinel util.topoSort actually returns.
    if ordered == -1:
        raise AlaudaInputError(
            "there is a circle in your service depended list")
    return ordered
def _parse_envvar_str(_envvar): pos = _envvar.find('=') if pos != -1: key = _envvar[:pos] value = _envvar[pos + 1:] return key, value else: pos = _envvar.find(':') if pos == -1: raise AlaudaInputError('Invalid environment variable. (Example of valid description: FOO=foo)') key = _envvar[:pos] value = _envvar[pos + 1:] return key, value
def merge_internal_external_ports(ports, exposes):
    """Convert 'expose' entries into internal-endpoint port dicts.

    Entries already present in *ports* are skipped, as are duplicates.
    Returns [] when *exposes* is None.  Raises AlaudaInputError for a
    non-numeric or out-of-range port.
    """
    merged = []
    if exposes is None:
        return merged
    for raw in exposes:
        if not str(raw).isdigit() or int(raw) < 0 or int(raw) > 65535:
            raise AlaudaInputError('Invalid port number')
        port = int(raw)
        if port in ports:
            continue
        entry = {'container_port': port,
                 'protocol': 'tcp',
                 'endpoint_type': 'internal-endpoint'}
        if entry not in merged:
            merged.append(entry)
    return merged
def _pack(self, source, target_path): print( '[alauda] Packing the source directory to {}'.format(target_path)) if not os.path.isdir(source): raise AlaudaInputError( '{} is not a valid directory'.format(source)) with zipfile.ZipFile(target_path, mode='w') as zf: for root, dirs, files in os.walk(source): for f in files: zf.write(os.path.join(root, f), os.path.relpath(os.path.join(root, f), source), compress_type=zipfile.ZIP_DEFLATED)
def create(self, repo_name, source, namespace, image_tag, commit_id):
    """Trigger a build for *repo_name*.

    Code repositories are triggered directly; FileUpload repositories
    are packed from *source*, uploaded, and then triggered.  Returns
    True on success.

    Raises AlaudaInputError when repo_name is missing, or when a
    FileUpload repo is built without a source path.
    """
    if not repo_name:
        raise AlaudaInputError(
            'Create build must specify repository name using -rn.')
    namespace = namespace or self.username
    repo_type = self._check_repo_type(repo_name, namespace)
    image_tag = self._check_image_tag(repo_name, namespace, image_tag)
    if repo_type == settings.BUILD_REPO_TYPE['code_repo']:
        self._trigger_build(repo_name, namespace, image_tag, commit_id)
        return True
    if not source:
        # Message fix: the two literals used to concatenate to "whenyour".
        raise AlaudaInputError(
            "You need to specify source code path using -p when "
            "your repository's type is FileUpload.")
    source = os.path.abspath(source)
    # Millisecond timestamp keeps successive archives of one repo unique.
    timestamp = int(time.time() * 1000)
    target_name = '{}_{}.zip'.format(repo_name, timestamp)
    # Place the archive next to (one level above) the source directory.
    target_path = os.path.abspath(
        os.path.join(os.path.join(source, '..'), target_name))
    self._pack(source, target_path)
    (upload_auth_headers, upload_bucket,
     upload_object_key) = self._get_upload_auth_info(target_path)
    try:
        self._upload(target_path, upload_auth_headers, upload_bucket,
                     upload_object_key)
    finally:
        # Always remove the local archive, even when the upload fails.
        self._clean(target_path)
    self._trigger_build(repo_name, namespace, image_tag, commit_id,
                        upload_object_key)
    return True
def sort_services(compose_data):
    """Order compose services so that linked services come first.

    Services without links (links resolve to None) are emitted as soon
    as seen; others wait until every linked service is already emitted.

    Raises AlaudaInputError on an unknown link name, or when a full pass
    makes no progress (circular links) -- the original looped forever in
    that case.
    """
    pending = compose_data.copy()
    all_names = list(pending.keys())
    ordered = []
    while pending:
        progressed = False
        # Iterate over a snapshot: deleting from a dict while iterating
        # its view raises RuntimeError on python 3.
        for name, data in list(pending.items()):
            links = _get_linked_services(data.get('links'))
            if links is None:
                ordered.append(name)
                del pending[name]
                progressed = True
            elif not set(links).issubset(set(all_names)):
                raise AlaudaInputError(
                    "{} has invalid link name".format(links))
            elif set(links).issubset(set(ordered)):
                ordered.append(name)
                del pending[name]
                progressed = True
        if not progressed:
            # Nothing became resolvable in a full pass: circular links.
            raise AlaudaInputError(
                "there is a circle in your service depended list")
    return ordered
def _check_repo_type(self, repo_name, namespace):
    """Return the repository's build type: FileUpload vs code repo.

    Queries the repository API and maps its code_repo_client onto
    settings.BUILD_REPO_TYPE.  Raises AlaudaInputError when the repo is
    not an automated-build repo.
    """
    print("[alauda] Checking the repository's type")
    url = (self.api_endpoint +
           'repositories/{}/{}'.format(namespace, repo_name))
    response = requests.get(url, headers=self.headers)
    util.check_response(response)
    data = json.loads(response.text)
    if not data['is_automated']:
        # Typo fix: the message used to read "buid".
        raise AlaudaInputError(
            '{} is not an automated build repo.'.format(repo_name))
    client = data['build_config']['code_repo_client']
    if client == 'FileUpload':
        print("[alauda] The repository's client type you specified "
              "is FileUpload")
        return settings.BUILD_REPO_TYPE['file']
    print("[alauda] The repository's client type you specified "
          "is {}".format(client))
    return settings.BUILD_REPO_TYPE['code_repo']
def parse_time(start_time, end_time):
    """Convert optional '%Y-%m-%d %H:%M:%S' strings to (start, end) epoch secs.

    Missing end defaults to now; missing start defaults to end - 900s.

    Raises AlaudaInputError on a malformed timestamp.  (Previously only
    the both-given branch wrapped the parse error; start-only/end-only
    leaked a raw ValueError.)
    """
    def _to_epoch(value):
        # Shared parse + validation; was duplicated per branch with a
        # bare except on one of them.
        try:
            return int(time.mktime(time.strptime(value, '%Y-%m-%d %H:%M:%S')))
        except (ValueError, TypeError, OverflowError):
            raise AlaudaInputError(
                'Please make sure time format like 2015-05-01 12:00:00')

    if start_time is not None and end_time is not None:
        return _to_epoch(start_time), _to_epoch(end_time)
    if start_time is None and end_time is None:
        end = int(time.time())
        return end - 900, end
    if start_time is not None:
        return _to_epoch(start_time), int(time.time())
    end = _to_epoch(end_time)
    return end - 900, end
def resolve_extends(compose_data, file_name, vertex_list, edge_list):
    """Recursively merge 'extends' definitions into each service in place.

    compose_data: parsed compose dict for *file_name*; mutated in place.
    file_name: absolute path of the compose file being resolved.
    vertex_list / edge_list: accumulated file-dependency graph, shared
        across recursive calls so circular 'extends' chains are caught.

    Raises AlaudaInputError when the extends files form a cycle.
    """
    for local_service_name, local_service_data in compose_data.items():
        if 'extends' in local_service_data.keys():
            extends = local_service_data['extends']
            extends_file_name = os.path.abspath(extends['file'])
            original_service_name = extends['service']
            # Record this file -> extended file in the dependency graph.
            if extends_file_name not in vertex_list:
                vertex_list.append(extends_file_name)
            edge = (file_name, extends_file_name)
            if edge not in edge_list:
                edge_list.append(edge)
            # Copies are passed to topoSort -- presumably because it
            # mutates its inputs; TODO confirm.
            vertex_list_tmp = deepcopy(vertex_list)
            edge_list_tmp = deepcopy(edge_list)
            result = util.topoSort(vertex_list_tmp, edge_list_tmp)
            # NOTE(review): a cycle is signalled here by a None result,
            # while toposort_services compares topoSort's result against
            # -1 -- confirm which sentinel topoSort actually returns.
            if result is None:
                raise AlaudaInputError(
                    'There is a circular dependency in extends definitions')
            # Resolve the parent file first so its own extends chain is
            # already applied before merging it into this service.
            original_compose_data = _load_yaml(extends_file_name)
            resolve_extends(original_compose_data, extends_file_name,
                            vertex_list, edge_list)
            original_service_data = original_compose_data[
                original_service_name]
            for key, value in original_service_data.items():
                # links/volumes_from are never inherited; mergeable keys
                # are merged; anything else is inherited only when not
                # already set locally.
                if key == 'links' or key == 'volumes_from':
                    continue
                elif key == 'ports':
                    merge_ports(local_service_data, value)
                elif key == 'expose':
                    merge_expose(local_service_data, value)
                elif key == 'environment':
                    merge_environment(local_service_data, value)
                elif key == 'volumes':
                    merge_volumes(local_service_data, value)
                elif key not in local_service_data.keys():
                    local_service_data[key] = value
                else:
                    continue