Example #1
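Loads a compose YAML file with yaml.safe_load and reports a missing or unparsable file as an AlaudaInputError that names the offending path.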
def _load_yaml(filepath):
    try:
        with open(filepath, 'r') as f:
            return yaml.safe_load(f)
    except (IOError, yaml.YAMLError):
        # An unreadable or malformed compose file surfaces as a single input error.
        raise AlaudaInputError(
            'Missing or invalid compose yaml file at {}.'.format(filepath))
Example #2
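Turns one service entry of a parsed compose file into a Service object; a missing image field is rejected with an AlaudaInputError before anything else is loaded.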
def load_service(service_name, service_data, namespace, region):
    image = service_data.get('image')
    if not image:
        raise AlaudaInputError('Compose file must specify image')
    image_name, image_tag = util.parse_image_name_tag(image)
    ports = load_ports(service_data)
    run_command = load_command(service_data)
    links = load_links(service_data)
    volumes = load_volumes(service_data)
    envvars = load_envvars(service_data)
    domain = load_domain(service_data)
    instance_num, instance_size = load_instance(service_data)
    scaling_mode, autoscaling_config = load_scaling_info(service_data)
    service = Service(name=service_name,
                      image_name=image_name,
                      image_tag=image_tag,
                      run_command=run_command,
                      instance_envvars=envvars,
                      instance_ports=ports,
                      volumes=volumes,
                      links=links,
                      target_num_instances=instance_num,
                      instance_size=instance_size,
                      namespace=namespace,
                      scaling_mode=scaling_mode,
                      autoscaling_config=autoscaling_config,
                      custom_domain_name=domain,
                      region_name=region)
    return service
Example #3
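Rewrites the raw command-line arguments before parsing: bare service subcommands get the implicit 'service' group inserted, and incomplete invocations are turned into help requests by appending '-h'. A short trace of concrete inputs follows the snippet.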
def patch_argv(argv):
    args = copy.copy(argv)

    if not args:
        raise AlaudaInputError('Arguments cannot be empty')

    if len(args) >= 2:
        if args[1] in ['create', 'run', 'scale', 'inspect', 'start', 'stop', 'rm',
                       'enable-autoscaling', 'disable-autoscaling', 'logs', 'ps',
                       'instances', 'instance', 'instance-logs', 'exec']:
            args.insert(1, 'service')

    if len(args) == 1:
        args.append('-h')
    elif len(args) == 2 and args[1] in ['service', 'compose', 'backup', 'organization', 'build', 'app']:
        args.append('-h')
    elif len(args) == 3:
        if args[1] == 'service' and args[2] in ['create', 'run', 'scale', 'inspect', 'start', 'stop', 'rm',
                                                'enable-autoscaling', 'disable-autoscaling', 'logs',
                                                'instances', 'instance', 'instance-logs', 'exec']:
            args.append('-h')
        elif args[1] == 'compose' and args[2] in ['scale']:
            args.append('-h')
        elif args[1] == 'backup' and args[2] in ['create', 'inspect', 'rm']:
            args.append('-h')
        elif args[1] == 'organization' and args[2] in ['create', 'inspect', 'update']:
            args.append('-h')
        elif args[1] == 'build' and args[2] in ['create']:
            args.append('-h')
        elif args[1] == 'app' and args[2] in ['create']:
            args.append('-h')

    return args[1:]
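Tracing the function above: patch_argv(['alauda', 'run', 'web']) inserts the implicit group and drops the program name, returning ['service', 'run', 'web']; patch_argv(['alauda']) returns ['-h']; and patch_argv(['alauda', 'service']) returns ['service', '-h'].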
Example #4
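Reads the saved API endpoint, token, and username from the Alauda config file; any failure to open or parse the file is reported as 'Please login first'.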
def load_token():
    try:
        with open(settings.ALAUDACFG, 'r') as f:
            config = json.load(f)
            api_endpoint = config['auth']['endpoint']
            token = config['auth']['token']
            username = config['username']
            return api_endpoint, token, username
    except (IOError, ValueError, KeyError):
        # A missing config file, malformed JSON, or absent keys all mean the
        # user has not logged in yet.
        raise AlaudaInputError('Please login first')
Example #5
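Builds a dependency graph from each service's links and hands it to util.topoSort; unknown link targets and circular dependencies are both rejected with AlaudaInputError.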
def toposort_services(compose_data):
    service_vertex = []
    service_edge = []
    src_keys = compose_data.keys()
    for key, value in compose_data.items():
        links = _get_linked_services(value.get('links'))
        if key not in service_vertex:
            service_vertex.append(key)
        if not set(links).issubset(set(src_keys)):
            raise AlaudaInputError("{} has invalid link name".format(links))
        else:
            for link in links:
                if link not in service_vertex:
                    service_vertex.append(link)
                service_edge.append((link, key))
    sorted_result = util.topoSort(service_vertex, service_edge)
    if sorted_result == -1:
        raise AlaudaInputError(
            "There is a cycle in your service dependency list")
    return sorted_result
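Both this example and Example #9 below rely on util.topoSort(vertex_list, edge_list), whose implementation is not part of these snippets. The following is a minimal sketch of what such a helper could look like, assuming Kahn's algorithm; it is an illustration only, and since this example compares the result against -1 while Example #9 checks for None, the real helper may signal a cycle differently than the None return used here.

def topoSort(vertex_list, edge_list):
    # Sketch only: the real util.topoSort in the codebase may differ.
    # Edges are (before, after) pairs, e.g. (link, service) in Example #5.
    in_degree = {v: 0 for v in vertex_list}
    adjacency = {v: [] for v in vertex_list}
    for before, after in edge_list:
        adjacency[before].append(after)
        in_degree[after] += 1

    # Repeatedly emit vertices with no unresolved dependencies left.
    queue = [v for v in vertex_list if in_degree[v] == 0]
    ordered = []
    while queue:
        vertex = queue.pop(0)
        ordered.append(vertex)
        for neighbor in adjacency[vertex]:
            in_degree[neighbor] -= 1
            if in_degree[neighbor] == 0:
                queue.append(neighbor)

    # Leftover vertices mean the dependency graph contains a cycle.
    if len(ordered) != len(vertex_list):
        return None
    return ordered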
Example #6
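Packs a source directory into a zip archive at target_path, raising AlaudaInputError when the given source is not a directory.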
    def _pack(self, source, target_path):
        print(
            '[alauda] Packing the source directory to {}'.format(target_path))

        if not os.path.isdir(source):
            raise AlaudaInputError(
                '{} is not a valid directory'.format(source))

        with zipfile.ZipFile(target_path, mode='w') as zf:
            for root, dirs, files in os.walk(source):
                for f in files:
                    zf.write(os.path.join(root, f),
                             os.path.relpath(os.path.join(root, f), source),
                             compress_type=zipfile.ZIP_DEFLATED)
Example #7
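Drives the build creation flow: code repositories are built directly, while FileUpload repositories require a source path that is packed, uploaded, and cleaned up before the build is triggered.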
    def create(self, repo_name, source, namespace, image_tag, commit_id):
        if not repo_name:
            raise AlaudaInputError(
                'Create build must specify repository name using -rn.')

        namespace = namespace or self.username
        repo_type = self._check_repo_type(repo_name, namespace)
        image_tag = self._check_image_tag(repo_name, namespace, image_tag)

        if repo_type == settings.BUILD_REPO_TYPE['code_repo']:
            self._trigger_build(repo_name, namespace, image_tag, commit_id)
            return True

        if not source:
            raise AlaudaInputError(
                "You need to specify source code path using -p when"
                "your repository's type is FileUpload.")
        source = os.path.abspath(source)
        timestamp = int(time.time() * 1000)
        target_name = '{}_{}.zip'.format(repo_name, timestamp)
        target_path = os.path.abspath(
            os.path.join(source, '..', target_name))

        self._pack(source, target_path)

        (upload_auth_headers, upload_bucket,
         upload_object_key) = self._get_upload_auth_info(target_path)

        try:
            self._upload(target_path, upload_auth_headers, upload_bucket,
                         upload_object_key)
        finally:
            self._clean(target_path)

        self._trigger_build(repo_name, namespace, image_tag, commit_id,
                            upload_object_key)
        return True
Example #8
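An iterative take on the same ordering problem as Example #5: services whose links are already satisfied are repeatedly moved into the sorted list.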
def sort_services(compose_data):
    src_dict = compose_data.copy()
    src_keys = set(src_dict.keys())
    sorted_list = []
    while len(src_dict) > 0:
        made_progress = False
        # Iterate over a snapshot so entries can be deleted during the pass.
        for key, value in list(src_dict.items()):
            links = _get_linked_services(value.get('links'))
            if links is None:
                sorted_list.append(key)
                del src_dict[key]
                made_progress = True
            elif not set(links).issubset(src_keys):
                raise AlaudaInputError(
                    "{} has invalid link name".format(links))
            elif set(links).issubset(set(sorted_list)):
                sorted_list.append(key)
                del src_dict[key]
                made_progress = True
            else:
                continue
        if not made_progress:
            # Nothing could be scheduled in this pass: the remaining links form a cycle.
            raise AlaudaInputError(
                "There is a cycle in your service dependency list")
    return sorted_list
Example #9
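Recursively resolves 'extends' declarations across compose files, using util.topoSort on the file graph to detect circular extends and merging the inherited settings into the local service definition.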
def resolve_extends(compose_data, file_name, vertex_list, edge_list):
    for local_service_name, local_service_data in compose_data.items():
        if 'extends' in local_service_data.keys():
            extends = local_service_data['extends']
            extends_file_name = os.path.abspath(extends['file'])
            original_service_name = extends['service']
            if extends_file_name not in vertex_list:
                vertex_list.append(extends_file_name)
            edge = (file_name, extends_file_name)
            if edge not in edge_list:
                edge_list.append(edge)
            vertex_list_tmp = deepcopy(vertex_list)
            edge_list_tmp = deepcopy(edge_list)
            result = util.topoSort(vertex_list_tmp, edge_list_tmp)
            if result is None:
                raise AlaudaInputError(
                    'There is a circular dependency in extends definitions')
            original_compose_data = _load_yaml(extends_file_name)
            resolve_extends(original_compose_data, extends_file_name,
                            vertex_list, edge_list)
            original_service_data = original_compose_data[
                original_service_name]
            # Merge inherited settings; links and volumes_from never propagate,
            # and values already set locally take precedence.
            for key, value in original_service_data.items():
                if key == 'links' or key == 'volumes_from':
                    continue
                elif key == 'ports':
                    merge_ports(local_service_data, value)
                elif key == 'expose':
                    merge_expose(local_service_data, value)
                elif key == 'environment':
                    merge_environment(local_service_data, value)
                elif key == 'volumes':
                    merge_volumes(local_service_data, value)
                elif key not in local_service_data.keys():
                    local_service_data[key] = value
                else:
                    continue