def get_local_pem_url(self, pem_url):
    """Return the path of the local copy of a pem file.

    If the file is not already present under CERT_BASE, recover it
    from azure storage via the cryptor feature first.
    """
    pem_name = pem_url.split("/")[-1]
    local_pem_url = self.CERT_BASE + "/" + pem_name
    if not isfile(local_pem_url):
        self.log.debug("Recover local %s.pem file from azure storage %s" % (local_pem_url, pem_url))
        RequiredFeature("cryptor").recover_local_file(pem_url, local_pem_url)
    return local_pem_url
def is_pre_allocate_enabled(hackathon):
    """Return True when pre-allocation is enabled for an ONLINE hackathon.

    Offline hackathons never pre-allocate; otherwise the decision comes
    from the hackathon's basic-info property (defaults to enabled).
    """
    if hackathon.status != HACK_STATUS.ONLINE:
        return False
    manager = RequiredFeature("hackathon_manager")
    flag = manager.get_basic_property(hackathon, HACKATHON_BASIC_INFO.PRE_ALLOCATE_ENABLED, "1")
    return util.str2bool(flag)
def upload_files(self):
    """Handle uploaded files from http request.

    Validates the upload, stores every file through the storage feature
    and returns metadata about each stored image.
    """
    self.__validate_upload_files()
    storage = RequiredFeature("storage")
    uploaded = []
    for file_name in request.files:
        file_storage = request.files[file_name]
        self.log.debug("upload image file : " + file_name)
        saved = storage.save(Context(
            hackathon_name=g.hackathon.name,
            file_name=file_storage.filename,
            file_type=FILE_TYPE.HACK_IMAGE,
            content=file_storage))
        # saved.file_name is a random name created by server;
        # file_storage.filename is the original client-side name
        uploaded.append({
            "name": file_storage.filename,
            "url": saved.url,
            "thumbnailUrl": saved.url,
            "deleteUrl": '/api/admin/file?key=' + saved.file_name})
    return {"files": uploaded}
def run_job(mdl_cls_func, cls_args, func_args, second=DEFAULT_TICK):
    """Schedule a one-shot job `second` seconds from now.

    The scheduler feature runs `call` once at the computed date with the
    given module/class/function descriptor and its arguments.
    """
    run_date = get_now() + timedelta(seconds=second)
    RequiredFeature("scheduler").get_scheduler().add_job(
        call, 'date', run_date=run_date,
        args=[mdl_cls_func, cls_args, func_args])
def get_network_config(self, azure_key_id, update):
    """Build the azure network ConfigurationSet for this virtual environment.

    Return None if image type is vm and not update; public endpoints
    are assigned in real time to avoid clashes with endpoints already
    used under the same cloud service.

    :param azure_key_id: key identifying the azure subscription/cert
    :param update: True when updating an existing deployment
    :return: ConfigurationSet with input endpoints, or None
    """
    azure_service = RequiredFeature("azure_service")
    if self.is_vm_image() and not update:
        return None
    cs = self.virtual_environment[self.T_CLOUD_SERVICE]
    nc = self.virtual_environment[self.T_NETWORK_CONFIG]
    network_config = ConfigurationSet()
    network_config.configuration_set_type = nc[self.T_CONFIGURATION_SET_TYPE]
    input_endpoints = nc[self.T_INPUT_ENDPOINTS]
    # avoid duplicate endpoint under same cloud service
    assigned_endpoints = azure_service.get_assigned_endpoints(azure_key_id, cs[self.SERVICE_NAME])
    endpoints = [endpoint[self.T_LOCAL_PORT] for endpoint in input_endpoints]
    unassigned_endpoints = [str(port) for port in find_unassigned_endpoints(endpoints, assigned_endpoints)]
    # NOTE: the original used map() with a Py2-only tuple-unpacking lambda for
    # this side effect; on Python 3 that is a SyntaxError and a lazy map would
    # silently skip the update. An explicit loop works on both.
    for input_endpoint, port in zip(input_endpoints, unassigned_endpoints):
        input_endpoint[self.PORT] = port
    for input_endpoint in input_endpoints:
        network_config.input_endpoints.input_endpoints.append(
            ConfigurationSetInputEndpoint(
                input_endpoint[self.NAME],
                input_endpoint[self.PROTOCOL],
                input_endpoint[self.PORT],
                input_endpoint[self.T_LOCAL_PORT]))
    return network_config
def upload_files(self, user_id, file_type):
    """Handle uploaded files from http request.

    Validates the upload (returning bad_request on failure), stores each
    file under a server-generated name and returns the stored files' info.
    """
    try:
        self.__validate_upload_files()
    except Exception as e:
        self.log.error(e)
        return bad_request("file size or file type unsupport")
    storage = RequiredFeature("storage")
    uploaded = []
    for key in request.files:
        file_content = request.files[key]
        original_name = file_content.filename
        suffix = original_name[original_name.rfind('.'):]
        new_name = self.__generate_file_name(user_id, file_type, suffix)
        self.log.debug("upload file: " + new_name)
        saved = storage.save(Context(
            file_name=new_name,
            file_type=file_type,
            content=file_content))
        # new_name is a random name created by server;
        # original_name is the client's original file name
        uploaded.append({
            "file_name": new_name,
            "pre_file_name": original_name,
            "url": saved.url})
    return {"files": uploaded}
class DockerHelper(Component):
    """Facade that selects the docker implementation for a hackathon."""

    hosted_docker = RequiredFeature("hosted_docker")
    alauda_docker = RequiredFeature("alauda_docker")

    def get_docker(self, hackathon):
        """Return the alauda docker proxy when the hackathon enables
        alauda, otherwise the hosted docker proxy."""
        return self.alauda_docker if hackathon.is_alauda_enabled() else self.hosted_docker
def stop_expr(self, expr_id, force=0):
    """Stop (or force-delete) a RUNNING experiment.

    :param expr_id: experiment id
    :param force: 0: only stop container and release ports, 1: force stop and delete container and release ports.
    :return: flask-style response: ok('OK') on success, ok() when no
        RUNNING experiment matches, internal_server_error on failure
    """
    self.log.debug("begin to stop %d" % expr_id)
    # only a RUNNING experiment can be stopped; anything else is a no-op
    expr = self.db.find_first_object_by(Experiment, id=expr_id, status=EStatus.RUNNING)
    if expr is not None:
        # Docker
        if expr.template.provider == VE_PROVIDER.DOCKER:
            # stop containers
            for c in expr.virtual_environments.all():
                try:
                    self.log.debug("begin to stop %s" % c.name)
                    docker = self.__get_docker(expr.hackathon, c)
                    if force:
                        # force: remove the container entirely
                        docker.delete(c.name, virtual_environment=c, container=c.container, expr_id=expr_id)
                        c.status = VEStatus.DELETED
                    else:
                        # normal stop: container kept, can be restarted
                        docker.stop(c.name, virtual_environment=c, container=c.container, expr_id=expr_id)
                        c.status = VEStatus.STOPPED
                except Exception as e:
                    # any container failure rolls back the whole experiment state
                    self.log.error(e)
                    self.__roll_back(expr_id)
                    return internal_server_error(
                        'Failed stop/delete container')
            # all containers handled; update experiment status to match
            if force:
                expr.status = EStatus.DELETED
            else:
                expr.status = EStatus.STOPPED
            self.db.commit()
        else:
            # non-docker providers are treated as azure VM deployments
            try:
                # todo support delete azure vm
                hosted_docker = RequiredFeature("hosted_docker")
                af = AzureFormation(
                    hosted_docker.load_azure_key_id(expr_id))
                af.stop(expr_id, AVMStatus.STOPPED_DEALLOCATED)
            except Exception as e:
                self.log.error(e)
                return internal_server_error('Failed stopping azure')
        self.log.debug("experiment %d ended success" % expr_id)
        return ok('OK')
    else:
        return ok()
def stop_expr(self, expr_id, force=0):
    """Stop (or force-delete) a RUNNING experiment.

    :param expr_id: experiment id
    :param force: 0: only stop container and release ports, 1: force stop and delete container and release ports.
    :return: flask-style response: ok('OK') on success, ok() when no
        RUNNING experiment matches, internal_server_error on failure
    """
    self.log.debug("begin to stop %d" % expr_id)
    # only a RUNNING experiment can be stopped; anything else is a no-op
    expr = self.db.find_first_object_by(Experiment, id=expr_id, status=EStatus.RUNNING)
    if expr is not None:
        # Docker
        if expr.template.provider == VE_PROVIDER.DOCKER:
            # stop containers
            for c in expr.virtual_environments.all():
                try:
                    self.log.debug("begin to stop %s" % c.name)
                    docker = self.__get_docker(expr.hackathon, c)
                    if force:
                        # force: remove the container entirely
                        docker.delete(c.name, virtual_environment=c, container=c.container, expr_id=expr_id)
                        c.status = VEStatus.DELETED
                    else:
                        # normal stop: container kept, can be restarted
                        docker.stop(c.name, virtual_environment=c, container=c.container, expr_id=expr_id)
                        c.status = VEStatus.STOPPED
                except Exception as e:
                    # any container failure rolls back the whole experiment state
                    self.log.error(e)
                    self.__roll_back(expr_id)
                    return internal_server_error('Failed stop/delete container')
            # all containers handled; update experiment status to match
            if force:
                expr.status = EStatus.DELETED
            else:
                expr.status = EStatus.STOPPED
            self.db.commit()
        else:
            # non-docker providers are treated as azure VM deployments
            try:
                # todo support delete azure vm
                hosted_docker = RequiredFeature("hosted_docker")
                af = AzureFormation(hosted_docker.load_azure_key_id(expr_id))
                af.stop(expr_id, AVMStatus.STOPPED_DEALLOCATED)
            except Exception as e:
                self.log.error(e)
                return internal_server_error('Failed stopping azure')
        self.log.debug("experiment %d ended success" % expr_id)
        return ok('OK')
    else:
        return ok()
def get_starter(self, hackathon, template):
    """Resolve the experiment-starter feature for a hackathon/template pair.

    Returns None when either argument is missing or no provider matches.
    """
    if not hackathon or not template:
        return None
    starter = None
    if template.provider == VE_PROVIDER.DOCKER:
        cloud = hackathon.config.get(HACKATHON_CONFIG.CLOUD_PROVIDER)
        if cloud == CLOUD_PROVIDER.AZURE:
            starter = RequiredFeature("azure_docker")
        elif cloud == CLOUD_PROVIDER.ALAUDA:
            starter = RequiredFeature("alauda_docker")
    elif template.provider == VE_PROVIDER.AZURE:
        starter = RequiredFeature("azure_vm")
    return starter
def get_starter(self, hackathon, template):
    """Resolve the experiment-starter feature for a hackathon/template pair.

    Only kubernetes is currently served; docker and azure providers are
    not implemented yet. Returns None when arguments are missing or no
    provider matches.
    """
    if not hackathon or not template:
        return None
    # TODO Interim workaround for kubernetes, need real implementation
    if hackathon.config.get('cloud_provider') == CloudProvider.KUBERNETES:
        return RequiredFeature("k8s_service")
    if template.provider == VirtualEnvProvider.DOCKER:
        raise NotImplementedError()
    if template.provider == VirtualEnvProvider.AZURE:
        raise NotImplementedError()
    if template.provider == VirtualEnvProvider.K8S:
        return RequiredFeature("k8s_service")
    return None
def upload_files(self):
    """Handle uploaded image files from the http request.

    Validates the request arguments, stores every file through the
    storage feature and returns metadata about each stored image.

    :return: validation error response, or {"files": [...]} on success
    """
    status, return_info = self.validate_args()
    if not status:
        return return_info
    storage = RequiredFeature("storage")
    images = []
    # `file_storage` (not `file`) to avoid shadowing the builtin
    for file_name in request.files:
        file_storage = request.files[file_name]
        self.log.debug("upload image file : " + file_name)
        context = storage.save(Context(
            hackathon_name=g.hackathon.name,
            file_name=file_storage.filename,
            file_type=FILE_TYPE.HACK_IMAGE,
            content=file_storage))
        # context.file_name is a random name created by server,
        # file_storage.filename is the original name
        images.append({
            "name": file_storage.filename,
            "url": context.url,
            # frontend UI components need thumbnailUrl/deleteUrl
            "thumbnailUrl": context.url,
            "deleteUrl": '/api/admin/file?key=' + context.file_name})
    return {"files": images}
class HostedDockerHealthCheck(HealthCheck):
    """Report status of hosted docker.

    see more on docker/hosted_docker.py
    """

    def __init__(self):
        self.hosted_docker = RequiredFeature("hosted_docker_proxy")

    def report_health(self):
        """Delegate the health report to the hosted docker proxy."""
        return self.hosted_docker.report_health()
def __init__(self):
    """Map each FILE_TYPE to its azure blob container name (from config,
    with sensible defaults) and resolve the blob service feature."""
    container_settings = {
        FILE_TYPE.TEMPLATE: ("storage.azure.template_container", "templates"),
        FILE_TYPE.HACK_IMAGE: ("storage.azure.image_container", "images"),
        FILE_TYPE.AZURE_CERT: ("storage.azure.certificate_container", "certificate"),
        FILE_TYPE.USER_FILE: ("storage.azure.user_file_container", "userfile"),
        FILE_TYPE.TEAM_FILE: ("storage.azure.team_file_container", "teamfile"),
        FILE_TYPE.HACK_FILE: ("storage.azure.hack_file_container", "hackfile"),
    }
    self.__containers = dict(
        (file_type, self.util.safe_get_config(config_key, default))
        for file_type, (config_key, default) in container_settings.items())
    self.azure_blob_service = RequiredFeature("azure_blob_service")
class AlaudaDockerHealthCheck(HealthCheck):
    """Report status of Alauda service.

    see more on docker/alauda_docker.py
    """

    def __init__(self):
        self.alauda_docker = RequiredFeature("alauda_docker")

    def report_health(self):
        """Delegate the health report to the alauda docker proxy."""
        return self.alauda_docker.report_health()
def get_starter(self, hackathon, template):
    """Resolve the experiment-starter feature for a hackathon/template pair.

    Kubernetes-hosted hackathons always use the k8s service; otherwise the
    starter is chosen by template provider (and, for docker, the configured
    cloud provider). Returns None when arguments are missing or nothing matches.
    """
    if not hackathon or not template:
        return None
    # TODO Interim workaround for kubernetes, need real implementation
    if hackathon.config.get('cloud_provider') == CLOUD_PROVIDER.KUBERNETES:
        return RequiredFeature("k8s_service")
    starter = None
    if template.provider == VE_PROVIDER.DOCKER:
        cloud = hackathon.config.get(HACKATHON_CONFIG.CLOUD_PROVIDER)
        if cloud == CLOUD_PROVIDER.AZURE:
            starter = RequiredFeature("azure_docker")
        elif cloud == CLOUD_PROVIDER.ALAUDA:
            starter = RequiredFeature("alauda_docker")
    elif template.provider == VE_PROVIDER.AZURE:
        starter = RequiredFeature("azure_vm")
    elif template.provider == VE_PROVIDER.K8S:
        starter = RequiredFeature("k8s_service")
    return starter
class SubscriptionService(Component):
    """Subscription of azure resources according to given subscription id.

    Each getter returns the remaining quota (max - current) for one
    resource kind, or ERROR_RESULT (-1) when the subscription query fails.
    """

    azure_adapter = RequiredFeature("azure_adapter")

    ERROR_RESULT = -1

    def get_available_storage_account_count(self, azure_key_id):
        """
        Get available count of storage account
        Return -1 if failed
        :return:
        """
        return self.__get_available_count(azure_key_id, "max_storage_accounts", "current_storage_accounts")

    def get_available_cloud_service_count(self, azure_key_id):
        """
        Get available count of cloud service
        Return -1 if failed
        :return:
        """
        return self.__get_available_count(azure_key_id, "max_hosted_services", "current_hosted_services")

    def get_available_core_count(self, azure_key_id):
        """
        Get available count of core
        Return -1 if failed
        :return:
        """
        return self.__get_available_count(azure_key_id, "max_core_count", "current_core_count")

    def __get_available_count(self, azure_key_id, max_attr, current_attr):
        # shared implementation: query the subscription once and subtract
        # the current usage from the quota; -1 signals a failed query
        try:
            result = self.azure_adapter.get_subscription(azure_key_id)
        except Exception as e:
            self.log.error(e)
            return self.ERROR_RESULT
        return getattr(result, max_attr) - getattr(result, current_attr)
class DockerHostManager(Component):
    """Component to manage docker host server"""

    docker = RequiredFeature("docker")

    def get_available_docker_host(self, req_count, hackathon):
        """Return the first reachable docker host VM of this hackathon
        that still has room for `req_count` more containers.

        Raises Exception when no candidate VM responds to ping.
        """
        candidates = self.db.find_all_objects(
            DockerHostServer,
            DockerHostServer.container_count + req_count <= DockerHostServer.container_max_count,
            DockerHostServer.hackathon_id == hackathon.id)
        # todo connect to azure to launch new VM if no existed VM meet the requirement
        # since it takes some time to launch VM,
        # it's more reasonable to launch VM when the existed ones are almost used up.
        # The new-created VM must run 'cloudvm service by default(either cloud-init or python remote ssh)
        # todo the VM public/private IP will change after reboot, need sync the IP in db with azure in this case
        for host in candidates:
            if self.docker.hosted_docker.ping(host):
                return host
        raise Exception("No available VM.")

    def get_host_server_by_id(self, id):
        """Look up a DockerHostServer row by its primary key."""
        return self.db.find_first_object_by(DockerHostServer, id=id)
def __init__(self):
    """Resolve the hosted docker proxy from the feature registry."""
    self.hosted_docker = RequiredFeature("hosted_docker_proxy")
def get_basic_property(hackathon, property_name, default_value=None):
    """Read a basic property of the hackathon via the hackathon manager."""
    return RequiredFeature("hackathon_manager").get_basic_property(
        hackathon, property_name, default_value)
def is_auto_approve(hackathon):
    """Whether registrations are auto-approved (config default: enabled)."""
    manager = RequiredFeature("hackathon_manager")
    return util.str2bool(
        manager.get_basic_property(hackathon, HACKATHON_CONFIG.AUTO_APPROVE, "1"))
def is_alauda_enabled(hackathon):
    """Whether alauda docker is enabled, per the hackathon manager."""
    return RequiredFeature("hackathon_manager").is_alauda_enabled(hackathon)
def get_pre_allocate_number(hackathon):
    """Number of environments to pre-allocate, per the hackathon manager."""
    return RequiredFeature("hackathon_manager").get_pre_allocate_number(hackathon)
def is_pre_allocate_enabled(hackathon):
    """Whether pre-allocation is enabled, per the hackathon manager."""
    return RequiredFeature("hackathon_manager").is_pre_allocate_enabled(hackathon)
def is_auto_approve(hackathon):
    """Whether registrations are auto-approved, per the hackathon manager."""
    return RequiredFeature("hackathon_manager").is_auto_approve(hackathon)
def is_alauda_enabled(hackathon):
    """Whether alauda docker is enabled (basic-info flag, default off)."""
    manager = RequiredFeature("hackathon_manager")
    return util.str2bool(
        manager.get_basic_property(hackathon, HACKATHON_BASIC_INFO.ALAUDA_ENABLED, "0"))
def get_pre_allocate_number(hackathon):
    """Number of environments to pre-allocate (config value, default 1)."""
    manager = RequiredFeature("hackathon_manager")
    return int(
        manager.get_basic_property(hackathon, HACKATHON_CONFIG.PRE_ALLOCATE_NUMBER, 1))
def __init__(self):
    """Resolve both docker proxies from the feature registry."""
    self.hosted_docker = RequiredFeature("hosted_docker")
    self.alauda_docker = RequiredFeature("alauda_docker")
def is_alauda_enabled(hackathon):
    """Whether alauda docker is enabled (basic-info flag, default False).

    NOTE(review): unlike the sibling variant this returns the raw property
    value rather than passing it through str2bool — confirm callers expect that.
    """
    manager = RequiredFeature("hackathon_manager")
    return manager.get_basic_property(hackathon, HACKATHON_BASIC_INFO.ALAUDA_ENABLED, False)
def __encrypt_content(self, pem_url):
    """Encrypt the pem file and return the path of the encrypted copy."""
    encrypted_pem_url = pem_url + ".encrypted"
    RequiredFeature("cryptor").encrypt(pem_url, encrypted_pem_url)
    return encrypted_pem_url
def __init__(self):
    """Resolve the alauda docker proxy from the feature registry."""
    self.alauda_docker = RequiredFeature("alauda_docker_proxy")
def get_local_pem_url(azureKey):
    """Ensure a local copy of the azure key's pem exists and return its path."""
    return RequiredFeature("azure_cert_manager").get_local_pem_url(azureKey.pem_url)
def test_get_expr_status(self):
    # NOTE(review): the test name says get_expr_status but the call is
    # recycle_expr(), and both expr_id and result are unused — confirm
    # which API this test is meant to exercise and add assertions.
    expr_id = 11
    expr_manager = RequiredFeature("expr_manager")
    result = expr_manager.recycle_expr()