def draw(self, surface):
    rot_image = pygame.transform.rotate(self.image, -self.heading)
    rect = rot_image.get_rect()
    rect.center = self.location
    surface.blit(rot_image, rect)
    if conf.get()["aircraft"]["draw_radius"]:
        pygame.draw.circle(surface, (255, 255, 0), self.location,
                           conf.get()["aircraft"]["collision_radius"], 1)
    # Draw lines and waypoints if selected (guard against an empty waypoint list)
    if self.selected and self.waypoints:
        point_list = [self.location]
        for x in range(len(self.waypoints) - 1):
            point_list.append(self.waypoints[x].getLocation())
            self.waypoints[x].draw(surface)
        point_list.append(self.waypoints[-1].getLocation())
        pygame.draw.lines(surface, (255, 255, 0), False, point_list)
    # Draw the ident string next to the aircraft
    x = self.location[0] + 20
    y = self.location[1]
    # Integer division keeps the flight level a whole number under Python 3
    labels = [self.ident, "FL" + str(self.altitude // 100), str(self.speed) + "kts"]
    for line in labels:
        text = self.font.render(line, False, self.fs_font_color)
        surface.blit(text, (x, y))
        y += self.font.get_height()
def __handleAircraftCollision(self, ac1, ac2):
    if (Utility.locDistSq(ac1.getLocation(), ac2.getLocation())
            < conf.get()['aircraft']['collision_radius'] ** 2):
        if not self.demomode:
            self.gameEndCode = conf.get()['codes']['ac_collide']
        self.score += conf.get()['scoring']['ac_collide']
        # Highlight the collided aircraft
        ac1.image = Aircraft.AC_IMAGE_NEAR  # later set to Aircraft.AC_IMAGE_COLLIDED
        ac2.image = Aircraft.AC_IMAGE_NEAR
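# Utility.locDistSq is not shown in this file; a minimal sketch consistent with
# how it is used above. Comparing squared distances against a squared radius
# avoids a math.sqrt call for every aircraft pair.
def locDistSq(loc1, loc2):
    dx = loc1[0] - loc2[0]
    dy = loc1[1] - loc2[1]
    return dx * dx + dy * dy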
def __handleUserInteraction(self):
    for event in pygame.event.get():
        if event.type in (pygame.MOUSEBUTTONUP, pygame.QUIT):
            self.highEnd = conf.get()['codes']['user_end']
        elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
            self.highEnd = conf.get()['codes']['user_end']
def get_oauth2_access_token(self, code):
    params = {'appid': conf.get('appid'),
              'secret': conf.get('appsecret'),
              'code': code,
              'grant_type': 'authorization_code'}
    return_json = api_request('get', conf.get('oauth2_api_url'), params)
    if 'access_token' in return_json:  # dict.has_key() is Python 2 only
        return return_json
    raise ErrorMsg(return_json)
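# api_request and ErrorMsg are defined elsewhere; a minimal sketch of what the
# calls above assume, using the `requests` library. The names and behaviour
# here are assumptions for illustration, not the original implementation.
import requests

def api_request(method, url, params):
    # The API endpoints above are all queried with GET and return JSON bodies.
    return requests.request(method, url, params=params, timeout=10).json()

class ErrorMsg(Exception):
    """Raised with the raw error payload when the API reports a failure."""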
def getActivePlugins(cls):
    result = []
    for section in conf.Parser.sections():
        if not section.startswith("plugin:"):
            continue
        if not conf.asBool(conf.get(section, "enabled")):
            continue
        result.append(conf.get(section, "module"))
    return result
def get_access_token(self):
    """Fetch an access_token."""
    params = {'grant_type': 'client_credential',
              'appid': conf.get('appid'),
              'secret': conf.get('appsecret')}
    return_json = api_request('get', conf.get('token_api_url'), params)
    if 'access_token' in return_json:
        return return_json
    raise ErrorMsg(return_json)
def pid():
    """Return the pid of the rethinkdb server."""
    db_pid = None
    if os.path.exists(conf.get('db_pid')):
        with open(conf.get('db_pid')) as fin:
            db_pid = int(fin.readline())
    return db_pid
def refresh_oauth2_access_token(self, refresh_token):
    params = {'appid': conf.get('appid'),
              'grant_type': 'refresh_token',
              'refresh_token': refresh_token}
    return_json = api_request('get', conf.get('oauth2_refreshtoken_api_url'), params)
    if 'access_token' in return_json:
        return return_json
    raise ErrorMsg(return_json)
def __init__(self, *args, **kwargs):
    super(Scripto, self).__init__(*args, **kwargs)
    self.handlers = {}
    self.meta = {}
    # Make a list of scripts used on this network
    scripts = set(conf.get("scripts", tag=self.tag))
    for chan in self.chans:
        scripts.update(conf.get("scripts", tag=self.tag, chan=chan))
    # Load them as needed
    self.logger.log(OTHER, "imported: [%s]" % ", ".join(
        name for name in scripts if self._loadscript(name) == 2))
def status():
    """Print the status of the database."""
    if alive():
        print('PID:', str(pid()), 'Root:', conf.get('db_root'))
        print('-' * 60)
        print(''.join(log(5)))
    elif os.path.exists(conf.get('db_pid')):
        print('database improperly shut down')
    else:
        print('database not running')
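# alive() is referenced above but not shown; a plausible sketch built on pid(),
# assuming a POSIX system where os.kill(pid, 0) probes for process existence
# without sending a signal.
def alive():
    db_pid = pid()
    if db_pid is None:
        return False
    try:
        os.kill(db_pid, 0)  # signal 0: existence check only
        return True
    except OSError:
        return False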
def parse_db_log(args):
    """Parse: awe db log"""
    print('Last {0} lines of {1}'.format(args.lines, conf.get('db_log')))
    print('-' * 60)
    print(''.join(dbc.log(int(args.lines))))
def __init():
    """Initialize module: create a UDP socket and record the remote
    endpoint given in the config (UDP itself is connectionless)."""
    global sock, dstaddr
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    port = conf.get("plog", "port")
    dstaddr = ("localhost", int(port))
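# A minimal usage sketch for the UDP logging module initialized above. `send`
# is a hypothetical helper name, not shown in the original; it relies on the
# module-level `sock` and `dstaddr` set by __init().
def send(msg):
    # sendto is connectionless, so no prior connect() call is needed
    sock.sendto(msg.encode("utf-8"), dstaddr)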
def getActivePlugins(cls):
    result = []
    for plugin in conf.get("plugins"):
        if not plugin["enabled"]:
            continue
        result.append(plugin["module"])
    return result
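# Hypothetical shape of the "plugins" config entry consumed by the dict-based
# getActivePlugins variant above (values invented for illustration):
example_plugins = [
    {"enabled": True, "module": "myapp.plugins.stats"},
    {"enabled": False, "module": "myapp.plugins.debug"},
]
# Filtering it the same way yields only the enabled modules:
active = [p["module"] for p in example_plugins if p["enabled"]]
assert active == ["myapp.plugins.stats"]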
def from_file(self, menu_name):
    data = conf.get()["menus"][menu_name]
    self.add_title(data['title'])
    self.titlecolour = data['titleRGB']
    self.titlesize = data['titlesize']
    for button in data['buttons']:
        self.add_button(button['text'], button['code'])
def test_get():
    try:
        with open(conf.GCONF_FILE, 'w') as fout:
            json.dump({'hello': 'world'}, fout)
        assert conf.get('hello') == 'world'
    finally:
        util.delete_it(conf.GCONF_FILE)
def check_auth(self, access_token, openid):
    params = {'access_token': access_token, 'openid': openid}
    return_json = api_request('get', conf.get('check_auth_api_url'), params)
    return return_json['errcode'] == 0 and return_json['errmsg'] == 'ok'
def generateGameDestinations(screen_w, screen_h):
    ret = []
    for x in range(conf.get()['game']['n_destinations']):
        # Keep destinations at least 20px away from the screen edges
        randx = random.randint(20, screen_w - 20)
        randy = random.randint(20, screen_h - 20)
        ret.append(Destination((randx, randy), "D" + str(x)))
    return ret
def log(msg):
    if not request:
        import conf
        if conf.get("logging"):
            print(repr(msg))  # Python 3 print; the redundant `or False` dropped
    from utils import cstr
    debug_log.append(cstr(msg))
def __update(self):
    # 1: Update the positions of all existing aircraft
    # 2: Check if any aircraft have collided with an obstacle
    # 3: Check if any aircraft have reached a destination
    ac_removal = []
    for n in range(len(self.aircraft)):
        a = self.aircraft[n]
        # Update positions and redraw
        reachdest = a.update()
        if reachdest:
            # Schedule aircraft for removal
            ac_removal.append(a)
            self.score += conf.get()['scoring']['reach_dest']
        else:
            a.draw(self.screen)
            # Check collisions
            self.__highlightImpendingCollision(a)
            for ac_t in self.aircraft:
                if ac_t != a:
                    self.__handleAircraftCollision(ac_t, a)
    for a in ac_removal:
        if self.ac_selected == a:
            self.requestSelected(None)
        self.aircraft.remove(a)
        self.cnt_fspane.remove(a.getFS())
        self.cnt_fspane.repaint()
    # 4: Spawn new aircraft due for spawning (or, in demo mode, regenerate the
    #    list if none are left)
    if len(self.aircraftspawntimes) != 0:
        if self.ms_elapsed >= self.aircraftspawntimes[0]:
            sp = self.aircraftspawns[0]
            if len(self.aircraft) < math.floor(Game.FSPANE_H / 60):
                ac = Aircraft(self, sp.getSpawnPoint(),
                              conf.get()['aircraft']['speed_default'],
                              sp.getDestination(),
                              "BA" + str(random.randint(1, 100)))
                self.aircraft.append(ac)
                self.cnt_fspane.addNewFlightStrip(ac)
                self.aircraftspawns.remove(sp)
                self.aircraftspawntimes.remove(self.aircraftspawntimes[0])
    elif self.demomode:
        self.ms_elapsed = 0  # fixed typo: was `ms_eleapsed`, which never reset the clock
        self.__generateAircraftSpawnEvents()
        print("reset")
def get_user_list(self, access_token, next_openid=''):
    params = {'access_token': access_token, 'next_openid': next_openid}
    return_json = api_request('get', conf.get('userlist_api_url'), params)
    if 'total' in return_json:
        return return_json
    raise ErrorMsg(return_json)
def _get_handlers(self, chan):
    """Yield handlers for the channel, in the form:
    {Privmsg: f1, "title": f2, onload: f3}, {...}
    If chan is None, yield handlers for the network.
    """
    for name in conf.get("scripts", self.tag, chan):
        if name in g_scripts:
            yield g_scripts[name]
def main():
    """Script execution entry point."""
    pidpath = conf.get("plog", "pidpath", "")
    if not pidpath:
        sys.exit("Error: missing pidpath in app.ini")
    # Go to background
    __daemonize()
    # Save our pid
    with open(pidpath, "w") as fp:
        fp.write("%d\n" % os.getpid())
    # Need to call init again because daemonize closed all FDs
    __init()
    # Run the loop
    run(conf.get("app", "name", "UNKNOWN"))
def log(lines=25):
    """Get the last `lines` lines of the log file."""
    try:
        with open(conf.get('db_log')) as fin:
            lines = fin.readlines()[-lines:]
    except IOError:
        lines = ['Log file not yet created.']
    return lines
def start():
    """Start the database."""
    if pid():
        return
    if not os.path.exists(os.path.join(conf.get('db_root'), 'metadata')):
        util.command('rethinkdb create -d ' + conf.get('db_root'))
    util.command('rethinkdb serve --config-file ' + conf.DB_FILE)
    start_time = time.time()
    while True:
        try:
            db.common.connect()
            break
        except r.errors.ReqlDriverError:
            time.sleep(1)
            if (time.time() - start_time) > MAX_TIME:
                raise DBTimeout()
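# MAX_TIME and DBTimeout are referenced by start() but defined elsewhere; a
# minimal sketch of what the retry loop assumes. The 60-second figure is an
# illustrative guess, not the project's actual value.
MAX_TIME = 60  # seconds to wait for the server to start accepting connections

class DBTimeout(Exception):
    """Raised when the database does not come up within MAX_TIME seconds."""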
def get_userinfo(self, access_token, openid, lang='zh_CN'):
    """@param lang: one of zh_CN, zh_TW, en"""
    params = {'access_token': access_token, 'openid': openid, 'lang': lang}
    return_json = api_request('get', conf.get('oauth2_userinfo_api_url'), params)
    if 'openid' in return_json:
        return return_json
    raise ErrorMsg(return_json)
def __displayPostGameDialog(self):
    # Do post-loop actions (game over dialogs)
    if (self.gameEndCode != conf.get()['codes']['user_end']
            and self.gameEndCode != conf.get()['codes']['kill']):
        l = gui.Label("Game Over!")
        b = gui.Button("OK")
        # Not nice... but one way of passing by reference! A list is a mutable
        # object, while an int isn't -- that's why I'm using a list. Wait for
        # Python 3 to allow assigning to a variable in an outer scope
        # (keyword: nonlocal).
        bob = [False]
        def okcb(flag):  # parameter renamed from `b` to avoid shadowing the button
            flag[0] = True
        b.connect(gui.CLICK, okcb, bob)
        c = gui.Container()
        if self.gameEndCode == conf.get()['codes']['ac_collide']:
            # Check if the sound is playing and if not, play it
            # (probably never happens in this call)
            if not self.channel_collision.get_busy():
                self.channel_collision.play(self.sound_collision)
            c.add(gui.Label("COLLISION!!!!"), 0, 0)
        elif self.gameEndCode == conf.get()['codes']['time_up']:
            c.add(gui.Label("Time up!"), 0, 0)
        c.add(b, 0, 30)
        d = gui.Dialog(l, c)
        d.open()
        self.app.update(self.screen)
        pygame.display.flip()
        #pygame.time.delay(3000)
        clock = pygame.time.Clock()
        while not bob[0]:
            clock.tick(conf.get()['game']['framerate'])
            for e in pygame.event.get():
                self.app.event(e)
            self.app.repaint()
            self.app.update(self.screen)
            pygame.display.flip()
def init(self, level):
    # Choose help message: get data for weightings
    time = level.pause_time
    solved = conf.get('solve_methods', [])[:conf.SOLVE_HISTORY_SIZE]
    solved.reverse()
    num_solved = len(solved)
    if num_solved < conf.HELP_MSG_MIN_SOLVED:
        solve_ratio = 0
    else:
        player_solved = max(solved.count(True), .1)
        solve_ratio = float(solved.count(False)) / player_solved
    try:
        # Number solved by the player since the last autosolve
        self_solved = solved.index(False)
    except ValueError:
        self_solved = num_solved
    # Calculate weightings
    data = (None, time, solve_ratio, self_solved)
    pool_names = conf.HELP_MSG_POOL_NAMES
    fns = conf.HELP_MSG_WEIGHTING_FNS
    weightings = []
    for pool, arg in zip(pool_names, data):
        weightings.append(max(fns[pool](arg), 0))
    # Choose pool (weighted)
    cumulative = []
    last = 0
    for w in weightings:
        last += w
        cumulative.append(last)
    index = bisect(cumulative, cumulative[-1] * random())
    index = min(index, len(pool_names) - 1)
    pool = conf.HELP_MSG_POOLS[pool_names[index]]
    # Choose message from pool (not weighted)
    help_msg = choice(pool)
    # Create menu
    if level.solving:
        bs = (menu.Button('Stop solving', self._quit_then, level.stop_solving),)
    else:
        bs = (menu.Button('Help', self.set_page, 1),
              menu.Button('Reset', self._reset, level))
    menu.Menu.init(self, (
        (menu.Button('Continue', self.game.quit_backend),) + bs +
        (menu.Button('Quit', self.game.quit_backend, 2),),
        (
            menu.LongText(self, help_msg, 14),
            menu.Button('Keep trying', self.game.quit_backend),
            menu.Button('Show me how', self._quit_then, level.launch_solver)
        )
    ))
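# The pool selection above is a standard weighted random choice: build a
# cumulative sum of the weights, draw a uniform value scaled to the total, and
# bisect to find which bucket it falls into. A self-contained sketch, assuming
# at least one positive weight (as the min() clamp above also guards):
from bisect import bisect
from random import random

def weighted_choice(items, weights):
    cumulative = []
    total = 0
    for w in weights:
        total += w
        cumulative.append(total)
    # random() < 1, so the drawn value is < total and bisect stays in range
    return items[bisect(cumulative, total * random())]

# weighted_choice(['a', 'b'], [1, 3]) returns 'b' about three times as often as 'a'.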
def __init__(self, screen):
    # Imagey type stuff
    self.screen = screen
    self.SCREEN_W = screen.get_size()[0]
    self.SCREEN_H = screen.get_size()[1]
    self.font = pygame.font.Font(None, 30)
    self.highEnd = 0
    self.selection = 0
    self.hiScore = Highs('score.txt', conf.get()['game']['n_highscores'])
    self.hiScore.load()
    self.myScores = self.hiScore['default']
    self.scoretable = ""
def __init__(self, hosts=None):
    self.__hosts = hosts
    if not self.__hosts:
        self.__hosts = conf.get("plow", "hosts").split(",")
    if not self.__hosts:
        self.__hosts = ["localhost:11336"]
    self.__socket = None
    self.__transport = None
    self.__protocol = None
    self.__service = None
    self.setup()
def _download(self, url, filename):
    cache_dir = conf.get('cache_dir')
    path = os.path.join(cache_dir, filename)
    if os.access(path, os.R_OK):
        return  # already cached
    if not os.access(cache_dir, os.F_OK):
        os.makedirs(cache_dir)
    response = self._request(url)
    with open(path, "w") as fp:
        fp.write(response.read())
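# The cache write above is not atomic: a crash mid-write leaves a truncated
# file that the os.access(path, os.R_OK) check later mistakes for a complete
# download. A common hardening (an alternative sketch, not the original code)
# writes to a temp file in the same directory and renames it into place:
import os
import tempfile

def atomic_write(path, data):
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or ".")
    try:
        with os.fdopen(fd, "wb") as fp:
            fp.write(data)  # data is expected to be bytes here
        os.replace(tmp, path)  # atomic on POSIX within one filesystem
    except BaseException:
        os.remove(tmp)
        raise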
def _get_tolerations(self, tolerations):
    return get_tolerations(
        tolerations=tolerations,
        default_tolerations=conf.get(TOLERATIONS_EXPERIMENTS))
def _get_affinity(self, affinity):
    return get_affinity(
        affinity=affinity,
        default_affinity=conf.get(AFFINITIES_EXPERIMENTS))
def get_job_docker_image(job_docker_image):
    return job_docker_image or conf.get('JOB_DOCKERIZER_IMAGE')
def _get_kv_env_vars(self, env_vars):
    return get_env_vars(
        env_vars=env_vars,
        default_env_vars=conf.get(ENV_VARS_EXPERIMENTS))
def _get_default_tensorboard_config():
    specification = TensorboardSpecification.create_specification(
        {'image': conf.get('TENSORBOARD_DOCKER_IMAGE')})
    return {'config': specification}
def _get_affinity(self, affinity):
    return get_affinity(
        affinity=affinity,
        default_affinity=conf.get('AFFINITY_BUILDS'))
def get_notebook_token(self):
    return get_hmac(conf.get(APP_LABELS_NOTEBOOK), self.project_uuid)
def _get_tolerations(self, tolerations):
    return get_tolerations(
        tolerations=tolerations,
        default_tolerations=conf.get('TOLERATIONS_BUILDS'))
def _get_secret_refs(self, secret_refs):
    return get_secret_refs(
        secret_refs=secret_refs,
        default_secret_refs=conf.get(K8S_SECRETS_EXPERIMENTS))
def start_dockerizer(build_job):
    # Update job status to show that it started
    build_job.set_status(JobLifeCycle.SCHEDULED)
    spawner_class = get_spawner_class(build_job.backend)

    try:
        registry_spec = get_registry_context(build_backend=build_job.backend)
    except ContainerRegistryError:
        build_job.set_status(
            JobLifeCycle.FAILED,
            message='Could not start the dockerizer job, '
                    'please check your registry configuration.')
        return

    spawner = spawner_class(
        project_name=build_job.project.unique_name,
        project_uuid=build_job.project.uuid.hex,
        job_name=build_job.unique_name,
        job_uuid=build_job.uuid.hex,
        commit=build_job.commit,
        from_image=build_job.build_image,
        dockerfile_path=build_job.build_dockerfile,
        context_path=build_job.build_context,
        image_tag=build_job.uuid.hex,
        image_name=get_image_name(build_job=build_job,
                                  registry_host=registry_spec.host),
        build_steps=build_job.build_steps,
        env_vars=build_job.build_env_vars,
        nocache=build_job.build_nocache,
        insecure=registry_spec.insecure,
        creds_secret_ref=registry_spec.secret,
        creds_secret_keys=registry_spec.secret_keys,
        spec=build_job.specification,
        k8s_config=conf.get(K8S_CONFIG),
        namespace=conf.get(K8S_NAMESPACE),
        in_cluster=True,
        use_sidecar=True)

    error = {}
    try:
        results = spawner.start_dockerizer(
            resources=build_job.resources,  # TODO: resources
            node_selector=build_job.node_selector,
            affinity=build_job.affinity,
            tolerations=build_job.tolerations)
        auditor.record(event_type=BUILD_JOB_STARTED, instance=build_job)
        build_job.definition = get_job_definition(results)
        build_job.save(update_fields=['definition'])
        return True
    except ApiException:
        _logger.error('Could not start build job, please check your polyaxon spec.',
                      exc_info=True)
        error = {
            'raised': True,
            'traceback': traceback.format_exc(),
            'message': 'Could not start build job, encountered a Kubernetes ApiException.'
        }
    except VolumeNotFoundError as e:
        _logger.error('Could not start build job, please check your volume definitions.',
                      exc_info=True)
        error = {
            'raised': True,
            'traceback': traceback.format_exc(),
            'message': 'Could not start build job, '
                       'encountered a volume definition problem. %s' % e
        }
    except Exception as e:
        _logger.error('Could not start build job, please check your polyaxon spec.',
                      exc_info=True)
        error = {
            'raised': True,
            'traceback': traceback.format_exc(),
            'message': 'Could not start build job, encountered a {} exception.'.format(
                e.__class__.__name__)
        }
    finally:
        if error.get('raised'):
            build_job.set_status(JobLifeCycle.FAILED,
                                 message=error.get('message'),
                                 traceback=error.get('traceback'))
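# The error-dict + finally pattern used above, in isolation: except blocks only
# record what went wrong, and a single finally block applies the failure
# status. A minimal sketch with a hypothetical job object exposing set_status:
import traceback

def run_and_report(job, action):
    error = {}
    try:
        return action()
    except Exception as e:
        error = {'raised': True,
                 'traceback': traceback.format_exc(),
                 'message': 'Failed with a {} exception.'.format(e.__class__.__name__)}
    finally:
        # Runs on both paths; only acts when an except block populated `error`
        if error.get('raised'):
            job.set_status('failed',
                           message=error.get('message'),
                           traceback=error.get('traceback'))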
def get_default_spawner():
    if conf.get(BUILD_JOBS_BACKEND) == BuildBackend.NATIVE:
        return DockerizerSpawner
    elif conf.get(BUILD_JOBS_BACKEND) == BuildBackend.KANIKO:
        return KanikoSpawner
    return DockerizerSpawner
def _get_service_account_name(self):
    service_account_name = None
    if conf.get('K8S_RBAC_ENABLED') and conf.get('K8S_SERVICE_ACCOUNT_BUILDS'):
        service_account_name = conf.get('K8S_SERVICE_ACCOUNT_BUILDS')
    return service_account_name
def start_tensorboard(self,
                      outputs_path,
                      persistence_outputs,
                      outputs_specs=None,
                      outputs_refs_jobs=None,
                      outputs_refs_experiments=None,
                      resources=None,
                      node_selector=None,
                      affinity=None,
                      tolerations=None):
    ports = [self.request_tensorboard_port()]
    target_ports = [self.PORT]
    volumes, volume_mounts = get_pod_outputs_volume(persistence_outputs)

    refs_volumes, refs_volume_mounts = get_pod_refs_outputs_volumes(
        outputs_refs=outputs_refs_jobs,
        persistence_outputs=persistence_outputs)
    volumes += refs_volumes
    volume_mounts += refs_volume_mounts

    refs_volumes, refs_volume_mounts = get_pod_refs_outputs_volumes(
        outputs_refs=outputs_specs,
        persistence_outputs=persistence_outputs)
    volumes += refs_volumes
    volume_mounts += refs_volume_mounts

    refs_volumes, refs_volume_mounts = get_pod_refs_outputs_volumes(
        outputs_refs=outputs_refs_experiments,
        persistence_outputs=persistence_outputs)
    volumes += refs_volumes
    volume_mounts += refs_volume_mounts

    # Add volumes for persistence outputs secrets
    stores_secrets = get_stores_secrets(specs=outputs_specs)
    self.validate_stores_secrets_keys(stores_secrets=stores_secrets)
    secrets_volumes, secrets_volume_mounts = self.get_stores_secrets_volumes(
        stores_secrets=stores_secrets)
    volumes += secrets_volumes
    volume_mounts += secrets_volume_mounts

    resource_name = self.resource_manager.get_resource_name()

    # Get persistence outputs secrets auth commands
    command_args = self.get_stores_secrets_command_args(
        stores_secrets=stores_secrets)
    command_args.append("tensorboard --logdir={} --port={}".format(
        outputs_path, self.PORT))
    args = [' && '.join(command_args)]
    command = ["/bin/sh", "-c"]

    deployment = self.resource_manager.get_deployment(
        resource_name=resource_name,
        volume_mounts=volume_mounts,
        volumes=volumes,
        labels=self.resource_manager.labels,
        env_vars=None,
        command=command,
        args=args,
        persistence_outputs=persistence_outputs,
        outputs_refs_jobs=outputs_refs_jobs,
        outputs_refs_experiments=outputs_refs_experiments,
        resources=resources,
        ephemeral_token=None,
        node_selector=node_selector,
        affinity=affinity,
        tolerations=tolerations,
        ports=target_ports,
        restart_policy=None)
    dep_resp, _ = self.create_or_update_deployment(name=resource_name,
                                                   data=deployment)

    service = services.get_service(
        namespace=self.namespace,
        name=resource_name,
        labels=self.resource_manager.get_labels(),
        ports=ports,
        target_ports=target_ports,
        service_type=self._get_service_type())
    service_resp, _ = self.create_or_update_service(name=resource_name,
                                                    data=service)
    results = {
        'deployment': dep_resp.to_dict(),
        'service': service_resp.to_dict()
    }

    if self._use_ingress():
        annotations = json.loads(conf.get('K8S_INGRESS_ANNOTATIONS'))
        paths = [{
            'path': '/tensorboards/{}'.format(self.project_name.replace('.', '/')),
            'backend': {
                'serviceName': resource_name,
                'servicePort': ports[0]
            }
        }]
        ingress = ingresses.get_ingress(
            namespace=self.namespace,
            name=resource_name,
            labels=self.resource_manager.get_labels(),
            annotations=annotations,
            paths=paths)
        self.create_or_update_ingress(name=resource_name, data=ingress)

    return results
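# K8S_INGRESS_ANNOTATIONS is stored as a JSON string and parsed with
# json.loads above; a hypothetical value for an nginx ingress class,
# illustrative only, not the platform's shipped default:
import json

example = ('{"kubernetes.io/ingress.class": "nginx", '
           '"nginx.ingress.kubernetes.io/rewrite-target": "/"}')
annotations = json.loads(example)
assert annotations["kubernetes.io/ingress.class"] == "nginx"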
def start_tensorboard(tensorboard):
    # Update job status to show that it started
    tensorboard.set_status(JobLifeCycle.SCHEDULED)
    spawner = TensorboardSpawner(
        project_name=tensorboard.project.unique_name,
        project_uuid=tensorboard.project.uuid.hex,
        job_name=tensorboard.unique_name,
        job_uuid=tensorboard.uuid.hex,
        k8s_config=conf.get(K8S_CONFIG),
        namespace=conf.get(K8S_NAMESPACE),
        version=conf.get(CHART_VERSION),
        job_docker_image=tensorboard.build_image,
        in_cluster=True)

    error = {}
    outputs_specs, tensorboard_paths = tensorboard.outputs_path
    try:
        results = spawner.start_tensorboard(
            outputs_path=tensorboard_paths,
            persistence_outputs=tensorboard.persistence_outputs,
            outputs_specs=outputs_specs,
            outputs_refs_jobs=tensorboard.outputs_refs_jobs,
            outputs_refs_experiments=tensorboard.outputs_refs_experiments,
            resources=tensorboard.resources,
            labels=tensorboard.labels,
            annotations=tensorboard.annotations,
            node_selector=tensorboard.node_selector,
            affinity=tensorboard.affinity,
            tolerations=tensorboard.tolerations,
            max_restarts=get_max_restart(tensorboard.max_restarts,
                                         conf.get(MAX_RESTARTS_TENSORBOARDS)),
            reconcile_url=get_tensorboard_reconcile_url(tensorboard.unique_name))
        tensorboard.definition = get_job_definition(results)
        tensorboard.save(update_fields=['definition'])
        return
    except ApiException:
        _logger.error('Could not start tensorboard, please check your polyaxon spec.',
                      exc_info=True)
        error = {
            'raised': True,
            'traceback': traceback.format_exc(),
            'message': 'Could not start the job, encountered a Kubernetes ApiException.',
        }
    except StoreNotFoundError as e:
        _logger.error('Could not start the tensorboard, '
                      'please check your volume definitions.',
                      exc_info=True)
        error = {
            'raised': True,
            'traceback': traceback.format_exc(),
            'message': 'Could not start the job, '
                       'encountered a volume definition problem. %s' % e,
        }
    except TensorboardValidation as e:
        _logger.error('Could not start the tensorboard, some experiments require '
                      'authenticating to stores with different access.',
                      exc_info=True)
        error = {
            'raised': True,
            'traceback': None,
            'message': 'Could not start the tensorboard, some experiments require '
                       'authenticating to stores with different access. %s' % e,
        }
    except Exception as e:
        _logger.error('Could not start tensorboard, please check your polyaxon spec.',
                      exc_info=True)
        error = {
            'raised': True,
            'traceback': traceback.format_exc(),
            'message': 'Could not start tensorboard, encountered a {} exception.'.format(
                e.__class__.__name__)
        }
    finally:
        if error.get('raised'):
            tensorboard.set_status(JobLifeCycle.FAILED,
                                   message=error.get('message'),
                                   traceback=error.get('traceback'))
def get_job_docker_image_pull_policy(job_docker_image_pull_policy):
    return job_docker_image_pull_policy or conf.get('JOB_DOCKERIZER_IMAGE_PULL_POLICY')
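# Both docker-image helpers above use the `x or conf.get(...)` idiom: any falsy
# argument (None, but also "" or 0) falls through to the configured default.
# That is usually the intent here, but it differs from an explicit None check:
def pick(value, default):
    return value or default

assert pick(None, "img:latest") == "img:latest"
assert pick("", "img:latest") == "img:latest"  # empty string is also replaced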
def start_experiment(experiment):
    # Update experiment status to show that it started
    experiment.set_status(ExperimentLifeCycle.SCHEDULED)
    project = experiment.project
    group = experiment.experiment_group

    job_docker_image = None  # This will force the spawners to use the default docker image
    if experiment.specification.build:
        try:
            image_name, image_tag = get_image_info(build_job=experiment.build_job)
        except (ValueError, AttributeError):
            _logger.error('Could not start the experiment.', exc_info=True)
            experiment.set_status(ExperimentLifeCycle.FAILED,
                                  message='Image info was not found.')
            return
        job_docker_image = '{}:{}'.format(image_name, image_tag)
        _logger.info('Start experiment with built image `%s`', job_docker_image)
    else:
        _logger.info('Start experiment with default image.')

    spawner_class = get_spawner_class(backend=experiment.backend,
                                      framework=experiment.specification.framework)
    # token_scope = RedisEphemeralTokens.get_scope(experiment.user.id,
    #                                              'experiment',
    #                                              experiment.id)
    error = {}
    try:
        # Use spawners to start the experiment
        spawner = spawner_class(
            project_name=project.unique_name,
            experiment_name=experiment.unique_name,
            experiment_group_name=group.unique_name if group else None,
            project_uuid=project.uuid.hex,
            experiment_group_uuid=group.uuid.hex if group else None,
            experiment_uuid=experiment.uuid.hex,
            persistence_config=experiment.persistence_config,
            outputs_refs_experiments=experiment.outputs_refs_experiments,
            outputs_refs_jobs=experiment.outputs_refs_jobs,
            original_name=experiment.original_unique_name,
            cloning_strategy=experiment.cloning_strategy,
            spec=experiment.specification,
            k8s_config=conf.get('K8S_CONFIG'),
            namespace=conf.get('K8S_NAMESPACE'),
            in_cluster=True,
            job_docker_image=job_docker_image,
            use_sidecar=True)
        # Create db jobs
        create_experiment_jobs(experiment=experiment, spawner=spawner)
        # Create k8s jobs
        response = spawner.start_experiment()
        # Handle response
        handle_experiment(experiment=experiment, response=response)
        experiment.set_status(ExperimentLifeCycle.STARTING)
    except ApiException:
        _logger.error('Could not start the experiment, please check your polyaxon spec.',
                      exc_info=True)
        error = {
            'raised': True,
            'traceback': traceback.format_exc(),
            'message': 'Could not start the experiment, '
                       'encountered a Kubernetes ApiException.'
        }
    except VolumeNotFoundError as e:
        _logger.error('Could not start the experiment, '
                      'please check your volume definitions.',
                      exc_info=True)
        error = {
            'raised': True,
            'traceback': traceback.format_exc(),
            'message': 'Could not start the experiment, '
                       'encountered a volume definition problem, %s.' % e
        }
    except Exception as e:
        _logger.error('Could not start the experiment, please check your polyaxon spec.',
                      exc_info=True)
        error = {
            'raised': True,
            'traceback': traceback.format_exc(),
            'message': 'Could not start the experiment, encountered a {} exception.'.format(
                e.__class__.__name__)
        }
    finally:
        if error.get('raised'):
            experiment.set_status(ExperimentLifeCycle.FAILED,
                                  message=error.get('message'),
                                  traceback=error.get('traceback'))
def _get_node_selector(self, node_selector):
    return get_node_selector(
        node_selector=node_selector,
        default_node_selector=conf.get('NODE_SELECTOR_BUILDS'))
def get_pod_outputs_volume(persistence_outputs):
    persistence_outputs = validate_persistence_outputs(
        persistence_outputs=persistence_outputs)
    return get_volume_from_definition(
        volume_name=persistence_outputs,
        volume_settings=conf.get(PERSISTENCE_OUTPUTS))
def record(obj: 'Cluster') -> None:
    import auditor
    import conf

    from event_manager.events.cluster import CLUSTER_CREATED

    auditor.record(
        event_type=CLUSTER_CREATED,
        instance=obj,
        namespace=conf.get('K8S_NAMESPACE'),
        environment=conf.get('POLYAXON_ENVIRONMENT'),
        is_upgrade=conf.get('CHART_IS_UPGRADE'),
        node_selector_core_enabled=bool(conf.get('NODE_SELECTOR_CORE')),
        node_selector_experiments_enabled=bool(conf.get('NODE_SELECTOR_EXPERIMENTS')),
        node_selector_jobs_enabled=bool(conf.get('NODE_SELECTOR_JOBS')),
        node_selector_builds_enabled=bool(conf.get('NODE_SELECTOR_BUILDS')),
        cli_min_version=conf.get('CLI_MIN_VERSION'),
        cli_latest_version=conf.get('CLI_LATEST_VERSION'),
        # The two platform version keys were swapped in the original
        # (min read PLATFORM_LATEST_VERSION and vice versa); fixed here.
        platform_min_version=conf.get('PLATFORM_MIN_VERSION'),
        platform_latest_version=conf.get('PLATFORM_LATEST_VERSION'),
        chart_version=conf.get('CHART_VERSION'))
def _get_config_map_refs(self, config_map_refs):
    return get_config_map_refs(
        config_map_refs=config_map_refs,
        default_config_map_refs=conf.get(K8S_CONFIG_MAPS_EXPERIMENTS))
def _get_pod_resources(self, resources):
    return get_pod_resources(
        resources=resources,
        default_resources=conf.get(K8S_RESOURCES_EXPERIMENTS))
def _get_annotations(self, annotations):
    return get_annotations(
        annotations=annotations,
        default_annotations=conf.get(ANNOTATIONS_EXPERIMENTS))
def process_experiment_jobs_logs(experiment: 'Experiment', temp: bool = True) -> None:
    k8s_manager = K8SManager(namespace=conf.get('K8S_NAMESPACE'), in_cluster=True)
    for experiment_job in experiment.jobs.all():
        process_experiment_job_logs(experiment_job=experiment_job,
                                    temp=temp,
                                    k8s_manager=k8s_manager)
def _get_node_selector(self, node_selector):
    return get_node_selector(
        node_selector=node_selector,
        default_node_selector=conf.get(NODE_SELECTORS_EXPERIMENTS))
def tenant_id(self):
    return conf.get(AUTH_AZURE_TENANT_ID)
def is_expired(self):
    return self.started_at + timedelta(days=conf.get('TTL_TOKEN')) <= timezone.now()
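# The expiry check above in plain datetime arithmetic (the TTL value and dates
# are illustrative): a token started 8 days ago with a 7-day TTL is expired.
from datetime import datetime, timedelta

started_at = datetime(2019, 1, 1)
ttl_days = 7
now = datetime(2019, 1, 9)
assert started_at + timedelta(days=ttl_days) <= now  # expired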
def get_oauth_client_id(self):
    return conf.get(AUTH_AZURE_CLIENT_ID)
def get_oauth_client_secret(self):
    return conf.get(AUTH_AZURE_CLIENT_SECRET)
def __init__(self,
             namespace,
             project_name,
             experiment_group_name,
             experiment_name,
             project_uuid,
             experiment_group_uuid,
             experiment_uuid,
             original_name=None,
             cloning_strategy=None,
             job_container_name=None,
             job_docker_image=None,
             job_docker_image_pull_policy=None,
             sidecar_container_name=None,
             sidecar_docker_image=None,
             sidecar_docker_image_pull_policy=None,
             init_container_name=None,
             init_docker_image=None,
             init_docker_image_pull_policy=None,
             role_label=None,
             type_label=None,
             app_label=None,
             health_check_url=None,
             use_sidecar=False,
             sidecar_config=None,
             log_level=None,
             declarations=None):
    super().__init__(
        namespace=namespace,
        project_name=project_name,
        project_uuid=project_uuid,
        job_container_name=job_container_name,
        job_docker_image=job_docker_image,
        job_docker_image_pull_policy=job_docker_image_pull_policy,
        sidecar_container_name=sidecar_container_name or conf.get(CONTAINER_NAME_SIDECARS),
        sidecar_docker_image=sidecar_docker_image or conf.get(SIDECARS_DOCKER_IMAGE),
        sidecar_docker_image_pull_policy=(
            sidecar_docker_image_pull_policy or conf.get(SIDECARS_IMAGE_PULL_POLICY)),
        init_container_name=init_container_name or conf.get(CONTAINER_NAME_INIT),
        init_docker_image=init_docker_image or conf.get(INIT_DOCKER_IMAGE),
        init_docker_image_pull_policy=(
            init_docker_image_pull_policy or conf.get(INIT_IMAGE_PULL_POLICY)),
        role_label=role_label or conf.get(ROLE_LABELS_WORKER),
        type_label=type_label or conf.get(TYPE_LABELS_RUNNER),
        app_label=app_label or conf.get(APP_LABELS_EXPERIMENT),
        health_check_url=health_check_url,
        use_sidecar=use_sidecar,
        sidecar_config=sidecar_config,
        log_level=log_level,
    )
    self.project_name = project_name
    self.experiment_group_name = experiment_group_name
    self.experiment_name = experiment_name
    self.experiment_group_uuid = experiment_group_uuid
    self.experiment_uuid = experiment_uuid
    self.original_name = original_name
    self.cloning_strategy = cloning_strategy
    self.declarations = declarations
    self.experiment_labels = self.get_experiment_labels()
    self.cluster_def = None
def start_notebook(self,
                   persistence_outputs=None,
                   persistence_data=None,
                   outputs_refs_jobs=None,
                   outputs_refs_experiments=None,
                   resources=None,
                   labels=None,
                   annotations=None,
                   secret_refs=None,
                   config_map_refs=None,
                   node_selector=None,
                   affinity=None,
                   tolerations=None,
                   backend=None,
                   max_restarts=None,
                   reconcile_url=None,
                   mount_code_in_notebooks=False):
    ports = [self.request_notebook_port()]
    target_ports = [self.port]
    volumes, volume_mounts = get_pod_volumes(persistence_outputs=persistence_outputs,
                                             persistence_data=persistence_data)

    refs_volumes, refs_volume_mounts = get_pod_refs_outputs_volumes(
        outputs_refs=outputs_refs_jobs,
        persistence_outputs=persistence_outputs)
    volumes += refs_volumes
    volume_mounts += refs_volume_mounts

    refs_volumes, refs_volume_mounts = get_pod_refs_outputs_volumes(
        outputs_refs=outputs_refs_experiments,
        persistence_outputs=persistence_outputs)
    volumes += refs_volumes
    volume_mounts += refs_volume_mounts

    shm_volumes, shm_volume_mounts = get_shm_volumes()
    volumes += shm_volumes
    volume_mounts += shm_volume_mounts

    context_volumes, context_mounts = get_auth_context_volumes()
    volumes += context_volumes
    volume_mounts += context_mounts

    if mount_code_in_notebooks:
        code_volume, code_volume_mount = self.get_notebook_code_volume()
        volumes.append(code_volume)
        volume_mounts.append(code_volume_mount)

    resource_name = self.resource_manager.get_resource_name()
    args = self.get_notebook_args(deployment_name=resource_name,
                                  mount_code_in_notebooks=mount_code_in_notebooks,
                                  backend=backend)
    command = ["/bin/sh", "-c"]
    labels = get_labels(default_labels=self.resource_manager.labels, labels=labels)

    deployment = self.resource_manager.get_deployment(
        resource_name=resource_name,
        volume_mounts=volume_mounts,
        volumes=volumes,
        labels=labels,
        env_vars=None,
        command=command,
        args=args,
        init_env_vars=self.get_init_env_vars(),
        persistence_outputs=persistence_outputs,
        persistence_data=persistence_data,
        outputs_refs_jobs=outputs_refs_jobs,
        outputs_refs_experiments=outputs_refs_experiments,
        secret_refs=secret_refs,
        config_map_refs=config_map_refs,
        resources=resources,
        annotations=annotations,
        ephemeral_token=None,
        node_selector=node_selector,
        affinity=affinity,
        tolerations=tolerations,
        ports=target_ports,
        init_context_mounts=context_mounts,
        reconcile_url=reconcile_url,
        max_restarts=max_restarts,
        restart_policy=get_deployment_restart_policy(max_restarts))
    dep_resp, _ = self.create_or_update_deployment(name=resource_name,
                                                   body=deployment,
                                                   reraise=True)

    service = services.get_service(
        namespace=self.namespace,
        name=resource_name,
        labels=self.resource_manager.get_labels(),
        ports=ports,
        target_ports=target_ports,
        service_type=self._get_service_type())
    service_resp, _ = self.create_or_update_service(name=resource_name,
                                                    body=service,
                                                    reraise=True)
    results = {'deployment': dep_resp.to_dict(),
               'service': service_resp.to_dict()}

    if self._use_ingress():
        annotations = json.loads(conf.get(K8S_INGRESS_ANNOTATIONS))
        paths = [{
            'path': '/notebooks/{}'.format(self.project_name.replace('.', '/')),
            'backend': {
                'serviceName': resource_name,
                'servicePort': ports[0]
            }
        }]
        ingress = ingresses.get_ingress(namespace=self.namespace,
                                        name=resource_name,
                                        labels=self.resource_manager.get_labels(),
                                        annotations=annotations,
                                        paths=paths)
        self.create_or_update_ingress(name=resource_name, body=ingress, reraise=True)

    return results