def GetSparkContext():
    """Build and return a SparkContext for the no-chain seed analysis job.

    Configures app name, executor memory (1G) and the kryo serializer's
    maximum buffer (512M) before constructing the context.
    """
    spark_conf = pyspark.SparkConf()
    spark_conf.setAppName("analysis_nochain_seed")
    spark_conf.set("spark.executor.memory", "1G")
    spark_conf.set("spark.kryoserializer.buffer.max", "512M")
    # NOTE(review): context comes from extend_pyspark while the conf comes
    # from pyspark — presumably a project-local SparkContext subclass; confirm.
    return extend_pyspark.SparkContext(conf=spark_conf)
def test_create_build_without_tag_and_rebuild_latest_always_results_in_new_job(self):
    """With BUILD_JOBS_ALWAYS_PULL_LATEST enabled, creating a build for the
    same untagged image/config always produces a brand-new build job."""
    conf.set(key=BUILD_JOBS_ALWAYS_PULL_LATEST, value=True)
    assert BuildJobStatus.objects.count() == 0
    assert BuildJob.objects.count() == 0
    build_job, rebuild = BuildJob.create(
        user=self.project.user,
        project=self.project,
        config={'image': 'my_image'},
        code_reference=self.code_reference)
    self.assertEqual(rebuild, True)
    assert build_job.last_status == JobLifeCycle.CREATED
    assert BuildJobStatus.objects.count() == 1
    assert BuildJob.objects.count() == 1
    # NOTE(review): the original comment here claimed "Building with same
    # config does not create a new build job", but with always-pull-latest
    # set the assertions below show a NEW job IS created (counts reach 2 and
    # the two jobs differ).
    new_build_job, rebuild = BuildJob.create(
        user=self.project.user,
        project=self.project,
        config={'image': 'my_image'},
        code_reference=self.code_reference)
    self.assertEqual(rebuild, True)
    assert build_job.last_status == JobLifeCycle.CREATED
    assert BuildJobStatus.objects.count() == 2
    assert BuildJob.objects.count() == 2
    assert new_build_job != build_job
def test_get_registry_context_in_cluster(self):
    """In-cluster registry specs per build backend, then with a registry access."""

    def check_spec(spec, host, secret=None, secret_items=None):
        # Every in-cluster spec is expected to be insecure.
        assert spec.host == host
        assert spec.secret == secret
        assert spec.secret_items == secret_items
        assert spec.insecure is True

    # Default and native backends resolve to the localhost registry.
    for backend in (None, BuildBackend.NATIVE):
        check_spec(get_registry_context(build_backend=backend),
                   'registry_localhost')
    # Kaniko resolves to the registry host.
    check_spec(get_registry_context(build_backend=BuildBackend.KANIKO),
               'registry_host')

    # Creating a registry access and using it with in cluster registry
    secret = K8SSecret.objects.create(owner=self.owner,
                                      name='my_secret',
                                      k8s_ref='my_secret')
    registry_access = RegistryAccess.objects.create(owner=self.owner,
                                                    k8s_secret=secret,
                                                    name='d-registry')
    conf.set(ACCESS_REGISTRY, registry_access.id)
    check_spec(get_registry_context(build_backend=BuildBackend.KANIKO),
               'registry_host',
               secret=secret.k8s_ref,
               secret_items=secret.items)
def test_cannot_upload_if_project_has_a_running_notebook_with_code_mount(self):
    """Repo upload is forbidden while a code-mounted notebook is running."""
    conf.set(key=NOTEBOOKS_MOUNT_CODE, value=True)
    owner = self.auth_client.user
    repo_name = self.project.name
    # Give the project a running notebook.
    notebook_job = NotebookJobFactory(project=self.project)
    notebook_job.set_status(status=JobLifeCycle.RUNNING)
    assert self.model_class.objects.count() == 0
    archive = self.get_upload_file()
    with patch('api.repos.views.handle_new_files') as files_task:
        resp = self.auth_client.put(self.url,
                                    data={'repo': archive},
                                    content_type=MULTIPART_CONTENT)
    assert resp.status_code == status.HTTP_403_FORBIDDEN
    # The uploaded tarball must not have been written to the mount path.
    tarball_path = '{}/{}/{}.tar.gz'.format(conf.get(UPLOAD_MOUNT_PATH),
                                            owner.username,
                                            repo_name)
    self.assertFalse(os.path.exists(tarball_path))
    # The async handler must never have been scheduled.
    assert files_task.call_count == 0
    # No repo record was created and no working copy exists on disk.
    assert self.model_class.objects.count() == 0
    working_copy = '{}/{}/{}/{}'.format(conf.get(REPOS_MOUNT_PATH),
                                        owner.username,
                                        repo_name,
                                        repo_name)
    self.assertFalse(os.path.exists(working_copy))
def create(self, request, *args, **kwargs):
    """Apply config options from the request body.

    Each truthy value is stored via ``conf.set``; a falsy value removes
    the option via ``conf.delete``. A ``ConfException`` for any key is
    surfaced as a ``ValidationError``.
    """
    payload = self.request.data
    if not payload:
        raise ValidationError('Received no config options.')
    for option in payload:
        option_value = payload.get(option)
        try:
            if option_value:
                conf.set(option, option_value)
            else:
                # A falsy/empty value means "unset this option".
                conf.delete(option)
        except ConfException as e:
            raise ValidationError(e)
    return Response(data={}, status=status.HTTP_200_OK)
def test_get_external_registry_context(self):
    """A configured external registry access yields a secure spec carrying
    the access's kubernetes secret."""
    k8s_secret = K8SSecret.objects.create(owner=self.owner,
                                          name='my_secret',
                                          k8s_ref='my_secret')
    access = RegistryAccess.objects.create(
        owner=self.owner,
        host='https://index.docker.io/v1/foo',
        k8s_secret=k8s_secret,
        name='d-registry')
    conf.set(ACCESS_REGISTRY, access.id)
    spec = get_registry_context(build_backend=None)
    # External registries are not treated as insecure.
    assert spec.insecure is False
    assert spec.host == 'https://index.docker.io/v1/foo'
    assert spec.secret == k8s_secret.k8s_ref
    assert spec.secret_items == k8s_secret.items
def test_delete_build_jobs(self):
    """Archive cleanup removes jobs of archived projects but keeps archived
    jobs whose project is still live."""
    archived_project = ProjectFactory()
    BuildJobFactory(project=archived_project)
    archived_project.archive()
    live_project = ProjectFactory()
    lone_archived_job = BuildJobFactory(project=live_project)
    lone_archived_job.archive()
    assert BuildJob.all.count() == 2
    # Make every archive old enough to be cleaned.
    conf.set(CLEANING_INTERVALS_ARCHIVES, -10)
    delete_archived_build_jobs()
    # Only the job belonging to the archived project is deleted; the job
    # archived under the live project survives.
    assert BuildJob.all.count() == 1
def test_delete_experiment_groups(self):
    """Archive cleanup removes groups of archived projects but keeps archived
    groups whose project is still live."""
    archived_project = ProjectFactory()
    ExperimentGroupFactory(project=archived_project)
    archived_project.archive()
    live_project = ProjectFactory()
    lone_archived_group = ExperimentGroupFactory(project=live_project)
    lone_archived_group.archive()
    assert ExperimentGroup.all.count() == 2
    # Make every archive old enough to be cleaned.
    conf.set(CLEANING_INTERVALS_ARCHIVES, -10)
    delete_archived_experiment_groups()
    # Only the group of the archived project is deleted; the group archived
    # under the live project survives.
    assert ExperimentGroup.all.count() == 1
def test_delete_projects(self):
    """Deleting archived projects cascades to their experiments, while an
    archived experiment under a live project is kept."""
    archived_project = ProjectFactory()
    ExperimentFactory(project=archived_project)
    archived_project.archive()
    live_project = ProjectFactory()
    lone_archived_experiment = ExperimentFactory(project=live_project)
    lone_archived_experiment.archive()
    assert Project.all.count() == 2
    assert Experiment.all.count() == 2
    # Make every archive old enough to be cleaned.
    conf.set(CLEANING_INTERVALS_ARCHIVES, -10)
    delete_archived_projects()
    # Only the archived project is deleted ...
    assert Project.all.count() == 1
    # ... taking its experiment with it; the experiment archived under the
    # live project survives.
    assert Experiment.all.count() == 1
def test_not_allowed_to_create(self):
    """Posting a new project is rejected when user ownership is disabled."""
    conf.set(key=ALLOW_USER_OWNERSHIP, value=False)
    resp = self.auth_client.post(self.url, {'name': 'new_project'})
    assert resp.status_code == status.HTTP_400_BAD_REQUEST
def test_cannot_create(self):
    """The project factory raises when user ownership is disallowed."""
    conf.set(key=ALLOW_USER_OWNERSHIP, value=False)
    # Creating through the factory must fail validation outright.
    with self.assertRaises(ValidationError):
        ProjectFactory()
def update (self):
    """Update puzzle and check win conditions.

    Returns whether anything changed (the result of the puzzle step, or
    False when frozen).

    NOTE(review): this block was recovered from a whitespace-stripped
    source; the if/elif nesting below is a reconstruction — verify against
    the original file before relying on the exact branch structure.
    """
    # Only advance the simulation when unfrozen, or when a single manual
    # step was requested while frozen.
    if not self.frozen or self._next_step:
        # fast-forward by increasing FPS
        if self.solving and self._ff == 2:
            if not hasattr(self, '_FRAME'):
                # stash the normal frame time so it can be restored later
                self._FRAME = self.FRAME
                self.FRAME /= conf.FF_SPEEDUP
        elif hasattr(self, '_FRAME'):
            # no longer fast-forwarding: restore the stashed frame time
            self.FRAME = self._FRAME
            del self._FRAME
        # continue solving
        if self.solving:
            self.solve()
        # continue recording
        if self.recording:
            self._recording_frame += 1
        # step puzzle forwards
        rtn = self.puzzle.step()
        self._next_step = False
        # reset list of moves made this frame
        self._moved = []
        self._stored_moves = []
    else:
        rtn = False
    # check for surfaces with their corresponding Block types on them
    win = True
    for col in self.puzzle.grid:
        for s, b, sel in col:
            # goal surfaces have IDs starting at 0
            if s >= 0 and (not isinstance(b, Block) or s != b.type):
                win = False
                break
    # need to stay winning for one frame - that is, blocks must have
    # stopped on the goals, not just be moving past them
    if win:
        if not self._winning:
            # first winning frame: just mark it and wait one more frame
            self._winning = True
        # else if this is the first frame since we've won,
        elif not self.won:
            # stop solving
            if self.solving:
                if self._finished_solving:
                    self.stop_solving()
                    # re-read: stop_solving may have reset the winning flag
                    win = self._winning
                else:
                    # solution still playing out: not a real win yet
                    win = False
            if win:
                # save to disk
                if not self.solving and self.ID is not None:
                    levels = conf.get('completed_levels', [])
                    if self.ID not in levels:
                        levels.append(self.ID)
                        conf.set(completed_levels = levels)
                        self.game.set_backend_attrs(menu.MainMenu, 're_init', True)
                        # store solve method (True = solved by the player)
                        solved = conf.get('solve_methods', [])
                        solved.append(True)
                        conf.set(solve_methods = solved)
                # call win callback
                if self.win_cb is not None:
                    self.win_cb[0](*self.win_cb[1:])
                # play victory sound
                if self.sound:
                    self.game.play_snd('win')
                self.won = True
    else:
        # not currently in a winning configuration
        self._winning = False
    return rtn
def solve (self, solution = 0, stop_on_finish = True):
    """Solve the puzzle.

    Takes the solution number to use (its index in the list of solutions
    ordered as in the puzzle definition). This defaults to 0 (the 'primary'
    solution). Returns a list of the directions moved.

    This function is also called to move to the next step of an ongoing
    solution, in which case it requires no argument. In fact, if a solution
    is ongoing, it cannot be called as detailed above (any argument is
    ignored). This makes it a bad idea to call this function while solving.

    NOTE(review): recovered from a whitespace-stripped source; indentation
    below is a reconstruction — verify against the original file.
    """
    i = self.solving_index
    if i is None:
        # starting
        self.reset()
        self.solving = True
        self.solving_index = 0
        # parse both the normal and the fast-forward variants of the solution
        self._solution = self._parse_soln(solution)
        self._solution_ff = self._parse_soln(solution, 0)
        self._solve_time = self._solution[0][1]
        self._solve_time_ff = self._solution_ff[0][1]
        self._finished_solving = False
        # store solve method (False = solved automatically, recorded only
        # if the level hasn't already been completed)
        if self.ID is not None:
            levels = conf.get('completed_levels', [])
            if self.ID not in levels:
                solved = conf.get('solve_methods', [])
                solved.append(False)
                conf.set(solve_methods = solved)
        # call this function again to act on the first instruction
        move = self.solve()
    elif i == len(self._solution):
        # finished: just wait until the level ends
        self._finished_solving = True
        if stop_on_finish:
            self.stop_solving()
        move = []
    else:
        # continuing: odd indices are moves, even indices are waits
        if i % 2:
            # make a move
            move = self._solution[i]
            self.move(False, *move)
            i += 1
            if i < len(self._solution):
                # load the delay for the next wait step
                self._solve_time = self._solution[i][1]
                self._solve_time_ff = self._solution_ff[i][1]
            self.solving_index = i
        else:
            # wait
            # if fast-forwarding, use the quicker solution
            fast = self._ff or self.frozen
            t = self._solve_time_ff if fast else self._solve_time
            if t <= 0:
                self.solving_index += 1
                # do next step now
                move = self.solve()
            else:
                # count down both timers in lockstep
                self._solve_time -= 1
                self._solve_time_ff -= 1
                held = self._solution[self.solving_index][0]
                if held:
                    # want to send some input every frame for this delay
                    self.move(False, *held)
                    move = held
                else:
                    move = []
    # fast-forward request is consumed every call
    self._ff = False
    return move
def run():
    # Print the current 'IP' option, then overwrite it with 20.
    # NOTE: Python 2 print statement — this module is not Python 3 compatible.
    print conf.get('IP')
    conf.set('IP', 20)
# Small Python 2 demo script: sets a conf value, schedules tmp2.run (which
# overwrites it) on a 1-second timer, and prints the value before and after.
from threading import Timer
from time import sleep

import conf
import tmp2

if __name__ == "__main__":
    #conf.init()
    conf.set('IP', 10)
    print conf.get('IP')
    # tmp2.run fires after ~1s and changes 'IP'
    x = Timer(1, tmp2.run)
    x.start()
    # sleep past the timer so the change is visible
    sleep(2)
    print conf.get('IP')