def update_changed_test_case(self, test_case, new_commit, conf):
    """Sync the stored Couchbase documents for a test case whose test line changed.

    ``test_case`` carries ``old_test_line`` / ``new_test_line``; both are parsed,
    hashed (md5 of the sorted-key JSON) into document keys, and the matching
    documents are merged/flattened and upserted.  When the old and new keys are
    identical only one document is updated; otherwise both the old and the new
    document are cross-linked via ``newChangedDocId`` / ``oldChangedDocId``.

    :param test_case: dict with ``old_test_line`` and ``new_test_line`` entries
    :param new_commit: commit used to build the change-history entry
    :param conf: conf file passed through to ``get_test_cases_document``
    """
    old_test_case = test_case['old_test_line']
    new_test_case = test_case['new_test_line']
    class_name, _old_test_case = self.get_test_case_from_line(
        old_test_case, "")
    class_name, _new_test_case = self.get_test_case_from_line(
        new_test_case, "")
    old_t = self.get_test_cases_document(_old_test_case, conf)
    new_t = self.get_test_cases_document(_new_test_case, conf)
    self.remove_unwanted_fields(_old_test_case)
    self.remove_unwanted_fields(_new_test_case)
    history = self.get_history(new_commit, "change")
    # Document keys are content hashes of the (cleaned) test-case dicts.
    old_document_key = hashlib.md5(
        json.dumps(_old_test_case, sort_keys=True)).hexdigest()
    new_document_key = hashlib.md5(
        json.dumps(_new_test_case, sort_keys=True)).hexdigest()
    client = CLIENT[BUCKET]
    # BUGFIX: was a bare `except:`, which also swallows KeyboardInterrupt /
    # SystemExit.  A missing document is the expected failure here.
    try:
        old_document = client.get(old_document_key).value
    except Exception:
        old_document = None
    if old_document_key == new_document_key:
        # Same content hash: refresh the single document in place.
        new_t.change_history = [history]
        to_upsert = new_t.__dict__
        new_conf = pydash.clone_deep(to_upsert['confFile'])
        pydash.merge_with(to_upsert, old_document,
                          TestCaseCollector._merge_dict)
        TestCaseCollector._flatten_conf(to_upsert['confFile'], new_conf)
        client.upsert(old_document_key, to_upsert)
    else:
        # Different hash: mark both documents as changed and cross-link them.
        old_t.change_history = [history]
        new_t.change_history = [history]
        old_t.changed = True
        new_t.changed = True
        old_t.newChangedDocId = new_document_key
        new_t.oldChangedDocId = old_document_key
        old_to_upsert = old_t.__dict__
        new_to_upsert = new_t.__dict__
        if old_document:
            new_conf = pydash.clone_deep(old_to_upsert['confFile'])
            old_to_upsert = pydash.merge_with(
                old_to_upsert, old_document, TestCaseCollector._merge_dict)
            TestCaseCollector._flatten_conf(old_to_upsert['confFile'],
                                            new_conf)
        # BUGFIX: second bare `except:` narrowed for the same reason as above.
        try:
            new_document = client.get(new_document_key).value
        except Exception:
            new_document = None
        if new_document:
            new_to_upsert = pydash.merge_with(
                new_to_upsert, new_document, TestCaseCollector._merge_dict)
        # Merge the old document's history into the new document as well;
        # merge_with tolerates old_document being None (no-op).
        new_conf = pydash.clone_deep(new_to_upsert['confFile'])
        new_to_upsert = pydash.merge_with(new_to_upsert, old_document,
                                          TestCaseCollector._merge_dict)
        TestCaseCollector._flatten_conf(new_to_upsert['confFile'], new_conf)
        client.upsert(old_document_key, old_to_upsert)
        client.upsert(new_document_key, new_to_upsert)
def get_test_case_id(self, test_result):
    """Return the md5 document id of the test case behind ``test_result``.

    Looks up the conf file (from the result itself or the store), loads the
    conf's test cases, and locates the entry whose fields are a subset of the
    result's test-case fields.  Returns ``None`` when no conf file or no
    matching entry is found.
    """
    test_case = self.get_test_case_from_test_result(test_result)
    if 'conf_file' in test_case:
        conf_file = test_case['conf_file']
    else:
        conf_file = self.get_conf_from_store(test_case)
    if not conf_file:
        return
    if "conf/" in conf_file:
        conf_file = conf_file.replace("conf/", '')
    test_cases = self.get_test_cases_from_conf(conf_file)
    # Work on clones so the originals keep GROUP/commented/testLine intact.
    stripped_cases = pydash.clone_deep(test_cases)
    for candidate in stripped_cases:
        pydash.unset(candidate, 'GROUP')
        pydash.unset(candidate, 'commented')
        pydash.unset(candidate, 'testLine')
    # First stripped entry whose items are a subset of the result's items.
    match_index = -1
    for position, candidate in enumerate(stripped_cases):
        if set(candidate.items()).issubset(set(test_case.items())):
            match_index = position
            break
    if match_index == -1:
        return None
    name = test_cases[match_index]
    self.remove_unwanted_fields(name)
    return hashlib.md5(json.dumps(name, sort_keys=True)).hexdigest()
def _minMax(self, gameBoard: Board3D, maximizing: bool, depth: int = 0) -> int:
    """Minimax over a 3D board; returns the best achievable evaluation score.

    Works on a deep copy of ``gameBoard``, so the caller's board is untouched.
    Recursion stops at a decided/full board or when ``self._maxDepth`` (if
    positive) would be exceeded.
    """
    board = _.clone_deep(gameBoard)
    score = self._evaluateBoard(
        board, customWinner=self._operations3D.getBoardWinner)
    # Terminal position: a non-zero score means a decided game; a zero score
    # with no moves left means a draw.  (Equivalent to the original
    # `score > 0 or score < 0 or (score == 0 and not has_moves)`.)
    if score != 0 or not self._operations3D.hasBoardAnyMoves(board):
        return score
    # Depth limit is invariant for this call, so evaluate it once.
    depth_allowed = self._maxDepth <= 0 or depth + 1 <= self._maxDepth
    mark = self._player if maximizing else self._opponent
    pick = max if maximizing else min
    best = -sys.maxsize if maximizing else sys.maxsize
    for plane in board:
        for row in plane:
            for cell_index, cell in enumerate(row):
                if cell == 'E' and depth_allowed:
                    row[cell_index] = mark            # try the move
                    best = pick(best,
                                self._minMax(board, not maximizing, depth + 1))
                    row[cell_index] = 'E'             # undo the move
    return best
def test_clone_deep(case):
    """clone_deep must return a new container whose members are new objects too."""
    cloned = _.clone_deep(case)
    assert cloned is not case
    for key, member in _.helpers.iterator(cloned):
        assert case[key] is not member
def test_clone_deep(case, kargs):
    """clone(is_deep=True) and clone_deep must both produce fully independent copies."""
    kargs["is_deep"] = True
    via_clone = _.clone(case, **kargs)
    via_clone_deep = _.clone_deep(case, callback=kargs.get("callback"))
    for copied in (via_clone, via_clone_deep):
        assert copied is not case
        for key, member in _.helpers.iterator(copied):
            assert case[key] is not member
def agg_builder(mapping, agg_keys):
    """Nest aggregation commands from ``mapping`` along ``agg_keys``.

    Walks ``agg_keys`` from the last key to the first; each step wraps the
    previously built aggregation under ``<key>.aggs``, producing a nested
    dict rooted at the first key.  The command for each key is read from
    ``mapping`` at ``<key>.agg_cmd``.

    :param mapping: source mapping holding ``<key>.agg_cmd`` entries
    :param agg_keys: ordered keys, outermost first
    :return: nested aggregation dict ({} when ``agg_keys`` is empty)
    """
    current = {}  # returned as-is when agg_keys is empty
    child = None
    for key in reversed(agg_keys):
        current = {key: _.get(mapping, key + '.agg_cmd')}
        # BUGFIX(cleanup): the original guarded this with `if current:`, but
        # `current` is always a freshly built one-entry dict and therefore
        # always truthy — the guard was dead code and has been removed.
        if child:
            # NOTE(review): pydash.set_ treats '.' in `key` as a path
            # separator — assumes keys contain no dots; confirm with callers.
            current = _.set_(current, key + ".aggs", child)
        child = _.clone_deep(current)
    return current
def __message(self, message: Union[str, Exception], metadata: dict = None) -> str:
    """Render a log line: the stringified message followed by its metadata.

    Dict metadata is deep-cloned, passed through the sensitive-data masker,
    and pretty-printed as JSON; anything else (including ``None``) is
    appended via ``str()``.
    """
    msg: str = str(message)
    if not isinstance(metadata, dict):
        # Non-dict metadata is appended verbatim.
        return msg + " " + str(metadata)
    clone_data = pydash.clone_deep(metadata)  # never mutate the caller's dict
    masked_data = CustomLogger.__clear_sensitive_data(self, clone_data)
    return msg + " " + json.dumps(masked_data, indent=1)
def update_deleted_test_case(self, test_case, new_commit, conf):
    """Mark the stored Couchbase document for a deleted test case.

    Parses the deleted test line, flags its document as ``deleted`` with a
    "delete" history entry, merges it with the existing stored document and
    upserts the result under the md5 content-hash key.

    :param test_case: raw test line that was removed
    :param new_commit: commit used to build the change-history entry
    :param conf: conf file passed through to ``get_test_cases_document``
    """
    class_name, _test_case = self.get_test_case_from_line(test_case, "")
    t = self.get_test_cases_document(_test_case, conf)
    t.deleted = True
    history = self.get_history(new_commit, "delete")
    t.change_history = [history]
    self.remove_unwanted_fields(_test_case)
    # Document key is the content hash of the cleaned test-case dict.
    document_key = hashlib.md5(json.dumps(_test_case,
                                          sort_keys=True)).hexdigest()
    client = CLIENT[BUCKET]
    try:
        existing_document = client.get(document_key).value
        to_upsert = t.__dict__
        new_conf = pydash.clone_deep(to_upsert['confFile'])
        pydash.merge_with(to_upsert, existing_document,
                          TestCaseCollector._merge_dict)
        TestCaseCollector._flatten_conf(to_upsert['confFile'], new_conf)
        client.upsert(document_key, to_upsert)
    except Exception as e:
        # Deliberate best-effort: a missing document (or failed upsert) is
        # only reported, never raised.
        # BUGFIX: `print e` is Python-2-only syntax; `print(e)` produces the
        # same output on Python 2 and also parses on Python 3.
        print(e)
def _findBestMove(self, gameBoard: Board, player: Player = None) -> Move:
    """Search every empty cell with minimax and return the best move.

    :param gameBoard: current board (deep-copied before projection, so the
        caller's board is untouched)
    :param player: side to move for; defaults to ``self._onMove``
    :return: a move tuple (player, i, j, k), unprojected back onto the
        original board coordinates
    """
    forPlayer = self._onMove if player is None else player
    isPlayerOnMove = forPlayer == self._player
    bestValue = -sys.maxsize if isPlayerOnMove else sys.maxsize
    bestMove: Move3D = (forPlayer, -1, -1, -1)
    board = self._projectBoard(_.clone_deep(gameBoard))
    if self._operations3D.isBoardEmpty(board):
        # Opening move: any square is as good as any other.
        # NOTE(review): this path returns projected coordinates directly,
        # unlike the final return which unprojects — confirm intended.
        return forPlayer, random.randint(0, self._size - 1), \
            random.randint(0, self._size - 1), \
            random.randint(0, self._size - 1)
    for i in range(len(board)):
        for j in range(len(board[i])):
            for k in range(len(board[i][j])):
                if board[i][j][k] == 'E':
                    board[i][j][k] = forPlayer
                    # NOTE(review): sibling _minMax recurses with the flag
                    # negated after a move; passing `isPlayerOnMove` here
                    # (not `not isPlayerOnMove`) looks suspicious — confirm.
                    newVal = self._minMax(board, isPlayerOnMove, 0)
                    # BUGFIX: undo the trial move, mirroring _minMax.
                    # Previously the mark was left on the board, so every
                    # later candidate was scored against a board containing
                    # phantom moves from earlier candidates.
                    board[i][j][k] = 'E'
                    if (isPlayerOnMove and newVal > bestValue) or \
                            (not isPlayerOnMove and newVal < bestValue):
                        bestValue = newVal
                        bestMove = (forPlayer, i, j, k)
    return self._unprojectMove(self._board, bestMove)
def main(argsdict):
    """Remove a user and everything that only exists because of that user.

    Order matters: apps -> policies -> user/team/role links -> teams/roles ->
    orphaned policies/teams/roles -> finally the user entry itself.  Every
    destructive call goes through purgeResource, which honors ``dryrun``.

    :param argsdict: parsed CLI options; reads ``dryrun`` and ``debug``
    """
    dryrun = argsdict.get('dryrun')
    debug = argsdict.get('debug')
    if debug:
        logging.getLogger().setLevel(logging.DEBUG)
    userId = USER_EMAIL
    logging.warning(f'Remove user: {userId}, Dryrun: {dryrun}')
    idToken = getToken(apiSecret=API_SECRET, apiKey=API_KEY)
    user = getResource(id=userId, idToken=idToken, url=USER_API)
    # TODO: check the team contains only this user
    # also need to skip "groups"
    teamIds = user.get('teamIds', [])
    accessRoleIds = user.get('accessRoleIds')
    # NOTE: Fetch all apps and related policyId.
    # policyAppMapper shape: {policyId: {'appId': [appId, ...], 'deletable': bool}}
    policyAppMapper = {}
    apps = getResources(idToken=idToken, url=APP_API)
    for app in apps:
        appId = app.get('id')
        policyId = app.get('policyId')
        # policy exists
        if bool(policyId):
            appIds = pydash.get(policyAppMapper, f'{policyId}.appId', [])
            appIds.append(appId)
            pydash.set_(policyAppMapper, f'{policyId}.appId', appIds)
    # NOTE: Check each policy if it's deletable or not. It only handles ruleRoleLink except Role
    policyIds = list(policyAppMapper.keys())
    for policyId in policyIds:
        policyRoleIds = []
        policy = getResource(id=policyId, idToken=idToken, url=POLICY_API)
        rules = pydash.objects.get(policy, 'rules')
        for rule in rules:
            roleIds = pydash.objects.get(rule, 'accessRoleIds')
            policyRoleIds.append(roleIds)
        policyRoleIds = pydash.flatten_deep(policyRoleIds)
        # NOTE: In case the policyRoleIds is totally equal with userRoleIds, we will delete it.
        # (subset test: every role the policy references belongs to this user)
        if set(policyRoleIds) <= set(accessRoleIds):
            pydash.set_(policyAppMapper, f'{policyId}.deletable', True)
        else:
            pydash.set_(policyAppMapper, f'{policyId}.deletable', False)
    # NOTE(review): `== True` would be `is True` / plain truthiness in
    # idiomatic Python — left unchanged here.
    deletablePolicyMapper = pydash.pick_by(policyAppMapper,
                                           lambda item: pydash.get(item, 'deletable') == True)
    deletablePolicyIds = list(deletablePolicyMapper.keys())
    deletableAppIds = pydash.flatten_deep(
        [pydash.get(deletablePolicyMapper, f'{i}.appId') for i in deletablePolicyMapper])
    # NOTE: delete app if its policy will be deleted.
    for appId in deletableAppIds:
        purgeResource(dryrun, id=appId, idToken=idToken, url=APP_API)
    # NOTE: delete policy something like policyEntry, policyRole relationship and ruleEntry
    for policyId in deletablePolicyIds:
        purgeResource(dryrun, id=policyId, idToken=idToken, url=POLICY_API)
    # NOTE: remove relationship something like userTeamLink, userRoleLink, teamRoleLink.
    for teamId in teamIds:
        purgeResource(dryrun, id=teamId, idToken=idToken,
                      url=f'{TEAM_API}/{teamId}/users/', data=[userId])
    for roleId in accessRoleIds:
        purgeResource(dryrun, id=roleId, idToken=idToken,
                      url=f'{ROLE_API}/{roleId}/users/', data=[userId])
        purgeResource(dryrun, id=roleId, idToken=idToken,
                      url=f'{ROLE_API}/{roleId}/teams/', data=teamIds)
    # NOTE: remove teams
    deletableTeamIds = []
    for teamId in teamIds:
        team = getResource(id=teamId, idToken=idToken, url=TEAM_API)
        teamEmails = pydash.get(team, 'emails')
        teamRoleIds = pydash.get(team, 'accessRoleIds')
        # NOTE: check the role contains only this user and teams
        if len(set(teamEmails) - set([userId])) == 0 and len(set(teamRoleIds) - set(accessRoleIds)) == 0:
            deletableTeamIds.append(teamId)
    # NOTE: remove roles
    deletableRoleIds = []
    for roleId in accessRoleIds:
        role = getResource(id=roleId, idToken=idToken, url=ROLE_API)
        roleEmails = pydash.get(role, 'emails')
        roleTeamIds = pydash.get(role, 'teamIds')
        # NOTE: check the role contains only this user and teams
        if len(set(roleEmails) - set([userId])) == 0 and len(set(roleTeamIds) - set(teamIds)) == 0:
            deletableRoleIds.append(roleId)
    for teamId in deletableTeamIds:
        purgeResource(dryrun, id=teamId, idToken=idToken, url=TEAM_API)
    for roleId in deletableRoleIds:
        purgeResource(dryrun, id=roleId, idToken=idToken, url=ROLE_API)
    # NOTE: handle orphan policy once app was deleted before
    updatablePolicyDataSet = {}
    # NOTE(review): `deletablePolicyIds` was a list above and is rebound to a
    # dict {policyId: policy} here — confusing reuse of the name, but the
    # later `for policyId in deletablePolicyIds` iterates its keys correctly.
    deletablePolicyIds = {}
    policies = getResources(idToken=idToken, url=POLICY_API)
    for policy in policies:
        policyId = policy.get('id')
        policyRoleIds = []
        rules = pydash.objects.get(policy, 'rules')
        for ruleIdx, rule in enumerate(rules):
            ruleRoleIds = rule.get('accessRoleIds')
            policyRoleIds.append(ruleRoleIds)
            # NOTE: Handle the detail Configure policy
            remainingRuleRoleIds = set(ruleRoleIds) - set(accessRoleIds)
            remainingRuleRoleIds = list(remainingRuleRoleIds)
            if len(remainingRuleRoleIds) > 0 and len(ruleRoleIds) != len(remainingRuleRoleIds):
                # Rule partially references the user's roles: keep only the rest.
                newPolicy = pydash.get(updatablePolicyDataSet, policyId,
                                       pydash.clone_deep(policy))
                pydash.set_(newPolicy, f'rules.{ruleIdx}.accessRoleIds',
                            remainingRuleRoleIds)
                pydash.set_(updatablePolicyDataSet, policyId, newPolicy)
            elif len(remainingRuleRoleIds) == 0:
                # Rule only referenced the user's roles: empty it out.
                newPolicy = pydash.get(updatablePolicyDataSet, policyId,
                                       pydash.clone_deep(policy))
                pydash.set_(newPolicy, f'rules.{ruleIdx}.accessRoleIds', [])
                pydash.set_(updatablePolicyDataSet, policyId, newPolicy)
        policyRoleIds = pydash.flatten_deep(policyRoleIds)
        # NOTE: In case the policyRoleIds is totally equal with userRoleIds, we will delete it.
        if set(policyRoleIds) <= set(accessRoleIds):
            pydash.set_(deletablePolicyIds, policyId, policy)
        elif len(policyRoleIds) == 0:
            # NOTE: Relationship was removed previously
            pydash.set_(deletablePolicyIds, policyId, policy)
    # NOTE: Handle Configure policy
    for policyId in updatablePolicyDataSet:
        policy = pydash.get(updatablePolicyDataSet, policyId)
        # NOTE(review): `!= None` should idiomatically be `is not None` —
        # left unchanged here.
        if pydash.get(deletablePolicyIds, policyId, None) != None:
            continue
        rules = policy.get('rules', [])
        # Drop rules that ended up with no roles at all.
        newRules = [rule for rule in rules if len(rule.get('accessRoleIds', [])) > 0]
        pydash.set_(policy, 'rules', newRules)
        updateResource(dryrun, id=policyId, idToken=idToken, url=POLICY_API, data = policy)
    for policyId in deletablePolicyIds:
        purgeResource(dryrun, id=policyId, idToken=idToken, url=POLICY_API)
    # NOTE: handle orphan team once app was deleted before
    orphanTeamIds = []
    teams = getResources(idToken=idToken, url=TEAM_API)
    for team in teams:
        teamId = team.get('id')
        teamEmails = pydash.get(team, 'emails')
        if teamEmails == [userId]:
            # NOTE: Other case will be handled by user deleting
            orphanTeamIds.append(teamId)
    for teamId in orphanTeamIds:
        purgeResource(dryrun, id=teamId, idToken=idToken,
                      url=f'{TEAM_API}/{teamId}/users/', data=[userId])
    # NOTE: handle orphan role once app was deleted before
    orphanRoleIds = []
    roles = getResources(idToken=idToken, url=ROLE_API)
    for role in roles:
        roleId = role.get('id')
        roleEmails = pydash.get(role, 'emails')
        roleTeamIds = pydash.get(role, 'teamIds')
        # NOTE: skip this team including others relationship
        if roleEmails == [userId] and len(set(roleTeamIds) - set(teamIds)) == 0:
            orphanRoleIds.append(roleId)
    for roleId in orphanRoleIds:
        purgeResource(dryrun, id=roleId, idToken=idToken, url=ROLE_API)
    # NOTE: remove userEntry, and his relationship team, rule link, etc
    # TODO: check the team contains only this user
    # also need to skip "groups"
    purgeResource(dryrun, id=userId, idToken=idToken, url=USER_API)