def generateUserToken(user_id: str):
    """
    Creates a token for a specific user.
    Removes any token previously created for the user.
    """
    response = ApiResponse()
    user = User.query.filter_by(id=user_id).first()

    # Derive the token id and value from the current time plus a random
    # component, so repeated calls never collide.
    timestamp = time.time()
    timestamp_millis = int(round(timestamp * 1000))
    token_ids = sha256(hash_id(timestamp_millis + randint(0, 9999)))
    token_value = sha256(hash_id(timestamp_millis) + str(uuid4()))
    expires_at = int(timestamp + TOKEN_EXPIRATION_TIME)

    if user:
        token = Token(ids=token_ids,
                      ip=request.remote_addr,
                      token=token_value,
                      User_id=user.id,
                      ut_created_at=timestamp,
                      ut_expires_at=expires_at)

        # Invalidate any previously issued tokens before saving the new one.
        TokenService.clearUserTokens(user.id)

        if database.save_changes(token) is False:
            response.setMessage(
                "An error occurred while persisting data to the database")
        else:
            response.setSuccess()
            response.setMessage("Token successfully generated")
            response.setDetails({
                "token": token_value,
                "expires_at": expires_at
            })
    else:
        response.setMessage("User not found in the database")

    return response
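# A client holding the details returned above only needs the "expires_at"
# field to decide whether the token is still usable. A minimal sketch,
# assuming the details reach the client as a plain dict; the helper name
# is illustrative, not part of the API above:
import time

def is_token_expired(details: dict) -> bool:
    """Return True once the token's expiry timestamp has passed."""
    return time.time() >= details["expires_at"]

# Example: is_token_expired({"token": "...", "expires_at": 1700000000})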
@classmethod
def from_dict(cls, d: dict):
    # Index the units by id so each placement entry can look up its unit.
    units = map(Unit.from_dict, d['units'])
    units = {unit.id: unit for unit in units}
    tokens = [
        Token(units[placement['unitId']], (placement['x'], placement['y']))
        for placement in d['unitsPlacement']
    ]
    return cls(tokens)
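# The dict shape from_dict expects, reconstructed from the keys it reads.
# Only 'units', 'unitsPlacement', 'unitId', 'x', 'y' and the unit 'id' are
# certain from the code above; any other unit fields depend on what
# Unit.from_dict consumes and are omitted here:
board_dict = {
    'units': [
        {'id': 1},
        {'id': 2},
    ],
    'unitsPlacement': [
        {'unitId': 1, 'x': 3, 'y': 5},
        {'unitId': 2, 'x': 7, 'y': 9},
    ],
}
# board = Board.from_dict(board_dict)  # assuming from_dict belongs to Board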
def clean_token(conf, token):
    t = Token()
    t.set_spacy_token(token)
    # Exact dictionary lookup of the lemma; an empty result means the word
    # is not in the Spanish dictionary.
    spelled = spell.check_exact(token.lemma_, conf.spa_dict)

    # Nouns are content words by default.
    if token.pos_ == 'NOUN':
        t.stop = False

    # Person entities, numbers, URLs, e-mails, punctuation, whitespace,
    # currency, very short tokens and out-of-dictionary words are stopped.
    if token.ent_type_ == 'PER' \
            or token.like_num \
            or token.like_url \
            or token.like_email \
            or token.is_quote \
            or token.is_bracket \
            or token.is_space \
            or token.is_right_punct \
            or token.is_left_punct \
            or token.is_punct \
            or token.is_digit \
            or token.is_currency \
            or len(token.text) <= 3 \
            or len(spelled) == 0:
        t.stop = True

    # Organisation and miscellaneous entities, and longer in-dictionary
    # words, are kept.
    if token.ent_type_ == 'MISC' \
            or token.ent_type_ == 'ORG' \
            or (len(token.text) > 3 and len(spelled) > 0):
        t.stop = False

    # Function-word POS tags and slash-containing or very short tokens
    # are always stopped.
    if token.pos_ in ('SPACE', 'NUM', 'DET', 'CONJ', 'SCONJ', 'PUNCT') \
            or '/' in token.text \
            or len(token.text) <= 2:
        t.stop = True

    return t
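# How clean_token would typically be driven: run a Spanish spaCy pipeline
# (the PER/MISC/ORG entity labels above match the Spanish models) and keep
# the tokens that were not flagged. The model name and the `conf` object
# are assumptions for illustration only:
import spacy

nlp = spacy.load("es_core_news_sm")  # assumed Spanish model

def content_tokens(conf, text):
    """Return the cleaned tokens that were not marked as stop tokens."""
    return [t for t in (clean_token(conf, tok) for tok in nlp(text))
            if not t.stop]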
def parse_next_line(self):
    line = self.read_next_line()
    self.current_line += 1
    new_col, string, col, line_size, tabs = 1, "", 0, len(line), 0

    # Skip leading whitespace, expanding tabs for the column counter.
    while col < line_size and (line[col] == ' ' or line[col] == '\t'):
        new_col += 1 if line[col] == ' ' else self.TAB_SIZE
        tabs += 1 if line[col] == '\t' else 0
        col += 1
    if col == line_size:  # empty line
        return False

    while col < line_size:
        c = line[col]
        if c == '\'' or c == '"':  # string and character literals
            string = c
            col += 1
            while col < line_size:
                c = line[col]
                if c == '\\':  # translate escape sequences
                    col += 1
                    if col < line_size and line[col] in self.escape_char:
                        string += self.escape_char[line[col]]
                    else:
                        string += '\\'
                else:
                    string += c
                    if c == string[0]:  # matching closing quote
                        break
                col += 1
            category = self.get_category(string)
            if category != TokenCategory.unknown:
                string = string[1:len(string) - 1]  # drop the quotes
            self.token_buffer.append(
                Token(TokenPosition(self.current_line, new_col),
                      category, string))
            string, new_col = "", col + 1 + (self.TAB_SIZE * tabs)
        elif self.is_separator(c):
            # Flush the token accumulated before the separator.
            if string:
                self.token_buffer.append(
                    Token(TokenPosition(self.current_line, new_col),
                          self.get_category(string), string))
            if c != ' ' and c != '\t':
                new_col = col + 1 + (self.TAB_SIZE - 1) * tabs
                # Merge two-character operators via one-character lookahead:
                # advance, keep the pair if it completes one, else step back.
                if c == '*':
                    col += 1
                    if col < line_size and (line[col] == '/'
                                            or line[col] == '*'):
                        c += line[col]
                    else:
                        col -= 1
                if c == '<' or c == '>':
                    col += 1
                    if col < line_size and (line[col] == '<'
                                            or line[col] == '>'
                                            or line[col] == '='):
                        c += line[col]
                    else:
                        col -= 1
                if c == '/':
                    col += 1
                    if col < line_size and line[col] == '/':
                        return True  # line comment: ignore the rest
                    col -= 1
                if c == '&' or c == '|':
                    col += 1
                    if col < line_size and (line[col] == '&'
                                            or line[col] == '|'):
                        c += line[col]
                    else:
                        col -= 1
                self.token_buffer.append(
                    Token(TokenPosition(self.current_line, new_col),
                          self.separators[c], c))
            string, new_col = "", col + 2 + (self.TAB_SIZE - 1) * tabs
        else:
            string += c
        col += 1

    # Flush a token that runs to the end of the line.
    if string:
        self.token_buffer.append(
            Token(TokenPosition(self.current_line, new_col),
                  self.get_category(string), string))
    return True
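# The lookahead pattern above, isolated: advance one character, keep the
# pair if it completes a two-character operator, otherwise fall back to the
# single character. A minimal standalone sketch; the operator set is
# illustrative, not the parser's actual separator table:
TWO_CHAR_OPS = {'&&', '||', '<<', '>>', '<=', '>=', '*/', '**'}

def read_operator(line: str, col: int):
    """Return (operator, next_col), preferring the two-character form."""
    pair = line[col:col + 2]
    if len(pair) == 2 and pair in TWO_CHAR_OPS:
        return pair, col + 2
    return line[col], col + 1

# Example: read_operator("a<=b", 1) yields ('<=', 3).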
assert token43.unit.id == "43"
assert token43.position == (2, 3)
assert token44.unit.id == "44"
assert token44.position == (5, 7)


@pytest.mark.parametrize('board, expected', [
    (
        Board([]),
        False
    ),
    (
        Board([
            Token(Unit(id=2, hp=0), (3, 5))
        ]),
        False
    ),
    (
        Board([
            Token(Unit(id=2, hp=100), (3, 5))
        ]),
        True
    ),
    (
        Board([
            Token(Unit(id=2, hp=100), (3, 5)),
            Token(Unit(id=7, hp=0), (11, 13)),
            Token(Unit(id=17, hp=100), (19, 21))
        ]),
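# The final case and the test body are truncated above. The pattern of the
# earlier cases suggests the test asserts a liveness predicate: True
# whenever any token's unit has hp > 0. A minimal sketch of such a
# predicate; the function name and the `tokens` attribute on Board are
# assumptions, not confirmed by the snippet:
def has_alive_units(board) -> bool:
    """True if at least one token on the board holds a living unit."""
    return any(token.unit.hp > 0 for token in board.tokens)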
from math import sqrt

import pytest
from pytest import approx

from model.Token import Token
from model.Unit import Unit


@pytest.mark.parametrize('token1, token2, expected', [
    (Token(Unit(), (0, 0)), Token(Unit(), (0, 0)), 0.0),
    (Token(Unit(), (3, 4)), Token(Unit(), (0, 0)), 5.0),
    (Token(Unit(), (21, 9)), Token(Unit(), (37, 11)), sqrt(260)),
])
def test_eq(token1, token2, expected):
    # distance() must be symmetric and match the Euclidean norm.
    assert token1.distance(token2) - token2.distance(token1) == approx(
        0, abs=1e-10)
    assert token1.distance(token2) == approx(expected, abs=1e-10)
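# A minimal sketch of the Token.distance the test exercises: the Euclidean
# distance between two positions. That Token keeps its coordinates in a
# `position` (x, y) tuple is an assumption drawn from the constructor calls
# above, not confirmed by the model code:
from math import hypot

def distance(self, other) -> float:
    """Euclidean distance between this token's position and another's."""
    return hypot(self.position[0] - other.position[0],
                 self.position[1] - other.position[1])

# E.g. tokens at (3, 4) and (0, 0) are hypot(3, 4) == 5.0 apart, matching
# the second parametrized case.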