def parse(self):
    """Tokenize ``self.txt`` and build an optimized node list.

    When ``self.use_cache`` is set, results are memoized keyed by a
    digest of the raw wikitext bytes.
    """
    if self.use_cache:
        # Cache key: digest of the UTF-8 encoded source text.
        fp = digest(self.txt.encode('utf-8')).digest()
        try:
            return self._cache[fp]
        except KeyError:
            pass

    self.tokens = tokenize(self.txt, included=self.included,
                           replace_tags=self.replace_tags)
    self.pos = 0

    nodes = []
    while True:
        ty, txt = self.getToken()
        if ty == symbols.bra_open:
            nodes.append(self.parseOpenBrace())
        elif ty is None:
            break
        elif ty == symbols.noi:
            self.pos += 1  # ignore <noinclude>
        else:  # bra_close, link, txt
            nodes.append(txt)
            self.pos += 1

    nodes = optimize(nodes)
    if self.use_cache:
        self._cache[fp] = nodes
    return nodes
def parse(self):
    """Parse ``self.txt`` into an optimized node list, memoized by text digest when caching is enabled."""
    if self.use_cache:
        fp = digest(self.txt.encode('utf-8')).digest()
        if fp in self._cache:
            return self._cache[fp]

    self.tokens = tokenize(self.txt, included=self.included,
                           replace_tags=self.replace_tags)
    self.pos = 0

    result = []
    ty, txt = self.getToken()
    while ty is not None:
        if ty == symbols.bra_open:
            result.append(self.parseOpenBrace())
        elif ty == symbols.noi:
            # skip <noinclude> markers
            self.pos += 1
        else:
            # bra_close, link, txt
            result.append(txt)
            self.pos += 1
        ty, txt = self.getToken()

    result = optimize(result)
    if self.use_cache:
        self._cache[fp] = result
    return result
def idw():
    """Fetch URL once, dedupe the payload by content hash, and persist new blobs.

    Reads the global ``have_hashes`` list (mutated in place) and writes the
    blob via ``dumpBlob``. Silently returns on a truncated download or on a
    hash already seen.
    """
    # FIX: close the HTTP response even if read() raises (was leaked before).
    resp = request.urlopen(URL, timeout=10)
    try:
        exp_len = int(resp.info()['Content-Length'])
        content = resp.read()
    finally:
        resp.close()

    if exp_len != len(content):
        return  # truncated download — drop it

    hashed = digest(content).hexdigest()
    if hashed in have_hashes:
        return  # already have this blob
    have_hashes.append(hashed)
    timePrint(f'Received {hashed}, currently {len(have_hashes)}')

    # FIX: guess_extension() returns None for unknown types, which previously
    # produced filenames like '<hash>.None'; fall back to a generic extension.
    ext = filetype.guess_extension(content) or 'bin'
    filename = f'{hashed}.{ext}'
    dumpBlob(filename, content)
def main():
    """Dedupe files in ljyys/pics by content hash, rename survivors to <hash>.<ext>,
    and dump the collected hashes to got_hashes.txt.

    Mutates the global ``digests`` list in place.
    """
    os.chdir('ljyys')
    os.chdir('pics')
    for fn in tqdm(os.listdir('.')):
        with open(fn, 'rb') as f:
            k = digest(f.read()).hexdigest()
        if k in digests:
            os.remove(fn)  # duplicate content — drop the file
        else:
            digests.append(k)
            # FIX: fn.split('.')[1] kept the FIRST segment after a dot
            # (wrong for names like 'a.b.jpg') and raised IndexError for
            # names with no dot; splitext yields the real extension.
            ext = os.path.splitext(fn)[1].lstrip('.')
            os.rename(fn, f'{k}.{ext}')
    with open('got_hashes.txt', 'w') as f:
        for h in tqdm(digests):
            print(h, file=f)
import os
from hashlib import sha256 as digest

from tqdm import tqdm

# Compare the expected hash list (ljyys/hash.txt) against the SHA-256 of every
# file in ljyys/pics and print the expected hashes that are missing on disk.
os.chdir('ljyys')

correct_sha = set()
my_sha = set()

with open('hash.txt') as listing:
    correct_sha.update(listing.read().splitlines())

os.chdir('pics')
for entry in tqdm(os.listdir('.')):
    with open(entry, 'rb') as blob:
        my_sha.add(digest(blob.read()).hexdigest())

print(correct_sha - my_sha)
def _cksum(self, data):
    """Return the hex digest of *data* after UTF-8 encoding it."""
    return digest(data.encode('utf-8')).hexdigest()
def add_element_hash_to_id_token(self, id_token, element_name, value_to_hash, hashlib_info=None, algorithm=None):
    """Store an OpenID Connect ``at_hash`` / ``c_hash`` claim on *id_token*.

    Per OIDC Core, the claim value is the *base64url* encoding (no padding)
    of the left-most half of the hash of the ASCII octets of the
    access_token / code value, where the hash algorithm matches the ID
    Token's JOSE ``alg`` header (e.g. RS256 -> SHA-256, keep 128 bits).
    The "alg" values and their hash functions are defined in RFC 7518 §3.1.

    :param id_token: dict the claim is written into (mutated in place).
    :param element_name: claim name, e.g. ``'at_hash'`` or ``'c_hash'``.
    :param value_to_hash: bytes of the access_token or code value.
    :param hashlib_info: ``{'num_bits_in_alg': int, 'digester': hash-object}``;
        when falsy, this method is a no-op.
    :param algorithm: unused; kept for interface compatibility.
    """
    if hashlib_info:
        # Left-most half of the digest: num_bits / 2 bits == num_bits / 16 bytes.
        # FIX: '/' is float division in Python 3, and slicing with a float
        # raises TypeError — use integer division.
        num_bytes_to_keep = hashlib_info.get('num_bits_in_alg') // 16
        # FIX: renamed local 'hashlib' — it shadowed the stdlib module name.
        digester = hashlib_info.get('digester')
        digester.update(value_to_hash)
        truncated = digester.digest()[:num_bytes_to_keep]
        # FIX: the spec mandates base64url (RFC 4648 §5) without '=' padding;
        # base64.b64encode produces '+', '/' and padding, which are invalid
        # in at_hash/c_hash values.
        id_token[element_name] = base64.urlsafe_b64encode(truncated).rstrip(b'=').decode('ascii')