Example #1
def use_table(pwd_hash, pwd_dict):
    # `hash` is assumed to be a hashlib constructor aliased elsewhere in the module.
    dict_pwd = None  # stays None when the hash cannot be reversed with this table
    hash_word = pwd_hash[:8]
    for i in range(5):
        if hash_word in pwd_dict:
            dict_pwd = pwd_dict[hash_word]
            for j in range(4 - i):
                dict_pwd = hash(dict_pwd.encode('utf-8')).hexdigest()[:8]
            break
        hash_word = hash(hash_word.encode('utf-8')).hexdigest()[:8]
    return dict_pwd
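In this and most of the snippets below, `hash` is not the built-in hash() but a hashlib constructor aliased at import time (Example #6 shows such an alias explicitly). A minimal sketch of that assumption; md5 is a guess, but any hashlib algorithm matches the call pattern:

# Assumed alias: the snippets call hash(...).hexdigest(), which fits a hashlib
# constructor rather than the built-in hash().
from hashlib import md5 as hash

print(hash("secret".encode("utf-8")).hexdigest()[:8])  # the 8-char truncated digest used above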
Example #2
def cache(key,
          value_lambda,
          cache_path='./.cache/exec_cache.pickle',
          cache_map={}):
    key_hash = hash(key.encode("utf-8")).hexdigest()

    def save():
        if threading.current_thread() is threading.main_thread():
            if not exists(dirname(cache_path)):
                makedirs(dirname(cache_path))
            with open(cache_path, 'wb') as f:
                pickle.dump(cache_map, f, protocol=pickle.HIGHEST_PROTOCOL)

    def load():
        with open(cache_path, 'rb') as f:
            content = pickle.load(f)
        return content

    if len(cache_map.items()) == 0:
        if exists(cache_path):
            cache_map.update(load())
    atexit.register(save)

    if key_hash not in cache_map:
        cache_map[key_hash] = value_lambda()
    return cache_map[key_hash]
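A hedged usage sketch for the helper above. It assumes the imports the excerpt relies on (threading, pickle, atexit, os.makedirs, os.path.exists/dirname and a hashlib constructor aliased as hash) are present, and that the mutable default cache_map={} is deliberately shared across calls; slow_value is an invented stand-in.

import time

def slow_value():
    time.sleep(1)          # stands in for an expensive computation
    return 42

first = cache("answer", slow_value)    # computed, stored in cache_map, persisted at exit
second = cache("answer", slow_value)   # served from the in-memory map, no recomputation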
Example #3
def send():
    if request.method == 'GET':
        return redirect('/')

    from datetime import datetime as d

    files = []
    for f in request.files.getlist('image'):
        timestamp = d.now().timestamp()
        hashname = hash(bytes(f.filename, 'utf-8')).hexdigest()
        filename = f'{timestamp}.{hashname}{f.filename[f.filename.rindex("."):]}'
        filedir = pjoin(
            server.config['UPLOAD_FOLDER'],
            filename
        )

        f.save(filedir)
        files.append({
            'dir': filedir,
            'filename': f.filename,
            'name': f.filename[:f.filename.index('.')]
        })

    session['ephisem.files'] = files
    return redirect('/analize')
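The view above only runs inside a Flask application; a sketch of the assumed surrounding module (server and UPLOAD_FOLDER come from the snippet, everything else is an assumption):

from os.path import join as pjoin
from hashlib import md5 as hash          # assumed alias, as in the other examples
from flask import Flask, request, redirect, session

server = Flask(__name__)
server.config['UPLOAD_FOLDER'] = '/tmp/uploads'   # placeholder path
server.secret_key = 'change-me'                   # required because the view writes to session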
Example #4
    def __init__(self, numbytes=160, cipher=None, hash=None):
        if hash is None:
            from hashlib import sha1 as hash

        # The cipher argument is vestigial; it was removed from
        # version 1.1 so RandomPool would work even in the limited
        # exportable subset of the code
        if cipher is not None:
            warnings.warn("'cipher' parameter is no longer used")

        if isinstance(hash, types.StringType):
            # ugly hack to force __import__ to give us the end-path module
            hash = __import__('Crypto.Hash.' + hash,
                              None, None, ['new'])
            warnings.warn("'hash' parameter should now be a hashing module")

        self.bytes = numbytes
        self.bits = self.bytes * 8
        self.entropy = 0
        self._hash = hash

        # Construct an array to hold the random pool,
        # initializing it to 0.
        self._randpool = array.array('B', [0] * self.bytes)

        self._event1 = self._event2 = 0
        self._addPos = 0
        self._getPos = hash().digest_size
        self._lastcounter = time.time()
        self.__counter = 0

        self._measureTickSize()        # Estimate timer resolution
        self._randomize()
Example #6
def generate_id():
    """Generate a session ID.

    @return: session ID
    """
    from os import urandom
    from hashlib import md5 as hash
    return hash(urandom(64)).hexdigest()
Example #7
def scrypt(password, salt, N, r, p, dk_len):
    return hash(password,
                salt=salt,
                n=N,
                r=r,
                p=p,
                dklen=dk_len,
                maxmem=(128 * r * (N + p + 2)))
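The keyword names above line up with hashlib.scrypt (Python 3.6+), so `hash` here is presumably that function. A self-contained sketch under that assumption; with these small parameters the maxmem override is unnecessary, while larger N values need it, which is what the wrapper's last argument provides.

from hashlib import scrypt as hash   # assumed backend for the wrapper above

key = hash(b"correct horse", salt=b"battery staple", n=2**14, r=8, p=1, dklen=64)
print(key.hex())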
Example #8
def hash_pwds(pwd_list):
    pwd_dict = {}
    for pwd in pwd_list:
        hash_word = pwd
        for i in range(5):
            hash_word = hash(hash_word.encode('utf-8')).hexdigest()[:8]
        pwd_dict[hash_word] = pwd
    return pwd_dict
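Paired with use_table from Example #1, this builds a reverse-lookup table keyed by the end of a 5-round truncated hash chain. A hedged round trip, assuming the same hashlib alias is in scope for both functions:

table = hash_pwds(["hunter2", "letmein"])              # chain end -> original password

stored = hash("hunter2".encode("utf-8")).hexdigest()   # the leaked hash an attacker might hold
print(use_table(stored, table))                        # walks the chain back -> "hunter2"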
Example #9
def insert(api, data):
    for line in data:
        hexstr = line.encode(ENCODE_FORMAT).hex()
        pointer = hash(bytearray(line, encoding=ENCODE_FORMAT)).hexdigest()
        # pointer = str(hash(bytearray(line, encoding=ENCODE_FORMAT))
        # ).encode(ENCODE_FORMAT).hex()
        # pointer = pointer.encode('utf-8').hex()
        api.publish(DATA, pointer, hexstr)
        # for line in data:
        # hexstr = line.encode('utf-8').hex()
        attributes = line.split(" ")
        for i in range(len(ATTRIBUTE)):
            api.publish(INDEX, ATTRIBUTE[i] + attributes[i], pointer)
Example #10
def register_user():
    try:
        users = open(users_info_path, "rb")
        all_users = pickle.load(users)
        users.close()

    except:
        all_users = {}

    while True:

        new_user = input("Please tell me your desired user name: \n ")
        if new_user.lower() in all_users:
            logger.debug("Sorry that name is taken.")
        else:
            password_1 = getpass.getpass("Hello " + new_user +
                                         " please create your password: \n ")
            password_1 = hash(password_1.encode("utf-8"))
            password_2 = getpass.getpass("please enter it one more time: \n ")
            password_2 = hash(password_2.encode("utf-8"))

            if password_1.digest() == password_2.digest():
                all_users[new_user] = password_1.digest()
                users = open(users_info_path, "wb")
                pickle.dump(all_users, users)
                users.close()
                logger.debug(
                    "You are now registered, you may run the program again and login."
                )
                logger.info(new_user + " has been created successfully")
                break
            else:
                logger.debug(
                    "Sorry your passwords didn't match, please restart the registration process."
                )
                logger.info(
                    new_user +
                    " tried to create a user, but entered passwords that don't match "
                )
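The function above leans on module-level names the excerpt does not show; a sketch of the assumed setup (every name here is an assumption):

import getpass
import logging
import pickle
from hashlib import sha256 as hash   # any hashlib constructor fits the .digest() calls

users_info_path = "users.pickle"
logger = logging.getLogger("registration")
logging.basicConfig(level=logging.DEBUG)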
Example #11
def login():
    if request.method == "GET":
        return render_template('login.html')

    password, id, name = db.execute(
        """SELECT password, id, name FROM Users WHERE login=:username""",
        request.form).fetchone()
    if password == hash(request.form['password'].encode()).hexdigest():
        session['user_data'] = {"id": id, "name": name}
        return render_template('login.html',
                               error_message="You are logged in!")

    return render_template('index.html')
Example #12
def auth():
    username = request.form['username'].lower()
    password = hash(request.form['password']).hexdigest()

    #SQL - RETURNS TUPLE OF USER'S LOG IN DATA
    try:
        user_data = tblog_db.auth_user(username, password)
        username = user_data[0]
        first = user_data[1]
        last = user_data[2]
        email = user_data[3]
        joined = user_data[4]
        posts = user_data[6]
        print 'USER DATA RETRIEVED'

    except:
        print 'COULD NOT GET USER DATA'

    if user_data:

        user = User(username, first, last, email, joined, password)
        '''
        session['logged_in'] = True
        session['id'] = username
        session['first'] = first
        session['last'] = last
        session['email'] = email
        '''

        #FLASK-LOGIN FUNCTION, ALLOWS current_user VAR TO BE USED IN HTML
        if login_user(user):
            print 'USER LOGGED IN'
        if current_user.is_authenticated:
            print 'AUTHENTICATED'
        else:
            print 'NOT AUTHENTICATED'

        if username == "admin":
            return redirect(url_for('admin'))
        else:
            return redirect(url_for('user', uname=username))
    else:
        flash("INVALID USERNAME OR PASSWORD", "login")
        return redirect(url_for('login'))
Example #13
def register():
    if current_user.is_authenticated:
        logout_user()

    added = False
    formError = False

    first = request.form["first"].lower().capitalize()
    last = request.form["last"].lower().capitalize()
    email = request.form["email"].lower()
    username = request.form["username"].lower()
    password = hash(request.form["password"]).hexdigest()
    joined = strftime("%Y %b %d")

    for i in [username, first, last, email, password]:
        if i == "":
            formError = True
            flash("PLEASE FILL OUT ALL FIELDS", "register")
            break

    # SQL - RETURNS BOOL
    if not formError:
        added = tblog_db.add_user(username, first, last, email, joined,
                                  password)
    '''
    0 = Form error
    1 = User succesfully added
    2 = Username already exists
    '''
    if added == 1:
        user = User(username, first, last, email, joined, password)

        if login_user(user):
            print 'USER LOGGED IN'
        else:
            print 'LOGIN FAILED'

        return redirect(url_for('user', uname=username))
    elif added == 2:
        flash('USERNAME ALREADY EXISTS', "register")
        return redirect(url_for('login'))
    else:
        return redirect(url_for('login'))
Example #14
def register():
    if request.method == "GET":
        return render_template('register.html')

    check = db.execute("""SELECT id FROM Users WHERE login=:username""",
                       request.form)
    if check.fetchone():
        return render_template('register.html',
                               error_message="User name already taken")

    data = {
        "pass": hash(request.form['password'].encode()).hexdigest(),
        "login": request.form['username']
    }
    db.execute(
        """INSERT INTO Users (login, name, email, age, gender)
                  VALUES (:username, :name, :email, :age, :gender)""",
        request.form)
    db.execute("""UPDATE Users SET password=:pass WHERE login=:login""", data)
    db.commit()
    return render_template('index.html')
Example #15
def tmpfile(content=None, hash_seed=None, suffix=None):
    if content is not None:
        hash_seed = content
    assert hash_seed is not None, "when no content is provided, a hash seed is mandatory!"

    filename = "/tmp/{}".format(
        hash(hash_seed.encode("utf-8")).hexdigest(), suffix)
    if suffix:
        filename += suffix

    if content is not None:
        content = clean(content)
        content = b64encode(content.encode("utf-8")).decode("utf-8")

        exec(
            "sh",
            args(c="\\\"echo {content} | base64 -d > {filename}\\\"".format(
                content=content, filename=filename)))
    else:
        exec("touch", args(filename))

    return filename
Example #16
def _dohash(s):
    h = hash()
    h.update(s.encode())
    return h.hexdigest()
Example #17
def _dohash(s):
    h = hash()
    h.update(s.encode())
    return h.hexdigest()
def main():
	# Set up initial state
	url_queue = deque()
	last_visit = dict()
	robot_rules = dict()
	image_filename_cache = dict() # Keep image filenames by URL so we can handle image leechers.  add_page_to_database requires an image filename.

	# Quick restore else save
	last_state = restore_state()
	if last_state:
		robot_rules, url_queue, last_visit, image_filename_cache = last_state
	else:
		url_queue.append(STARTING_PAGE)
		save_state((robot_rules, url_queue, last_visit, image_filename_cache))

	# Begin main search loop
	while len(url_queue):
		now = time.time()
		url = url_queue.popleft()

		# If it is too early to revisit, skip this URL
		if url in last_visit and now - last_visit[url] < REVISIT_DELAY:
			continue

		# If the robots.txt file of this domain does not allow us, skip
		if not spider_allowed(url, robot_rules):
			logging.info("spider.py: main: robots.txt not allowing {}".format(url))
			continue

		# Dump to logs
		logging.info("spider.py: main: Visiting page {}".format(url))

		# Get the page
		response = None
		page_content = None
		parsed_body = None
		try:
			response = requests.get(url, headers=HEADERS)
			page_content = response.content
			parsed_body = html.fromstring(page_content)
		except requests.exceptions.ConnectionError as ce:
			logging.info("spider.py: main: Connection error while getting url {}".format(url))
			continue
		except lxml.etree.XMLSyntaxError as xse:
			logging.info("spider.py: main: Couldn't parse XML/HTML for url {}".format(url))
			continue

		# Find URLs
		outbound_urls = [urlparse.urljoin(response.url, url) for url in parsed_body.xpath('//a/@href')]
		image_urls = [urlparse.urljoin(response.url, url) for url in parsed_body.xpath('//img/@src')]

		# Handle the URLs
		for new_url in outbound_urls:
			if new_url.startswith('http'):
				url_queue.append(new_url)
		for image_url in image_urls:
			# Check to see if we did a get for this URL within the last time span
			if image_url in last_visit and now - last_visit[image_url] < REVISIT_DELAY:
				# If we don't have a stored record of the filename, get the image again so we can calculate it.
				filename = image_filename_cache.get(image_url, None)
				if filename:
					# Add the additional hotlink to the page.  This link was arrived at by another path.  Leech or extra linking.
					add_page_to_database(filename, url, page_content)
					continue;
			# Either we've not seen this picture before or we don't know what it looks like because we lost the hash.  Get it again:
			try:
				# Otherwise get the image
				image_response = requests.get(image_url)
				# Mark our read time
				last_visit[image_url] = now
				# Read as image
				temp_io = StringIO(image_response.content)
				temp_io.seek(0)
				image = Image.open(temp_io)
				# Make sure the image is above the minimum size before we store it in the database.
				#if image.size[0] < MIN_IMAGE_SIZE or image.size[1] < MIN_IMAGE_SIZE: #TODO: This might cause problems with the anti-leech code above. Ignore min size, maybe?
				#	continue
				# Save to file
				filename = image_url.split('/')[-1] # To avoid conflicts, hash the filename
				#filename = hash(str(now) + filename).hexdigest() + filename[-4:] # But keep the extension
				filename = hash(image.tostring()).hexdigest() + filename[-4:]
				filepath = os.path.join(MEDIA_ROOT, filename)
				if not os.path.isfile(filepath):
					#fout = open(filepath, 'w')
					#fout.write(image_response.content)
					#fout.close()
					try:
						thumbnail = make_thumbnail(image)
						thumbnail.save(filepath)
					except KeyError as ke:
						logging.info("spider.py: main: Image extension unrecognized.  Adding .jpg suffix. {}".format(filename[-10:]))
						filepath += ".jpg"
						filename += ".jpg"
						thumbnail = make_thumbnail(image)
						thumbnail.save(filepath)
				else:
					logging.info("spider.py: main: Image already saved {}".format(image))
				# Keep the filename so we can log other pages which link the images WITHOUT reloading the source.
				image_filename_cache[image_url] = filename
				# Push to database
				add_image_to_database(filename, image_url)
				add_page_to_database(filename, url, page_content)
				# Push to log
				logging.info("spider.py: main: Added image {} -> {}".format(image_url, filename[:10] + ".." + filename[-10:]))
			except AttributeError as ae:
				logging.warn("spider.py: main: AttributeError while processing url {}: {}".format(image_url, str(ae)))
			except IOError as ioe:
				logging.warn("spider.py: main: IOException while processing url {}: {}".format(image_url, str(ioe)))
			except requests.packages.urllib3.exceptions.LocationParseError as pe:
				logging.warn("spider.py: main: LocationParseError while getting url {}".format(image_url))

		# Mark this as complete and save our state
		last_visit[url] = now
		save_state((robot_rules, url_queue, last_visit, image_filename_cache))

		# Throttle
		time.sleep(1)
def _bucket_cachekey(bucket_name: str) -> str:
    return hash(f"bucket-name__{bucket_name}".encode()).hexdigest()
Example #20
def scrypt(password, salt, N, r, p, dk_len):
    return hash(password, salt, N, r, p, dk_len)
Example #21
def is_authentic(self, user: User, provided_password):
    return hash(
        provided_password.encode("utf-8")).digest() == user.hashed_password
Example #22
def get_digest(self, key, value):
    string = ':'.join([self.secret, key, value])
    return hash(string).hexdigest()
    from os import getcwd
    parser = argparse.ArgumentParser()
    parser.add_argument('--directory',
                        '-d',
                        default=getcwd(),
                        help='Specify alternative directory '
                        '[default:current directory]')
    parser.add_argument('port',
                        action='store',
                        default=8000,
                        type=int,
                        nargs='?',
                        help='Specify alternate port [default: 8000]')
    args = parser.parse_args()

    from hashlib import md5 as hash
    from random import random as rnd
    args.salt = '/' + hash(str(rnd()).encode()).hexdigest() + '/'

    from socket import gethostname, gethostbyname
    args.bind = gethostbyname(gethostname())

    handler = partial(SaltedHTTPRequestHandler,
                      salt=args.salt,
                      directory=args.directory)

    with ThreadingHTTPServer(('', args.port), handler) as httpd:
        message = 'Serving HTTP on http://{}:{}{} ...'
        print(message.format(args.bind, args.port, args.salt))
        httpd.serve_forever()
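This fragment assumes the rest of the script supplies the imports and the handler class; a sketch of that context. SaltedHTTPRequestHandler is defined elsewhere in the original project, so the stand-in below (strip the per-run salt prefix before resolving paths) is only a guess at its behavior.

import argparse
from functools import partial
from http.server import ThreadingHTTPServer, SimpleHTTPRequestHandler

class SaltedHTTPRequestHandler(SimpleHTTPRequestHandler):
    """Hypothetical stand-in: strip the per-run salt prefix before serving files."""

    def __init__(self, *args, salt='/', **kwargs):
        self.salt = salt
        super().__init__(*args, **kwargs)

    def translate_path(self, path):
        # Drop the salt prefix so the remainder maps onto the served directory.
        if path.startswith(self.salt):
            path = '/' + path[len(self.salt):]
        return super().translate_path(path)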
Example #24
def create_id(self, doc):
    h = hash()
    h.update(str(doc).encode('utf-8'))
    return h.hexdigest()
Example #25
def cipherEngine(s):
    print(s, ': ', hash(s.encode()).hexdigest())
    return hash(s.encode()).hexdigest()
Example #26
from qrcode import *
import hashlib

data = "http://blog.matael.org/"   # assumed value; the snippet leaves `data` unassigned

qr = QRCode(version=None, error_correction=ERROR_CORRECT_L)

digest = hashlib.md5(data.encode()).hexdigest()   # assumed digest call; hashlib has no generic hash()


qr.add_data("http://blog.matael.org/")
qr.make() # Generate the QRCode itself

# im contains a PIL.Image.Image object
im = qr.make_image()

# To save it
im.save("filename.png")
Example #27
def create_id(self, doc):
    h = hash()
    h.update(str(doc).encode('utf-8'))
    return h.hexdigest()
Example #28
def gen_key(a):
    return hash(a[0] + a[1]).hexdigest()
def _fileCacheKey(self, name: str) -> str:
    return hash(f"{self.bucket.name}__{name}".encode()).hexdigest()