def setup_app(app, core, log):
    config = core.config
    if not config['auth_secret']:
        config['auth_secret'] = secrets.token_hex()

    app['spritzle.log'] = log
    app['spritzle.core'] = core
    app['spritzle.config'] = config

    app.middlewares.extend([error_middleware, debug_middleware])

    async def on_startup(app):
        await app['spritzle.core'].start()

    async def on_shutdown(app):
        await app['spritzle.core'].stop()

    app.on_startup.append(on_startup)
    app.on_shutdown.append(on_shutdown)

    app.router.add_routes(auth_routes)
    app.router.add_routes(config_routes)
    app.router.add_routes(core_routes)
    app.router.add_routes(session_routes)
    app.router.add_routes(torrent_routes)
def wrapper(**kwargs):
    # Only track the user id if the user is authenticated.
    if current_user.is_authenticated:
        user_id = current_user.id
    else:
        user_id = None

    # Get the session id if it exists; the session id lasts 8 years or until
    # cookies are deleted.
    session_id = session.get('FVID')

    # Destination URL.
    base_url = request.base_url
    # Referring URL.
    referrer_url = request.referrer if request.referrer else 'None'
    # User agent.
    user_agent = request.headers.get('User-Agent')

    # If there is no session data yet, create a new session id.
    if session.get('FVID') is None:
        session['FVID'] = secrets.token_hex(32)
        session_id = session.get('FVID')

    # If the user is clicking an outbound link (via /outboundLinks), track the
    # destination from the `url` query parameter instead of the base URL.
    to_url = (request.args.get('url', default=None, type=None)
              if base_url == request.url_root + 'outboundLinks'
              else base_url)

    pv = userPageView(session_id=session_id,
                      from_page=referrer_url,
                      to_page=to_url,
                      user_id=user_id,
                      user_agent=user_agent)
    if track_pageviews:
        db.session.add(pv)
        db.session.commit()
    else:
        print(pv)

    x = func(**kwargs)
    return x
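# Hedged sketch (not from the source): how a pageview wrapper like the one
# above is typically wired into a decorator. The decorator name
# `track_pageview` and the use of functools.wraps are illustrative
# assumptions; only the wrapper body appears in the original snippet.
import functools

def track_pageview(func):
    @functools.wraps(func)  # preserve the view name for Flask routing
    def wrapper(**kwargs):
        ...  # pageview-recording body as in the snippet above
        return func(**kwargs)
    return wrapper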
def test_backup_010_create(conn, creds):
    _check()
    pool = _get_pool(conn)
    conn.ws.call('filesystem.file_receive',
                 f'/mnt/{pool["name"]}/s3_test/foo', secrets.token_hex(3))
    req = conn.rest.post('backup', data=[{
        "description": "desc",
        "direction": "PUSH",
        "path": f"/mnt/{pool['name']}/s3_test",
        "credential": creds['credid'],
        "minute": "00",
        "hour": "03",
        "daymonth": "*",
        "dayweek": "*",
        "month": "*",
        "attributes": {
            "bucket": os.environ['BACKUP_AWS_BUCKET'],
            "folder": "",
            "region": os.environ['BACKUP_AWS_REGION'],
        },
    }])
    assert req.status_code == 200
    creds['backupid'] = req.json()
    assert isinstance(creds['backupid'], int)
def call(self):
    """Generate a random hex string of `size` bytes (2 * size hex digits)
    and store it as the user's API key."""
    self.user.api_key = secrets.token_hex(self.size)
    db.session.commit()
    return self.user.api_key
def start_sha2(self, msg):
    if msg == ACK:
        self.rax = secrets.token_hex(32)
        self.ra = hex_to_message(self.rax)
        self.handler = self.await_sha2_client_response
        self.send_packet('*#%s##' % self.ra)
    else:
        self.transport.close()
def _make_filename():
    "Create a random filename for the shared memory object."
    # number of random bytes to use for name
    nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2
    assert nbytes >= 2, '_SHM_NAME_PREFIX too long'
    name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes)
    assert len(name) <= _SHM_SAFE_NAME_LENGTH
    return name
def test_token_hex(self):
    # Test token_hex.
    for n in (1, 12, 25, 90):
        with self.subTest(n=n):
            s = secrets.token_hex(n)
            self.assertIsInstance(s, str)
            self.assertEqual(len(s), 2*n)
            self.assertTrue(all(c in string.hexdigits for c in s))
async def start_build(self, batch_client):
    assert not self.batch

    try:
        log.info(f'merging for {self.number}')
        repo_dir = f'repos/{self.target_branch.branch.repo.short_str()}'

        merge_script = f'''
set -ex
if [ ! -d "{shq(repo_dir)}" ]; then
  mkdir -p {shq(repo_dir)}
  git clone {shq(self.target_branch.branch.repo.url)} {shq(repo_dir)}
  git -C {shq(repo_dir)} config user.email [email protected]
  git -C {shq(repo_dir)} config user.name hail-ci
else
  git -C {shq(repo_dir)} reset --merge
  git -C {shq(repo_dir)} fetch origin
fi

git -C {shq(repo_dir)} remote add {shq(self.source_repo.short_str())} {shq(self.source_repo.url)} || true
git -C {shq(repo_dir)} fetch {shq(self.source_repo.short_str())}
git -C {shq(repo_dir)} checkout {shq(self.target_branch.sha)}
git -C {shq(repo_dir)} merge {shq(self.source_sha)} -m 'merge PR'
'''
        await check_shell(merge_script)

        sha_out, _ = await check_shell_output(
            f'git -C {shq(repo_dir)} rev-parse HEAD')
        self.sha = sha_out.decode('utf-8').strip()

        with open(f'{repo_dir}/build.yaml', 'r') as f:
            config = BuildConfiguration(self, f.read())
    except (CalledProcessError, FileNotFoundError) as e:
        log.exception(f'could not open build.yaml due to {e}')
        self.build_state = 'merge_failure'
        self.target_branch.batch_changed = True
        return

    batch = None
    try:
        log.info(f'creating batch for {self.number}')
        batch = await batch_client.create_batch(
            attributes={
                'token': secrets.token_hex(16),
                'target_branch': self.target_branch.branch.short_str(),
                'pr': str(self.number),
                'source_sha': self.source_sha,
                'target_sha': self.target_branch.sha
            })
        await config.build(batch, self)
        await batch.close()
        self.batch = batch
    finally:
        if batch and not self.batch:
            await batch.cancel()
def save_post_pictures(form_picture):
    # Use the random hex as the filename (keeping the original extension) so
    # uploads cannot collide; the original snippet generated random_hex and
    # output_size but never used them.
    random_hex = secrets.token_hex(8)
    _, f_ext = os.path.splitext(form_picture.filename)
    picture_fn = random_hex + f_ext
    picture_path = os.path.join(app.root_path, 'static/imgs/posts', picture_fn)

    output_size = (125, 125)
    i = Image.open(form_picture)
    i.thumbnail(output_size)
    i.save(picture_path)

    return picture_fn
def _generate_token(self):
    '''
    Generates a token using the PEP 506 `secrets` module (if available),
    falling back to a SHA1 hash of a random salt otherwise.
    '''
    if secrets:
        # use PEP 506 secrets module
        return secrets.token_hex()
    else:
        salt = str(random.randrange(0, _MAX_CSRF_KEY)).encode('utf-8')
        return hashlib.sha1(salt).hexdigest()
def save_account_picture(form_picture):
    random_hex = secrets.token_hex(8)
    _, f_ext = os.path.splitext(form_picture.filename)
    picture_fn = random_hex + f_ext
    picture_path = os.path.join(app.root_path, 'static/profile_pics',
                                picture_fn)

    output_size = (125, 125)
    i = Image.open(form_picture)
    i.thumbnail(output_size)
    i.save(picture_path)

    return picture_fn
def _auth_params(self):
    ts = int(datetime.now().timestamp())
    salt = secrets.token_hex(8)
    key = self._secret.encode('utf8')
    msg = '{}{}'.format(ts, salt).encode('utf8')
    sign = hmac.new(key, msg, hashlib.md5)
    return {
        'api_key': self._key,
        'timestamp': ts,
        'salt': salt,
        'signature': sign.hexdigest(),
    }
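# Hedged sketch (assumption, not part of the source): the server-side
# counterpart of the snippet above would recompute the HMAC-MD5 over
# timestamp+salt with the shared secret and compare digests in constant time.
# The function name and parameter layout are hypothetical.
import hashlib
import hmac

def verify_auth_params(params, secret):
    msg = '{}{}'.format(params['timestamp'], params['salt']).encode('utf8')
    expected = hmac.new(secret.encode('utf8'), msg, hashlib.md5).hexdigest()
    # compare_digest avoids leaking the signature through timing differences
    return hmac.compare_digest(expected, params['signature'])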
def ownCalcHmacSha2(password, ra):
    """
    Calculates the response to the SHA2 HMAC challenge.
    Returns (rb, hmac(ra, rb, a, b, kab), hmac(ra, rb, kab)).
    """
    if len(ra) == 128:
        rax = message_to_hex(ra)
        rbx = secrets.token_hex(32)
        rb_msg = hex_to_message(rbx)
        kab = sha256_calc_kab(password)
        c_msg = sha256_calc_client_response(rax, rbx, kab)
        s_msg = sha256_calc_server_response(rax, rbx, kab)
        return (rb_msg, c_msg, s_msg)
    raise AttributeError
def login():
    login_user = usersCollection.find_one({'email': request.json['email']})

    if login_user:
        if bcrypt.checkpw(request.json['password'].encode('utf-8'),
                          login_user['password']):
            token = secrets.token_hex(20)
            tokensCollection.delete_one({'email': request.json['email']})
            # insert_one is the non-deprecated pymongo API for single inserts
            tokensCollection.insert_one({
                'token': token,
                'email': request.json['email'],
                'expiry': (datetime.now() + timedelta(hours=1)).strftime("%Y-%m-%dT%H:%M:%S.000Z")
            })
            response = {
                'email': login_user['email'],
                'firstName': login_user['firstName'],
                'token': token,
                'permission': login_user['permission']
            }
            return mongo.prepareResponse(response)

    raise exceptionHandler.InvalidUsage('Invalid email/password combination',
                                        status_code=420)
def __init__(self, name=None, instance_retries=3):
    self.launch_failures_left = instance_retries

    if name is None:
        name = os.environ.get(
            'BUILD_TAG', 'snaptest-{}'.format(secrets.token_hex(nbytes=6)))
    self.name = name
    self.prefix = ''  # customize later within test

    self.volatile_images = []

    self.session, self.rc = session_from_args(rc=True)
    self.lease = self._create_lease()
    self.instance = None
    self.nets = [get_net(self.session)['id']]
def create_license(json):
    if json is None:
        return json, 'Post data was not json'
    if 'company' in json:
        if len(json['company']) > 4:
            if License.query.filter_by(company=json['company']).first():
                return None, 'company name exists already'
            return License(token=secrets.token_hex(32),
                           company=json['company']), None
        return None, 'company must be more than 4 characters'
    return None, 'company must be set as json'
def test_token_defaults(self):
    # Test that token_* functions handle default size correctly.
    for func in (secrets.token_bytes, secrets.token_hex,
                 secrets.token_urlsafe):
        with self.subTest(func=func):
            name = func.__name__
            try:
                func()
            except TypeError:
                self.fail("%s cannot be called with no argument" % name)
            try:
                func(None)
            except TypeError:
                self.fail("%s cannot be called with None" % name)
    size = secrets.DEFAULT_ENTROPY
    self.assertEqual(len(secrets.token_bytes(None)), size)
    self.assertEqual(len(secrets.token_hex(None)), 2*size)
def main():
    parser = argparse.ArgumentParser(description='Spritzled')
    parser.add_argument(
        '--debug', dest='debug', default=False, action='store_true')
    parser.add_argument('-p', '--port', dest='port', default=8080, type=int)
    parser.add_argument('-c', '--config_dir', dest='config_dir', type=str)
    parser.add_argument('-l', '--log-level', default='INFO',
                        dest='log_level', type=str)
    args = parser.parse_args()

    log = setup_logger(name='spritzle', level=args.log_level)
    log.info(f'spritzled starting.. args: {args}')
    app['spritzle.log'] = log

    loop = asyncio.get_event_loop()
    loop.set_debug(args.debug)

    config = Config('spritzle.conf', args.config_dir)

    # Prevent more than one process using the same config path from running.
    f = Path(config.path, 'spritzled.lock').open(mode='w')
    try:
        fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        log.error(f'Another instance of Spritzle is running: {e}')
        log.error('Exiting..')
        sys.exit(0)

    if not config['auth_secret']:
        config['auth_secret'] = secrets.token_hex()

    app['spritzle.config'] = config
    app['spritzle.core'] = Core(app['spritzle.config'])

    async def on_startup(app):
        await app['spritzle.core'].start()

    async def on_shutdown(app):
        await app['spritzle.core'].stop()

    app.on_startup.append(on_startup)
    app.on_shutdown.append(on_shutdown)

    setup_routes()

    aiohttp.web.run_app(app)
def test_reset_auth_token_happy(self):
    import secrets
    from flamenco.managers.linking_routes import _compute_hash

    with self.app.app_context():
        mngr_doc, account, old_token_info = \
            self.create_manager_service_account()
    manager_id = mngr_doc['_id']

    # Exchange two keys
    ident = self._normal_exchange()
    self.post('/api/flamenco/managers/link/exchange',
              json={'key': 'aabbccddeeff'})

    coll = self.flamenco.db('manager_linking_keys')
    self.assertEqual(2, coll.count())

    # Bind them to the same Manager
    coll.update_many({}, {'$set': {'manager_id': manager_id}})

    # Check that both secret keys are gone after requesting an auth token reset.
    padding = secrets.token_hex(32)
    msg = f'{padding}-{ident}-{manager_id}'
    mac = _compute_hash(secret_bin, msg.encode('ascii'))
    payload = {
        'manager_id': str(manager_id),
        'identifier': str(ident),
        'padding': padding,
        'hmac': mac,
    }

    resp = self.post('/api/flamenco/managers/link/reset-token', json=payload)

    # Test the token by getting the manager document.
    token_info = resp.get_json()
    token = token_info['token']
    self.get(f'/api/flamenco/managers/{manager_id}', auth_token=token)

    # The old token shouldn't work any more.
    self.get(f'/api/flamenco/managers/{manager_id}',
             auth_token=old_token_info['token'],
             expected_status=403)
    expires_after TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    user_id INTEGER REFERENCES users(id),
    PRIMARY KEY (session_id));
'''

invite_codes_sql = '''CREATE TABLE IF NOT EXISTS invite_codes (
    id SERIAL,
    code VARCHAR(254) NOT NULL DEFAULT '',
    created_from VARCHAR(254) NOT NULL DEFAULT 0,
    PRIMARY KEY (id));'''

c.execute(user_sql)
c.execute(session_sql)
c.execute(invite_codes_sql)
db.commit()

'''
Save new user if username and password are given.
'''
if len(sys.argv) > 1:
    salt = secrets.token_hex(32)
    h = hashlib.sha512()
    h.update(str.encode(salt))
    h.update(str.encode(sys.argv[2]))
    user_hash = h.hexdigest()

    # Use a parameterized query; the original built this statement with
    # string formatting, which is vulnerable to SQL injection.
    sql = "INSERT INTO users (username, salt, hash) VALUES (%s, %s, %s);"
    c.execute(sql, (sys.argv[1], salt, user_hash))
    db.commit()
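# Hedged sketch (assumed; not in the source): the matching login check for
# the salted-SHA-512 scheme above would fetch the user's salt and stored
# hash, recompute SHA-512(salt || password), and compare digests in constant
# time. The function name is hypothetical; the column names come from the
# table definition above.
def check_login(c, username, password):
    c.execute("SELECT salt, hash FROM users WHERE username = %s;", (username,))
    row = c.fetchone()
    if row is None:
        return False
    salt, stored_hash = row
    h = hashlib.sha512()
    h.update(str.encode(salt))
    h.update(str.encode(password))
    return secrets.compare_digest(h.hexdigest(), stored_hash)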
def _generate_challenge_id() -> ChallengeId:
    return ChallengeId(secrets.token_hex(16))
def perform_create(self, serializer):
    serializer.save(password=secrets.token_hex(32))
def password():
    # pack() returns None, so create the Label first and pack it separately;
    # otherwise myPassword would be None. `n` (the token length in bytes) is
    # assumed to be defined elsewhere in the script.
    myPassword = Label(root, text=secrets.token_hex(n))
    myPassword.pack()
def generate_token():
    return secrets.token_hex(32)
if args.d:
    logging.basicConfig(level=logging.DEBUG, format='%(message)s')
else:
    logging.basicConfig(level=logging.INFO, format='%(message)s')

dev = Device(args.serial)
imei = dev.get_imei()
identity = f'urn:imei:{imei}'
logging.info('Identity: %s', identity)

# Remove previous keys
dev.delete_sec_tag(35724861)
dev.delete_sec_tag(35724862)

# Generate and store Bootstrap keys
psk = token_hex(16)
dev.store_psk(35724862, identity, psk)

coiote = Coiote()
coiote.get_device(identity)
coiote.get_device(identity + '-bs')
if coiote.get_device(identity):
    coiote.delete_device(identity)
if coiote.get_device(identity + '-bs'):
    coiote.delete_device(identity + '-bs')
coiote.create_device(identity, psk)
    password=passphrase.encode('utf8'),
    backend=default_backend(),
)

# We will need the authentication_url again later,
# so it is handy to define here
authentication_url = 'https://api.box.com/oauth2/token'

claims = {
    'iss': config['boxAppSettings']['clientID'],
    'sub': config['enterpriseID'],
    'box_sub_type': 'enterprise',
    'aud': authentication_url,
    # This is an identifier that helps protect against replay attacks
    'jti': secrets.token_hex(64),
    # We give the assertion a lifetime of 45 seconds before it expires
    'exp': round(time.time()) + 45
}

keyId = config['boxAppSettings']['appAuth']['publicKeyID']

# Rather than constructing the JWT assertion manually, we are
# using the pyjwt library.
assertion = jwt.encode(
    claims,
    key,
    # The API supports the "RS256", "RS384", and "RS512" signing algorithms
    algorithm='RS512',
    headers={
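# Hedged continuation sketch (not from the source, which is truncated above):
# once built, the JWT assertion is typically exchanged for an access token by
# POSTing to the same authentication_url. The requests usage below is an
# assumption for illustration, following Box's documented JWT grant flow.
import requests

params = {
    'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
    'assertion': assertion,
    'client_id': config['boxAppSettings']['clientID'],
    'client_secret': config['boxAppSettings']['clientSecret'],
}
access_token = requests.post(authentication_url, params).json()['access_token']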
def Main(args=None):
    ParseCommandLine(args)

    config["Logging"] = {"LogFile": "__screen__", "LogLevel": "INFO"}

    plogger.setup_loggers(config.get("Logging", {}))
    sys.stdout = plogger.stream_to_logger(
        logging.getLogger("STDOUT"), logging.DEBUG)
    sys.stderr = plogger.stream_to_logger(
        logging.getLogger("STDERR"), logging.WARN)

    logger.info("***************** TRUSTED COMPUTE FRAMEWORK (TCF)"
                + " *****************")

    global direct_jrpc
    direct_jrpc = DirectJsonRpcApiConnector(config_file=None, config=config)

    global address
    if mode == "registry" and address:
        logger.info("\n Worker registry contract address is unsupported \n")
        sys.exit(-1)

    # Connect to the registry list and retrieve a registry
    global uri
    if not uri and mode == "listing":
        registry_list_instance = direct_jrpc.create_worker_registry_list(
            config)
        # Lookup returns a tuple: the first element is the number of
        # registries, the second is the lookup tag, and the third is the
        # list of organization ids.
        registry_count, lookup_tag, registry_list = \
            registry_list_instance.registry_lookup()
        logger.info(
            "\n Registry lookup response: registry count: {} "
            "lookup tag: {} registry list: {}\n".format(
                registry_count, lookup_tag, registry_list))
        if registry_count == 0:
            logger.warning("No registries found")
            sys.exit(1)
        # Retrieve the first registry's details.
        registry_retrieve_result = registry_list_instance.registry_retrieve(
            registry_list[0])
        logger.info("\n Registry retrieve response: {}\n".format(
            registry_retrieve_result))
        config["tcf"]["json_rpc_uri"] = registry_retrieve_result[0]

    # Prepare worker
    req_id = 31
    global worker_id
    if not worker_id:
        worker_registry_instance = direct_jrpc.create_worker_registry(config)
        worker_lookup_result = worker_registry_instance.worker_lookup(
            worker_type=WorkerType.TEE_SGX, id=req_id)
        logger.info("\n Worker lookup response: {}\n".format(
            json.dumps(worker_lookup_result, indent=4)))
        if "result" in worker_lookup_result and \
                "ids" in worker_lookup_result["result"].keys():
            if worker_lookup_result["result"]["totalCount"] != 0:
                worker_id = worker_lookup_result["result"]["ids"][0]
            else:
                logger.error("ERROR: No workers found")
                sys.exit(1)
        else:
            logger.error("ERROR: Failed to lookup worker")
            sys.exit(1)

    req_id += 1
    worker_retrieve_result = worker_registry_instance.worker_retrieve(
        worker_id, req_id)
    logger.info("\n Worker retrieve response: {}\n".format(
        json.dumps(worker_retrieve_result, indent=4)))

    if "error" in worker_retrieve_result:
        logger.error("Unable to retrieve worker details\n")
        sys.exit(1)

    # Initializing Worker Object
    worker_obj = worker_details.SGXWorkerDetails()
    worker_obj.load_worker(worker_retrieve_result)

    logger.info("**********Worker details Updated with Worker ID"
                "*********\n%s\n", worker_id)

    # Convert workloadId to hex
    global workload_id
    workload_id = workload_id.encode("UTF-8").hex()
    work_order_id = secrets.token_hex(32)
    requester_id = secrets.token_hex(32)
    session_iv = utility.generate_iv()
    session_key = utility.generate_key()
    requester_nonce = secrets.token_hex(16)

    # Create work order
    wo_params = WorkOrderParams(
        work_order_id, worker_id, workload_id, requester_id,
        session_key, session_iv, requester_nonce,
        result_uri=" ", notify_uri=" ",
        worker_encryption_key=worker_obj.encryption_key,
        data_encryption_algorithm="AES-GCM-256")

    # Add worker input data
    global in_data
    wo_params.add_in_data(in_data)

    # Sign work order
    private_key = utility.generate_signing_keys()
    wo_params.add_encrypted_request_hash()
    if not wo_params.add_requester_signature(private_key):
        logger.info("Work order request signing failed\n")
        sys.exit(1)

    # Submit work order
    logger.info("Work order submit request : %s, \n \n ",
                wo_params.to_string())
    work_order_instance = direct_jrpc.create_work_order(config)
    req_id += 1
    response = work_order_instance.work_order_submit(
        wo_params.get_params(),
        wo_params.get_in_data(),
        wo_params.get_out_data(),
        id=req_id)
    logger.info("Work order submit response : {}\n ".format(
        json.dumps(response, indent=4)))

    if "error" in response and \
            response["error"]["code"] != WorkOrderStatus.PENDING:
        sys.exit(1)

    # Retrieve result
    req_id += 1
    res = work_order_instance.work_order_get_result(work_order_id, req_id)
    logger.info("Work order get result : {}\n ".format(
        json.dumps(res, indent=4)))

    if "result" in res:
        decrypted_res = utility.decrypted_response(
            json.dumps(res), session_key, session_iv)
        logger.info("\nDecrypted response:\n {}".format(decrypted_res))
    else:
        sys.exit(1)

    # Retrieve receipt
    wo_receipt_instance = direct_jrpc.create_work_order_receipt(config)
    req_id += 1
    receipt_res = wo_receipt_instance.work_order_receipt_retrieve(
        work_order_id, id=req_id)
    logger.info("\nRetrieve receipt response:\n {}".format(
        json.dumps(receipt_res, indent=4)))
output_bucket = gcs_client.bucket(args.output_bucket_name)

input_bucket.get_blob(
    args.input_path + "/" + args.input_filename).download_to_filename(
        args.input_filename)

if args.input_filename.endswith('csv'):
    input_data = pd.read_csv(args.input_filename)
    df_raw = pd.DataFrame(input_data)
elif args.input_filename.endswith('pkl'):
    df_raw = pd.read_pickle(args.input_filename)

if args.unique_by_cols:
    groupcols = args.unique_by_cols.split(",")
    # .copy() avoids pandas' SettingWithCopy warning on the assignment below.
    temp = df_raw.drop_duplicates(groupcols).copy()
    ids = [secrets.token_hex(int(args.hash_len)) for x in range(len(temp))]
    temp[args.index_col_name] = ids
    temp2 = temp[groupcols + [args.index_col_name]]
    df = df_raw.merge(temp2, on=groupcols)
else:
    ids = [secrets.token_hex(args.hash_len) for x in range(len(df_raw))]
    df_raw[args.index_col_name] = ids
    df = df_raw

if args.output_filename.endswith('pkl'):
    df.to_pickle(args.output_filename)
else:
    df.to_csv(args.output_filename, index=False)

blob = output_bucket.blob(args.output_path + "/" + args.output_filename)
blob.upload_from_filename(args.output_filename)
def token():
    return sc.token_hex(16)
from flask_bootstrap import Bootstrap
from flask import Flask, render_template, flash, request, redirect,\
    session, url_for
from pymongo import MongoClient
from secrets import token_hex
from forms import *
import datetime

# Application settings
DEBUG = True
app = Flask(__name__)
bootstrap = Bootstrap(app)
app.config.from_object(__name__)
app.config['SECRET_KEY'] = token_hex(16)
client = MongoClient('mongodb://*****:*****@site.com')
# Admin user document seeded at startup (the collection call is truncated in
# the source): {"movies_seen": [], "passw": "admin", "is_admin": True}


# Register a user
@app.route("/register", methods=["GET", "POST"])
def register():
    # registration form
    form = RegisterForm()
def gen_token():
    token = secrets.token_hex(32)
    # Regenerate on the (unlikely) chance the token is already in use.
    while db.exists("USER").where(curr_token=token):
        token = secrets.token_hex(32)
    return token
def config_file_fixture():
    """Fixture representing the local config file contents."""
    return {
        CONF_INSTANCE_ID: str(uuid4()),
        CONF_WEBHOOK_ID: secrets.token_hex()
    }
import secrets
import db

database = db.Db()
database.new_api_key(secrets.token_hex(), 'Akos')
async def setup_smartapp_endpoint(opp: OpenPeerPower):
    """
    Configure the SmartApp webhook in opp.

    SmartApps are an extension point within the SmartThings ecosystem and
    are used to receive push updates (i.e. device updates) from the cloud.
    """
    data = opp.data.get(DOMAIN)
    if data:
        # already setup
        return

    # Get/create config to store a unique id for this opp instance.
    store = opp.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
    config = await store.async_load()
    if not config:
        # Create config
        config = {
            CONF_INSTANCE_ID: str(uuid4()),
            CONF_WEBHOOK_ID: secrets.token_hex(),
            CONF_CLOUDHOOK_URL: None,
        }
        await store.async_save(config)

    # Register webhook
    webhook.async_register(opp, DOMAIN, "SmartApp", config[CONF_WEBHOOK_ID],
                           smartapp_webhook)

    # Create webhook if eligible
    cloudhook_url = config.get(CONF_CLOUDHOOK_URL)
    if (cloudhook_url is None
            and opp.components.cloud.async_active_subscription()
            and not opp.config_entries.async_entries(DOMAIN)):
        cloudhook_url = await opp.components.cloud.async_create_cloudhook(
            config[CONF_WEBHOOK_ID])
        config[CONF_CLOUDHOOK_URL] = cloudhook_url
        await store.async_save(config)
        _LOGGER.debug("Created cloudhook '%s'", cloudhook_url)

    # SmartAppManager uses a dispatcher to invoke callbacks when push events
    # occur. Use the opp implementation instead of the built-in one.
    dispatcher = Dispatcher(
        signal_prefix=SIGNAL_SMARTAPP_PREFIX,
        connect=functools.partial(async_dispatcher_connect, opp),
        send=functools.partial(async_dispatcher_send, opp),
    )

    # Path is used in digital signature validation
    path = (urlparse(cloudhook_url).path if cloudhook_url else
            webhook.async_generate_path(config[CONF_WEBHOOK_ID]))
    manager = SmartAppManager(path, dispatcher=dispatcher)
    manager.connect_install(functools.partial(smartapp_install, opp))
    manager.connect_update(functools.partial(smartapp_update, opp))
    manager.connect_uninstall(functools.partial(smartapp_uninstall, opp))

    opp.data[DOMAIN] = {
        DATA_MANAGER: manager,
        CONF_INSTANCE_ID: config[CONF_INSTANCE_ID],
        DATA_BROKERS: {},
        CONF_WEBHOOK_ID: config[CONF_WEBHOOK_ID],
        # Will not be present if not enabled
        CONF_CLOUDHOOK_URL: config.get(CONF_CLOUDHOOK_URL),
    }

    _LOGGER.debug(
        "Setup endpoint for %s",
        cloudhook_url if cloudhook_url else
        webhook.async_generate_url(opp, config[CONF_WEBHOOK_ID]),
    )
# random generate
# import string
# user_name = input("enter your name")
# print(user_name[::-1])
# x = string.punctuation
# print(user_name + x)
import secrets

user_name = input("enter your name")
print(user_name[::-1])
x = secrets.token_hex()
print(user_name + x)
async def InitializeJobEnv(self, request, context):
    # TODO(fyrestone): Handle duplicated InitializeJobEnv requests
    # when initializing job environment.
    # TODO(fyrestone): Support reinitialize job environment.

    # TODO(fyrestone): Use job id instead of unique id.
    unique_id = secrets.token_hex(6)

    # Parse the job description from the request.
    try:
        job_description_data = json.loads(request.job_description)
        job_info = JobInfo(unique_id=unique_id,
                           temp_dir=self._dashboard_agent.temp_dir,
                           log_dir=self._dashboard_agent.log_dir,
                           **job_description_data)
    except json.JSONDecodeError as ex:
        error_message = str(ex)
        error_message += f", job_payload:\n{request.job_description}"
        logger.error("[%s] Initialize job environment failed, %s.",
                     unique_id, error_message)
        return job_agent_pb2.InitializeJobEnvReply(
            status=agent_manager_pb2.AGENT_RPC_STATUS_FAILED,
            error_message=error_message)
    except Exception as ex:
        logger.exception(ex)
        return job_agent_pb2.InitializeJobEnvReply(
            status=agent_manager_pb2.AGENT_RPC_STATUS_FAILED,
            error_message=traceback.format_exc())

    async def _initialize_job_env():
        os.makedirs(job_consts.JOB_DIR.format(temp_dir=job_info.temp_dir,
                                              unique_id=unique_id),
                    exist_ok=True)
        # Download the job package.
        await DownloadPackage(job_info,
                              self._dashboard_agent.http_session).run()
        # Start the driver.
        logger.info("[%s] Starting driver.", unique_id)
        language = job_info.language
        if language == job_consts.PYTHON:
            driver = await StartPythonDriver(
                job_info, self._dashboard_agent.redis_address,
                self._dashboard_agent.redis_password).run()
        else:
            raise Exception(f"Unsupported language type: {language}")
        job_info.driver = driver

    initialize_task = create_task(_initialize_job_env())

    try:
        await initialize_task
    except asyncio.CancelledError:
        logger.error("[%s] Initialize job environment has been cancelled.",
                     unique_id)
        return job_agent_pb2.InitializeJobEnvReply(
            status=agent_manager_pb2.AGENT_RPC_STATUS_FAILED,
            error_message="InitializeJobEnv has been cancelled, "
            "did you call CleanJobEnv?")
    except Exception as ex:
        logger.exception(ex)
        return job_agent_pb2.InitializeJobEnvReply(
            status=agent_manager_pb2.AGENT_RPC_STATUS_FAILED,
            error_message=traceback.format_exc())

    driver_pid = 0
    if job_info.driver:
        driver_pid = job_info.driver.pid

    logger.info(
        "[%s] Job environment initialized, "
        "the driver (pid=%s) started.", unique_id, driver_pid)
    return job_agent_pb2.InitializeJobEnvReply(
        status=agent_manager_pb2.AGENT_RPC_STATUS_OK, driver_pid=driver_pid)
def secretkey():
    return secrets.token_hex(24)
#!/usr/bin/python3
from flask import Flask, render_template, url_for, redirect, request
import secrets
import pyclamd
import os
import json

app = Flask(__name__)
app.config['SECRET_KEY'] = secrets.token_hex(32)
app.config['FILES_PATH'] = '/var/lib/files/'

cd = pyclamd.ClamdAgnostic()
if not cd.ping():
    raise Exception('Unable to connect to clamd')


@app.route('/status')
@app.route('/version')
def status():
    return json.dumps({'version': cd.version()})


@app.route('/scan/<digest>')
def scan(digest):
    path = app.config['FILES_PATH'] + digest
    desc = cd.scan_file(path)
    status = 'UNSAFE'
    if desc is None:
        status = 'SAFE'
    else:
        desc = desc[path]
def get_rave_ref():
    # The query yields 1-tuples, so collect existing refs into a set of
    # strings before testing membership (comparing a str against tuples
    # would never match).
    taken_refs = {ref for (ref,) in session.query(Portfolio.rave_ref).all()}
    token = secrets.token_hex(10)
    while token in taken_refs:
        token = secrets.token_hex(10)
    return token
def execute_snapshot(remote, key_file, rc):
    '''Do the snapshot'''
    remote_env = {
        'OS_USERNAME': rc['OS_USERNAME'],
        'OS_PASSWORD': rc['OS_PASSWORD'],
    }
    # prompts = {
    #     'Please enter your Chameleon username: '******'OS_USERNAME'],
    #     'Please enter your Chameleon password: '******'OS_PASSWORD'],
    # }
    fab_settings = {
        'user': '******',
        'host_string': remote,
        'key_filename': key_file,
        'abort_on_prompts': True,
        'warn_only': True,
        # no security!
        'reject_unknown_hosts': False,
        'disable_known_hosts': True,
        # 'prompts': prompts,
    }

    # contrive passwords for debugging
    ccpass = secrets.token_hex(nbytes=128 // 8)
    ccapass = secrets.token_hex(nbytes=128 // 8)
    print('Debug Passwords:')
    print('{:>10s} {}'.format('cc', ccpass))
    print('{:>10s} {}'.format('ccadmin', ccapass))

    # test fast fail
    print('checking if it fails quickly if bad credentials passed')
    start = time.monotonic()
    with fapi.settings(**fab_settings), \
            fcm.cd(REMOTE_WORKSPACE), \
            fcm.shell_env(OS_USERNAME='******', OS_PASSWORD='******'):
        fapi.run('chmod +x cc-snapshot')
        out = fapi.sudo('./cc-snapshot')
    elapsed = time.monotonic() - start

    assert elapsed < 5
    assert out.return_code != 0
    assert 'check username' in out

    print('doing a real snapshot run...')
    with fapi.settings(**fab_settings), \
            fcm.cd(REMOTE_WORKSPACE), \
            fcm.shell_env(**remote_env):
        # debug passwords
        fapi.sudo("echo -e 'cc:{}\\nccadmin:{}' | chpasswd".format(
            ccpass, ccapass))
        # fapi.run('chmod +x cc-snapshot')
        out = fapi.sudo('./cc-snapshot')
        if out.return_code != 0:
            raise RuntimeError('snapshot returned non-zero!')

    print('snapshot finished!')

    lines = out.splitlines()
    ilines = iter(lines)
    for line in ilines:
        if parse_line(line) == ['Property', 'Value']:
            next(ilines)  # consume the ---- line
            break

    image_info = {}
    for line in ilines:
        if '------------' in line:
            # end of table
            break
        try:
            key, value = parse_line(line)
        except TypeError:
            print('could not parse line: "{}"'.format(line))
            continue
        image_info[key] = value

    if not image_info:
        raise RuntimeError('could not parse image info!')
    pprint(image_info)

    return image_info
from secrets import token_hex
import os

SECRET_KEY = token_hex(16)

SQLALCHEMY_DATABASE_URI = os.environ.get(
    'DATABASE_URL') or 'sqlite:////' + os.path.abspath(
        os.getcwd()) + '/Database/database.db'
SQLALCHEMY_TRACK_MODIFICATIONS = False

MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
MAIL_SUBJECT_PREFIX = 'Ig-Clone : '
MAIL_DEFAULT_SENDER = 'Drac From ig-clone'

DROPZONE_DEFAULT_MESSAGE = 'Drop or Click to upload <br> (max 3 files can be uploaded)'
DROPZONE_ALLOWED_FILE_TYPE = 'image'
DROPZONE_MAX_FILE_SIZE = 5
DROPZONE_MAX_FILES = 3
DROPZONE_UPLOAD_ON_CLICK = True
DROPZONE_IN_FORM = True
DROPZONE_ENABLE_CSRF = True
DROPZONE_UPLOAD_MULTIPLE = True
DROPZONE_PARALLEL_UPLOADS = 3
DROPZONE_UPLOAD_BTN_ID = 'submit'
DROPZONE_UPLOAD_ACTION = 'post.upload_image'

WHOOSH_BASE = '../Whoosh_Base/'
def _encode_jwt(hass: HomeAssistant, data: dict) -> str:
    """JWT encode data."""
    if (secret := hass.data.get(DATA_JWT_SECRET)) is None:
        secret = hass.data[DATA_JWT_SECRET] = secrets.token_hex()
    # The source is truncated here; presumably the data is then signed with
    # the stored secret, e.g.:
    return jwt.encode(data, secret, algorithm="HS256")
def start_subsystem(self, name, transport, channel):
    self.sock = channel
    username = self.get_server().transport.get_username()
    home = 'root' if username == 'root' else 'home/%s' % username
    self._send_server_version()
    while True:
        try:
            t, data = self._read_packet()
            msg = paramiko.Message(data)
            req_num = msg.get_int()
            if t == sftp.CMD_REALPATH:
                path = msg.get_text()
                self.log_event(sftp_cmd='realpath', path=path)
                self.send_response(
                    req_num,
                    sftp.CMD_NAME,
                    1,
                    '/' + clean_path(home + '/' + path.replace('\\', '/')),
                    '',
                    paramiko.SFTPAttributes(),
                )
            elif t == sftp.CMD_OPEN:
                self.log_event(sftp_cmd='open', path=msg.get_text())
                self.send_response(req_num, sftp.CMD_HANDLE,
                                   secrets.token_hex(4))
            elif t == sftp.CMD_CLOSE:
                self.log_event(sftp_cmd='close')
                self.send_status(req_num, paramiko.SFTP_OK)
            elif t == sftp.CMD_READ:
                self.log_event(sftp_cmd='read')
                self.send_status(req_num, paramiko.SFTP_BAD_MESSAGE)
            elif t == sftp.CMD_WRITE:
                self.log_event(sftp_cmd='write')
                self.send_status(req_num, paramiko.SFTP_OK)
            elif t == sftp.CMD_REMOVE:
                self.log_event(sftp_cmd='remove', path=msg.get_text())
                self.send_status(req_num, paramiko.SFTP_PERMISSION_DENIED)
            elif t == sftp.CMD_RENAME:
                self.log_event(
                    sftp_cmd='rename',
                    source=msg.get_text(),
                    destination=msg.get_text(),
                )
                self.send_status(req_num, paramiko.SFTP_PERMISSION_DENIED)
            elif t == sftp.CMD_MKDIR:
                self.log_event(sftp_cmd='mkdir', path=msg.get_text())
                self.send_status(req_num, paramiko.SFTP_PERMISSION_DENIED)
            elif t == sftp.CMD_RMDIR:
                self.log_event(sftp_cmd='rmdir', path=msg.get_text())
                self.send_status(req_num, paramiko.SFTP_PERMISSION_DENIED)
            elif t == sftp.CMD_OPENDIR:
                self.log_event(sftp_cmd='opendir', path=msg.get_text())
                self.send_status(req_num, paramiko.SFTP_PERMISSION_DENIED)
            elif t == sftp.CMD_READDIR:
                self.log_event(sftp_cmd='readdir')
                self.send_status(req_num, paramiko.SFTP_BAD_MESSAGE)
            elif t == sftp.CMD_STAT:
                self.log_event(sftp_cmd='stat', path=msg.get_text())
                self.send_status(req_num, paramiko.SFTP_PERMISSION_DENIED)
            elif t == sftp.CMD_LSTAT:
                self.log_event(sftp_cmd='lstat', path=msg.get_text())
                self.send_status(req_num, paramiko.SFTP_PERMISSION_DENIED)
            elif t == sftp.CMD_FSTAT:
                self.log_event(sftp_cmd='fstat')
                self.send_status(req_num, paramiko.SFTP_BAD_MESSAGE)
            elif t == sftp.CMD_SETSTAT:
                self.log_event(sftp_cmd='setstat', path=msg.get_text())
                self.send_status(req_num, paramiko.SFTP_PERMISSION_DENIED)
            elif t == sftp.CMD_FSETSTAT:
                self.log_event(sftp_cmd='fsetstat')
                self.send_status(req_num, paramiko.SFTP_BAD_MESSAGE)
            elif t == sftp.CMD_READLINK:
                self.log_event(sftp_cmd='readlink', path=msg.get_text())
                self.send_status(req_num, paramiko.SFTP_PERMISSION_DENIED)
            elif t == sftp.CMD_SYMLINK:
                self.log_event(
                    sftp_cmd='symlink',
                    target=msg.get_text(),
                    link_name=msg.get_text(),
                )
                self.send_status(req_num, paramiko.SFTP_PERMISSION_DENIED)
            elif t == sftp.CMD_EXTENDED:
                tag = msg.get_text()
                if tag == 'check-file':
                    self.send_status(req_num, paramiko.SFTP_BAD_HANDLE)
                elif tag.endswith('@openssh.com'):
                    self.send_status(req_num,
                                     paramiko.SFTP_PERMISSION_DENIED)
                self.log_event(sftp_cmd='extended', tag=tag)
            else:
                self.log_event(sftp_cmd_raw=t)
                self.send_status(req_num, paramiko.SFTP_OP_UNSUPPORTED)
        except EOFError:
            return
        except Exception:
            traceback.print_exc()
            return
def generate_random_abi_revision():
    """Generates a random ABI revision.

    ABI revisions are hex encodings of 64-bit, unsigned integers.
    """
    return '0x{abi_revision}'.format(abi_revision=secrets.token_hex(8).upper())
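# Quick usage check (illustrative, not from the source): token_hex(8) yields
# 16 hex digits, so the result always parses back into a 64-bit unsigned int.
rev = generate_random_abi_revision()
assert 0 <= int(rev, 16) < 2**64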
def generateNewAPIKey():
    key = str(uuid.uuid4()) + "-" + secrets.token_hex(4)
    return key
def generate_token(cls):
    return token_hex(32)
def create(self, auv):
    token = secrets.token_hex(16)
    token_hash_hex = hashlib.sha256(token.encode('ascii')).hexdigest()
    super().create(auv=auv, token=token, token_hash=token_hash_hex)
    # Note only the token - not the AUVTokenManager object - is returned
    return token
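# Hedged sketch (hypothetical, not from the source): the lookup side of the
# scheme above would hash a presented token the same way and search by the
# stored digest, so the raw token never has to be stored or indexed.
def verify(self, presented_token):
    digest = hashlib.sha256(presented_token.encode('ascii')).hexdigest()
    return self.filter(token_hash=digest).exists()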
import tasho
import secrets

database = tasho.Database.open("testTable")
table = database.table.Default
table.auto_commit = False

for i in range(0, 50000):
    table.insert(i, {secrets.token_hex(): secrets.token_hex()})

table.commit()
print("OK")
def updateproduct(id):
    form = Addproducts(request.form)
    product = Addproduct.query.get_or_404(id)
    brands = Brand.query.all()
    categories = Category.query.all()
    brand = request.form.get('brand')
    category = request.form.get('category')
    if request.method == "POST":
        product.name = form.name.data
        product.price = form.price.data
        product.discount = form.discount.data
        product.stock = form.stock.data
        product.colors = form.color.data
        product.desc = form.description.data
        product.category_id = category
        product.brand_id = brand
        if request.files.get('image_1'):
            try:
                os.unlink(
                    os.path.join(current_app.root_path,
                                 "static/images/" + product.image_1))
                product.image_1 = photos.save(request.files.get('image_1'),
                                              name=secrets.token_hex(10) + ".")
            except:
                product.image_1 = photos.save(request.files.get('image_1'),
                                              name=secrets.token_hex(10) + ".")
        if request.files.get('image_2'):
            try:
                os.unlink(
                    os.path.join(current_app.root_path,
                                 "static/images/" + product.image_2))
                product.image_2 = photos.save(request.files.get('image_2'),
                                              name=secrets.token_hex(10) + ".")
            except:
                product.image_2 = photos.save(request.files.get('image_2'),
                                              name=secrets.token_hex(10) + ".")
        if request.files.get('image_3'):
            try:
                os.unlink(
                    os.path.join(current_app.root_path,
                                 "static/images/" + product.image_3))
                product.image_3 = photos.save(request.files.get('image_3'),
                                              name=secrets.token_hex(10) + ".")
            except:
                product.image_3 = photos.save(request.files.get('image_3'),
                                              name=secrets.token_hex(10) + ".")
        flash('The product is updated', 'success')
        db.session.commit()
        return redirect(url_for('admin'))
    form.name.data = product.name
    form.price.data = product.price
    form.discount.data = product.discount
    form.stock.data = product.stock
    form.color.data = product.colors
    form.description.data = product.desc
    brand = product.brand.name
    category = product.category.name
    return render_template('products/updateproduct.html', form=form,
                           title='Update Product', getproduct=product,
                           brands=brands, categories=categories)
def set_token(self, length):
    return secrets.token_hex(length)
def home_real(request: HttpRequest) -> HttpResponse:
    # Before we do any real work, check if the app is banned.
    client_user_agent = request.headers.get("User-Agent", "")
    (insecure_desktop_app, banned_desktop_app,
     auto_update_broken) = is_outdated_desktop_app(client_user_agent)
    if banned_desktop_app:
        return render(
            request,
            "zerver/insecure_desktop_app.html",
            context={
                "auto_update_broken": auto_update_broken,
            },
        )
    (unsupported_browser, browser_name) = is_unsupported_browser(
        client_user_agent)
    if unsupported_browser:
        return render(
            request,
            "zerver/unsupported_browser.html",
            context={
                "browser_name": browser_name,
            },
        )

    # We need to modify the session object every two weeks or it will expire.
    # This line makes reloading the page a sufficient action to keep the
    # session alive.
    request.session.modified = True

    if request.user.is_authenticated:
        user_profile = request.user
        realm = user_profile.realm
    else:
        realm = get_valid_realm_from_request(request)
        # We load the spectator experience. We fall through to the shared code
        # for loading the application, with user_profile=None encoding
        # that we're a spectator, not a logged-in user.
        user_profile = None

    update_last_reminder(user_profile)

    statsd.incr("views.home")

    # If a user hasn't signed the current Terms of Service, send them there
    if need_accept_tos(user_profile):
        return accounts_accept_terms(request)

    narrow, narrow_stream, narrow_topic = detect_narrowed_window(
        request, user_profile)

    if user_profile is not None:
        first_in_realm = realm_user_count(user_profile.realm) == 1
        # If you are the only person in the realm and you didn't invite
        # anyone, we'll continue to encourage you to do so on the frontend.
        prompt_for_invites = (first_in_realm
                              and not PreregistrationUser.objects.filter(
                                  referred_by=user_profile).count())
        needs_tutorial = (user_profile.tutorial_status ==
                          UserProfile.TUTORIAL_WAITING)
    else:
        first_in_realm = False
        prompt_for_invites = False
        # The current tutorial doesn't super make sense for logged-out users.
        needs_tutorial = False

    queue_id, page_params = build_page_params_for_home_page_load(
        request=request,
        user_profile=user_profile,
        realm=realm,
        insecure_desktop_app=insecure_desktop_app,
        narrow=narrow,
        narrow_stream=narrow_stream,
        narrow_topic=narrow_topic,
        first_in_realm=first_in_realm,
        prompt_for_invites=prompt_for_invites,
        needs_tutorial=needs_tutorial,
    )

    log_data = RequestNotes.get_notes(request).log_data
    assert log_data is not None
    log_data["extra"] = f"[{queue_id}]"

    csp_nonce = secrets.token_hex(24)

    user_permission_info = get_user_permission_info(user_profile)

    response = render(
        request,
        "zerver/app/index.html",
        context={
            "user_profile": user_profile,
            "page_params": page_params,
            "csp_nonce": csp_nonce,
            "color_scheme": user_permission_info.color_scheme,
        },
    )
    patch_cache_control(response, no_cache=True, no_store=True,
                        must_revalidate=True)
    return response
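# Hedged sketch (not from the source): a CSP nonce like the one generated
# above is typically echoed in the Content-Security-Policy response header so
# that inline scripts tagged with the same nonce are allowed to run. The
# header name is standard; the wiring below is an illustrative assumption
# (real deployments usually set this via middleware).
response["Content-Security-Policy"] = f"script-src 'self' 'nonce-{csp_nonce}'"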
def initialize_ray(
    override_is_cluster=False,
    override_redis_address: str = None,
    override_redis_password: str = None,
):
    """
    Initializes ray based on parameters, environment variables and internal defaults.

    Parameters
    ----------
    override_is_cluster: bool, optional
        Whether to override the detection of Modin being run in a cluster
        and always assume this runs on cluster head node.
        This also overrides Ray worker detection and always runs the
        function, not only from main thread.
        If not specified, $MODIN_RAY_CLUSTER env variable is used.
    override_redis_address: str, optional
        What Redis address to connect to when running in Ray cluster.
        If not specified, $MODIN_REDIS_ADDRESS is used.
    override_redis_password: str, optional
        What password to use when connecting to Redis.
        If not specified, a new random one is generated.
    """
    import ray

    if not ray.is_initialized() or override_is_cluster:
        import secrets

        cluster = override_is_cluster or IsRayCluster.get()
        redis_address = override_redis_address or RayRedisAddress.get()
        redis_password = override_redis_password or secrets.token_hex(32)

        if cluster:
            # We only start ray in a cluster setting for the head node.
            ray.init(
                address=redis_address or "auto",
                include_dashboard=False,
                ignore_reinit_error=True,
                _redis_password=redis_password,
                logging_level=100,
            )
        else:
            from modin.error_message import ErrorMessage

            # This string is intentionally formatted this way. We want it
            # indented in the warning message.
            ErrorMessage.not_initialized(
                "Ray",
                """
    import ray
    ray.init()
""",
            )
            object_store_memory = Memory.get()
            plasma_directory = RayPlasmaDir.get()
            if IsOutOfCore.get():
                if plasma_directory is None:
                    from tempfile import gettempdir

                    plasma_directory = gettempdir()
                # We may have already set the memory from the environment
                # variable, we don't want to overwrite that value if we have.
                if object_store_memory is None:
                    # Round down to the nearest Gigabyte.
                    mem_bytes = ray.utils.get_system_memory() // 10**9 * 10**9
                    # Default to 8x memory for out of core
                    object_store_memory = 8 * mem_bytes
            # In case anything failed above, we can still improve the memory
            # for Modin.
            if object_store_memory is None:
                # Round down to the nearest Gigabyte.
                object_store_memory = int(
                    0.6 * ray.utils.get_system_memory() // 10**9 * 10**9)
                # If the memory pool is smaller than 2GB, just use the
                # default in ray.
                if object_store_memory == 0:
                    object_store_memory = None
            else:
                object_store_memory = int(object_store_memory)
            ray.init(
                num_cpus=CpuCount.get(),
                include_dashboard=False,
                ignore_reinit_error=True,
                _plasma_directory=plasma_directory,
                object_store_memory=object_store_memory,
                address=redis_address,
                _redis_password=redis_password,
                logging_level=100,
                _memory=object_store_memory,
                _lru_evict=True,
            )

        _move_stdlib_ahead_of_site_packages()
        ray.worker.global_worker.run_function_on_all_workers(
            _move_stdlib_ahead_of_site_packages)
        ray.worker.global_worker.run_function_on_all_workers(_import_pandas)
def __init__(self, hubServer):
    self.hubServer = hubServer
    self.verificationToken = token_hex(256)
    self.status = 0