def create_discovery(self, discovery, scope, api_version): """ Create Google Cloud API discovery object and perform authentication. :param discovery: name of the API discovery to be created :param scope: scope the API discovery will have :param api_version: version of the API :return: discovery object :raise: GCPError if there is a problem with service account JSON file: e.g. the file is not under the given path or it has wrong permissions """ # Crypto.Random.atfork() must be called here because celery doesn't do # it atfork() if hasattr(self.auth, 'get'): creds_func = ServiceAccountCredentials.from_json_keyfile_dict else: creds_func = ServiceAccountCredentials.from_json_keyfile_name try: credentials = creds_func(self.auth, scopes=scope) http = httplib2.Http() credentials.authorize(http) return build(discovery, api_version, http=http) except IOError as e: self.logger.error(str(e)) raise GCPError(str(e))
def try_mine_block(args):
    # Re-seed PyCrypto's RNG after fork (this runs in a worker process).
    atfork()
    solve_block(*args)
    # Send to the server
    # NOTE(review): `core`, `new_block` and `BLOCK_CONTENTS` are globals
    # defined elsewhere in this file — confirm they are set before use.
    print core, "- Solved block."  # Python 2 print statement
    add_block(new_block, BLOCK_CONTENTS)
    return "SUCCESS"
def handle(self):
    # Re-seed PyCrypto's RNG in the forked connection handler.
    atfork()
    req = self.request
    # Hard deadline for the proof-of-work phase.
    signal.alarm(60)

    def recvline():
        # Read one byte at a time until a newline terminates the line.
        buf = ""
        while not buf.endswith("\n"):
            buf += req.recv(1)
        return buf

    proof_of_work(req)
    # Extend the deadline for the decryption-oracle phase.
    signal.alarm(120)
    req.sendall(msg)
    req.sendall("Encrypted Flag: {}\n".format(enc_flag))
    # LSB decryption oracle: leak only the parity of each decryption.
    while True:
        req.sendall("Give a ciphertext: ")
        x = long(recvline())  # Python 2 `long`
        m = decrypt(x, p, q)
        if m == None:
            m = 0
        req.sendall("lsb is {}\n".format(m % 2))
    # NOTE(review): unreachable — the loop above never breaks.
    req.close()
def decrypt(self, value: typing.Union[str, bytes]) -> str:
    """Decrypt a base64-encoded RSA ciphertext, trying the new OAEP
    scheme first and falling back to the legacy PyCrypto key.

    Returns the plaintext, or the literal string 'decript error' if
    both schemes fail.
    """
    if isinstance(value, str):
        value = value.encode('utf-8')
    data: bytes = typing.cast(bytes, encoders.decode(value, 'base64'))
    decrypted: bytes
    try:
        # First, try new "cryptografy" decrpypting
        decrypted = self._rsa.decrypt(
            data,
            padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
                         algorithm=hashes.SHA256(),
                         label=None))
    except Exception:
        # If fails, try old method
        try:
            # Legacy path uses PyCrypto, whose RNG must be re-seeded
            # after a fork.
            atfork()
            decrypted = self._oldRsa.decrypt(
                encoders.decode(value, 'base64'))
            return decrypted.decode()
        except Exception:
            logger.exception('Decripting: %s', value)
            # logger.error(inspect.stack())
            return 'decript error'
    # logger.debug('Decripted: %s %s', data, decrypted)
    return decrypted.decode()
def decrypt(self, value: str) -> str:
    """Decrypt a base64-encoded RSA ciphertext: try the OAEP scheme
    first, fall back to the legacy PyCrypto key, and return the literal
    string 'decript error' if both fail."""
    data: bytes = codecs.decode(value.encode(), 'base64')
    try:
        # First, try new "cryptografy" decrpypting
        decrypted: bytes = self._rsa.decrypt(
            data,
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA256()),
                algorithm=hashes.SHA256(),
                label=None,
            ),
        )
    except Exception:
        # If fails, try old method
        try:
            # Legacy PyCrypto RNG must be re-seeded after a fork.
            atfork()
            decrypted = self._oldRsa.decrypt(
                codecs.decode(value.encode(), 'base64'))
            return decrypted.decode()
        except Exception:
            logger.exception('Decripting: %s', value)
            # logger.error(inspect.stack())
            return 'decript error'
    # logger.debug('Decripted: %s %s', data, decrypted)
    return decrypted.decode()
def handle(s):
    # Re-seed PyCrypto's RNG in the forked handler; cap the whole
    # session at 20 minutes.
    atfork()
    signal.alarm(1200)
    req = s.request

    def write(m):
        req.sendall(m)

    def writeline(m):
        write(m + "\n")

    def readline():
        # Byte-at-a-time read until a newline is seen.
        buf = ""
        while True:
            buf += req.recv(1)
            if buf.endswith("\n"):
                break
        return buf

    proof_of_work(req)
    writeline("Welcome to the Flag Decryption Challenge!")
    write("Generating the key...")
    pk, sk = gen_key()
    writeline("Done.")
    writeline("Public key is here: (%d, %d)" % pk)
    writeline("...and Encrypted Flag: %d" % encrypt(pk, FLAG))
    # LSB oracle: reveal only the low bit of each decryption.
    while True:
        writeline("Your ciphertext here: ")
        c = long(readline())  # Python 2 `long`
        m = decrypt(pk, sk, c)
        writeline("LSB is %d" % (m & 1))
def downhill(p, g, A):
    """Server side of a DH-style exchange.

    Draws 16-bit prime exponents until the resulting public value B is
    in range [2, p-1] and is a quadratic residue mod p, then returns
    (B, shared_secret) where shared_secret = A**b mod p.
    """
    atfork()  # re-seed PyCrypto's RNG after fork
    p, g, A = int(p), int(g), int(A)
    while True:
        exponent = getPrime(16)
        public = pow(g, exponent, p)
        if public < 2 or public > p - 1:
            continue
        if pow(public, (p - 1) // 2, p) != 1:
            continue
        return public, pow(A, exponent, p)
def create_discovery(self, discovery, scope, api_version): """ Create Google Cloud API discovery object and perform authentication. :param discovery: name of the API discovery to be created :param scope: scope the API discovery will have :param api_version: version of the API :return: discovery object :raise: GCPError if there is a problem with service account JSON file: e.g. the file is not under the given path or it has wrong permissions """ # Crypto.Random.atfork() must be called here because celery doesn't do # it atfork() if hasattr(self.auth, 'get'): creds_func = ServiceAccountCredentials.from_json_keyfile_dict else: creds_func = ServiceAccountCredentials.from_json_keyfile_name try: credentials = creds_func( self.auth, scopes=scope) http = httplib2.Http() credentials.authorize(http) return build(discovery, api_version, http=http) except IOError as e: self.logger.error(str(e)) raise GCPError(str(e))
def run(self):
    '''
    Called when the process is started, and loops indefinitely
    until an error is encountered (typically an IOerror from the
    queue pipe being disconnected). During the loop, we attempt
    to pull tasks off the job queue and run them, pushing the result
    onto the results queue. We also remove the host from the blocked
    hosts list, to signify that they are ready for their next task.
    '''

    # PyCrypto's RNG must be re-seeded in the forked worker process.
    if HAS_ATFORK:
        atfork()

    try:
        # execute the task and build a TaskResult from the result
        display.debug("running TaskExecutor() for %s/%s" % (self._host, self._task))
        executor_result = TaskExecutor(self._host, self._task, self._task_vars, self._play_context, self._new_stdin, self._loader, self._shared_loader_obj, self._rslt_q).run()
        display.debug("done running TaskExecutor() for %s/%s" % (self._host, self._task))
        # Clear per-host state so it is not pickled back over the queue.
        self._host.vars = dict()
        self._host.groups = []
        task_result = TaskResult(self._host, self._task, executor_result)

        # put the result on the result queue
        display.debug("sending task result")
        self._rslt_q.put(task_result)
        display.debug("done sending task result")

    except AnsibleConnectionFailure:
        self._host.vars = dict()
        self._host.groups = []
        task_result = TaskResult(self._host, self._task, dict(unreachable=True))
        self._rslt_q.put(task_result, block=False)

    except Exception as e:
        # Pipe/teardown errors are expected at shutdown; report anything
        # else back to the parent. TemplateNotFound is special-cased,
        # presumably because it subclasses IOError — confirm.
        if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound):
            try:
                self._host.vars = dict()
                self._host.groups = []
                task_result = TaskResult(self._host, self._task, dict(failed=True, exception=to_unicode(traceback.format_exc()), stdout=''))
                self._rslt_q.put(task_result, block=False)
            except:
                display.debug(u"WORKER EXCEPTION: %s" % to_unicode(e))
                display.debug(u"WORKER TRACEBACK: %s" % to_unicode(traceback.format_exc()))

    display.debug("WORKER PROCESS EXITING")
def sign_request(self, seckey64, data):
    """Sign data using public/private key signature. Signature algorithm used
    is RSA. Hash algorithm is SHA256.

    :param seckey64: secret key encoded in base64
    :param data: data to sign
    :return: data signature (hex encoded)
    :rtype: str
    """
    try:
        # Re-seed PyCrypto's RNG if this object crossed a fork since it
        # recorded self.pid.
        if current_process().ident != self.pid:
            atfork()

        # import key
        seckey = binascii.a2b_base64(seckey64)
        key = RSA.importKey(seckey)

        # create data hash
        hash_data = SHA256.new(data)  # .digest()
        #self.logger.debug('Get data: %s' % data)
        #self.logger.debug('Created hash: %s' % binascii.b2a_base64(hash_data.digest()))

        # sign data
        signer = PKCS1_v1_5.new(key)
        signature = signer.sign(hash_data)

        # encode signature in base64
        #signature64 = binascii.b2a_base64(signature)
        signature64 = binascii.b2a_hex(signature)

        return signature64
    except Exception as ex:
        self.logger.error(ex, exc_info=1)
        raise BeehiveApiClientError(u'Error signing data: %s' % data, code=401)
def backup(self):
    # Loop until a message arrives on the control queue, taking a
    # cluster backup every self.interval seconds.
    while True:
        try:
            x = self.queue.get_nowait()
            self.log.info("get_nowait : {0}".format(x))
            # A message on the queue is the shutdown signal.
            break
            # things are normal, just do another backup after
            # waiting for self.interval
        except Exception:
            master = self.servers[0]
            rest = RestConnection(master)
            nodes = rest.node_statuses()
            map = self.node_server_map(nodes, self.servers)
            self.log.info("cluster has {0} nodes".format(len(nodes)))
            for node in nodes:
                try:
                    # Re-seed PyCrypto's RNG in this (possibly forked)
                    # worker before using BackupHelper.
                    from Crypto.Random import atfork
                    atfork()
                    # NOTE(review): backup is invoked twice per node —
                    # looks unintentional; confirm before changing.
                    BackupHelper(map[node]).backup('default', "/tmp")
                    BackupHelper(map[node]).backup('default', "/tmp")
                except Exception as ex:
                    print ex  # Python 2 print statement
                self.log.info("backed up the data into ")
            time.sleep(self.interval)
def get_expirations(keylist):
    """
    This function is not implemented in GPG object class because need to
    operate on the whole keys

    :param keylist: iterable of ASCII-armored GPG keys
    :return: dict mapping key fingerprint -> expiration date string
    """
    # Re-seed PyCrypto's RNG (this may run in a forked worker).
    atfork()

    try:
        # Use a throwaway gnupg home so this check never touches the
        # real keyrings.
        temp_gpgroot = os.path.join(GLSetting.gpgroot, "-expiration_check-%s" % random.randint(0, 0xFFFF))
        os.makedirs(temp_gpgroot, mode=0700)
        gpexpire = GPG(gnupghome=temp_gpgroot, options="--trust-model always")
    except Exception as excep:
        log.err("Unable to setup expiration check environment: %s" % excep)
        raise excep

    try:
        for key in keylist:
            gpexpire.import_keys(key)
    except Exception as excep:
        log.err("Error in GPG import_keys: %s" % excep)
        raise excep

    try:
        all_keys = gpexpire.list_keys()
    except Exception as excep:
        log.err("Error in GPG list_keys: %s" % excep)
        raise excep

    expirations = {}
    for ak in all_keys:
        expirations.update({ak['fingerprint']: ak['date']})

    return expirations
def test_negotiate(self, group=14):
    # Open a listening socket on an ephemeral port, then fork: the
    # child plays the DH server, the parent the client.
    server = socket.socket()
    server.bind(('', 0))
    server.listen(1)
    port = server.getsockname()[1]
    pid = os.fork()
    # Re-seed PyCrypto's RNG on both sides of the fork.
    atfork()

    # child process - aka, the server
    if pid == 0:
        sock, _ = server.accept()
        server.close()
    # parent - aka, the client
    else:
        sock = socket.socket()
        sock.connect(('', port))
        server.close()

    # Both sides run the same negotiation over the connected socket.
    alice = pyDHE.new(group)
    local_key = alice.negotiate(sock)
    #sock.close()

    if pid == 0:
        sock.send(long_to_bytes(local_key))
        sock.close()
        # NOTE(review): the child falls through to the assertion below
        # with `remote_key` unbound (NameError) — it presumably should
        # os._exit() here; confirm.
    else:
        os.wait()
        remote_key = bytes_to_long(sock.recv(1024))
        sock.close()

    self.assertEqual(local_key, remote_key, "keys do not match")
def gen_ssh_keys(email):
    """Generate a fresh 2048-bit RSA SSH keypair.

    :param email: comment appended to the public key line
    :return: tuple (private_key_text, public_key_line) where the public
        line has the usual "ssh-rsa <base64> <email>" form
    """
    # Re-seed PyCrypto's RNG after fork so key material is never
    # repeated across worker processes.
    atfork()
    key = paramiko.RSAKey.generate(bits=2048)
    # Serialize the private key to memory instead of a temp file: the
    # original wrote it to a predictable /tmp path, never deleted it
    # (leaving the private key on disk) and never closed the handle.
    import io
    buf = io.StringIO()
    key.write_private_key(buf)
    key_text = buf.getvalue()
    pub = "{0} {1} {2}".format(key.get_name(), key.get_base64(), email)
    return key_text, pub
def encrypt(self, value):
    """RSA-encrypt *value* (text is UTF-8 encoded first) and return the
    ciphertext base64-encoded as text."""
    if isinstance(value, six.text_type):
        value = value.encode('utf-8')
    atfork()  # re-seed PyCrypto's RNG after fork
    ciphertext = self._rsa.encrypt(value, six.b(''))[0]
    return encoders.encode(ciphertext, 'base64', asText=True)
def encrypt(self, value: typing.Union[str, bytes]) -> str:
    """RSA-encrypt *value* (text is UTF-8 encoded first) and return the
    ciphertext base64-encoded as text."""
    if isinstance(value, str):
        value = value.encode('utf-8')
    atfork()  # re-seed PyCrypto's RNG after fork
    raw = self._rsa.encrypt(value, b'')[0]
    return encoders.encode(raw, 'base64', asText=True)
def new_collection_mapping(original_collection):
    """
    CiphertextCollectionMapping.new wrapper, to be used with multiprocessing module.
    """
    # Re-seed PyCrypto's RNG inside the worker process before any
    # key/randomness-consuming work.
    from Crypto.Random import atfork
    atfork()
    return CiphertextCollectionMapping.new(original_collection)
def encrypt(self, value):
    """Encrypt *value* with RSA (text is UTF-8 encoded first) and
    return the codec-encoded text form."""
    if isinstance(value, six.text_type):
        value = value.encode('utf-8')
    atfork()  # re-seed PyCrypto's RNG after fork
    raw = self._rsa.encrypt(value, six.b(''))[0]
    encoded = codecs.encode(raw, CryptoManager.CODEC)
    return six.text_type(encoded)
def unset(self,encrypted,**kwargs):
    """Decrypt a payload produced by set(): after base64 decoding, the
    first block is the IV and the remainder is AES-CFB ciphertext."""
    # Re-seed PyCrypto's RNG after fork.
    atfork()
    self._set_eparams(**kwargs)
    encrypted = base64.b64decode(encrypted)
    # The IV was prepended by set(); split it off before decrypting.
    iv = encrypted[:self._block_size]
    aes = AES.new(str(self.passphrase), AES.MODE_CFB, iv)
    return aes.decrypt(encrypted[self._block_size:])
def set(self,_str,**kwargs):
    """Encrypt *_str* with AES-CFB and return base64(IV + ciphertext).

    Non-ASCII characters in the input are dropped by the
    'ascii'/'ignore' encode.
    """
    # Re-seed PyCrypto's RNG after fork.
    atfork()
    self._set_eparams(**kwargs)
    aes = AES.new(str(self.passphrase), AES.MODE_CFB, self.iv)
    # Prepend the IV so unset() can recover it.
    return base64.b64encode(self.iv + aes.encrypt(_str.encode('ascii', 'ignore')))
def run(self): ''' Called when the process is started. Pushes the result onto the results queue. We also remove the host from the blocked hosts list, to signify that they are ready for their next task. ''' #import cProfile, pstats, StringIO #pr = cProfile.Profile() #pr.enable() if HAS_ATFORK: atfork() try: # execute the task and build a TaskResult from the result display.debug("running TaskExecutor() for %s/%s" % (self._host, self._task)) executor_result = TaskExecutor( self._host, self._task, self._task_vars, self._play_context, self._new_stdin, self._loader, self._shared_loader_obj, self._rslt_q ).run() display.debug("done running TaskExecutor() for %s/%s" % (self._host, self._task)) self._host.vars = dict() self._host.groups = [] task_result = TaskResult(self._host.name, self._task._uuid, executor_result) # put the result on the result queue display.debug("sending task result") self._rslt_q.put(task_result) display.debug("done sending task result") except AnsibleConnectionFailure: self._host.vars = dict() self._host.groups = [] task_result = TaskResult(self._host.name, self._task._uuid, dict(unreachable=True)) self._rslt_q.put(task_result, block=False) except Exception as e: if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound): try: self._host.vars = dict() self._host.groups = [] task_result = TaskResult(self._host.name, self._task._uuid, dict(failed=True, exception=to_text(traceback.format_exc()), stdout='')) self._rslt_q.put(task_result, block=False) except: display.debug(u"WORKER EXCEPTION: %s" % to_text(e)) display.debug(u"WORKER TRACEBACK: %s" % to_text(traceback.format_exc())) display.debug("WORKER PROCESS EXITING")
def run(self): ''' Called when the process is started. Pushes the result onto the results queue. We also remove the host from the blocked hosts list, to signify that they are ready for their next task. ''' #import cProfile, pstats, StringIO #pr = cProfile.Profile() #pr.enable() if HAS_ATFORK: atfork() try: # execute the task and build a TaskResult from the result display.debug("running TaskExecutor() for %s/%s" % (self._host, self._task)) executor_result = TaskExecutor( self._host, self._task, self._task_vars, self._play_context, self._new_stdin, self._loader, self._shared_loader_obj, self._rslt_q ).run() display.debug("done running TaskExecutor() for %s/%s" % (self._host, self._task)) self._host.vars = dict() self._host.groups = [] task_result = TaskResult(self._host.name, self._task._uuid, executor_result) # put the result on the result queue display.debug("sending task result") self._rslt_q.put(task_result) display.debug("done sending task result") except AnsibleConnectionFailure: self._host.vars = dict() self._host.groups = [] task_result = TaskResult(self._host.name, self._task._uuid, dict(unreachable=True)) self._rslt_q.put(task_result, block=False) except Exception as e: if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound): try: self._host.vars = dict() self._host.groups = [] task_result = TaskResult(self._host.name, self._task._uuid, dict(failed=True, exception=to_unicode(traceback.format_exc()), stdout='')) self._rslt_q.put(task_result, block=False) except: display.debug(u"WORKER EXCEPTION: %s" % to_unicode(e)) display.debug(u"WORKER TRACEBACK: %s" % to_unicode(traceback.format_exc())) display.debug("WORKER PROCESS EXITING")
def get_credentials(self, scope):
    """Build ServiceAccountCredentials for *scope* from self.auth,
    which is either a parsed keyfile dict or a keyfile path."""
    # Crypto.Random.atfork() must be called here because celery doesn't
    # do it.
    atfork()
    # Dict-like auth blobs expose .get; otherwise treat it as a path.
    if hasattr(self.auth, 'get'):
        factory = ServiceAccountCredentials.from_json_keyfile_dict
    else:
        factory = ServiceAccountCredentials.from_json_keyfile_name
    return factory(self.auth, scopes=scope)
def _encrypt(self, plain):
    """Encrypt a plaintext string using self.key as the key.

    AES-CBC with a random 16-byte IV prepended to the ciphertext.
    The plaintext is space-padded to a multiple of the block size; a
    full block of padding is always added so correct decryption can be
    detected.
    """
    atfork()  # A seed for PyCrypto
    # Use the OS CSPRNG for the IV: the original built it from the
    # non-cryptographic `random` module, which is predictable.
    import os
    iv = os.urandom(16)
    crypto = AES.new(self.key, AES.MODE_CBC, iv)
    if len(plain) % 16 != 0:
        plain += " " * (16 - len(plain) % 16)
    else:
        # Always pad so we can detect properly decrypted files :)
        plain += " " * 16
    return iv + crypto.encrypt(plain)
def get_credentials(self, scope):
    """Build _JWTAccessCredentials from self.auth.

    To prepare local default credentials run:
        gcloud beta auth application-default login
    and check ~/.config/gcloud/application_default_credentials.json
    """
    atfork()  # re-seed PyCrypto's RNG after fork
    # Dict-like auth blobs expose .get; otherwise treat it as a path.
    # NOTE: scopes come from self.scope, not the `scope` argument.
    if hasattr(self.auth, 'get'):
        factory = _JWTAccessCredentials.from_json_keyfile_dict
    else:
        factory = _JWTAccessCredentials.from_json_keyfile_name
    return factory(self.auth, scopes=self.scope)
def decrypt(self, value: typing.Union[str, bytes]) -> str:
    """Base64-decode *value* and decrypt it with the legacy RSA key.

    Returns the plaintext, or the literal string 'decript error' on any
    failure.
    """
    if isinstance(value, str):
        value = value.encode('utf-8')
    # import inspect
    try:
        # Legacy PyCrypto RNG must be re-seeded after a fork.
        atfork()
        return str(self._rsa.decrypt(encoders.decode(value, 'base64')).decode('utf-8'))
    except Exception:
        logger.exception('Decripting: %s', value)
        # logger.error(inspect.stack())
        return 'decript error'
def run(self):
    '''
    Called when the process is started, and loops indefinitely
    until an error is encountered (typically an IOerror from the
    queue pipe being disconnected). During the loop, we attempt
    to pull tasks off the job queue and run them, pushing the result
    onto the results queue. We also remove the host from the blocked
    hosts list, to signify that they are ready for their next task.
    '''

    # PyCrypto's RNG must be re-seeded in the forked worker process.
    if HAS_ATFORK:
        atfork()

    try:
        # execute the task and build a TaskResult from the result
        debug("running TaskExecutor() for %s/%s" % (self._host, self._task))
        executor_result = TaskExecutor(
            self._host,
            self._task,
            self._task_vars,
            self._play_context,
            self._new_stdin,
            self._loader,
            self._shared_loader_obj,
        ).run()
        debug("done running TaskExecutor() for %s/%s" % (self._host, self._task))
        # Clear per-host state so it is not pickled back over the queue.
        self._host.vars = dict()
        self._host.groups = []
        task_result = TaskResult(self._host, self._task, executor_result)

        # put the result on the result queue
        debug("sending task result")
        self._rslt_q.put(task_result)
        debug("done sending task result")

    except AnsibleConnectionFailure:
        self._host.vars = dict()
        self._host.groups = []
        task_result = TaskResult(self._host, self._task, dict(unreachable=True))
        self._rslt_q.put(task_result, block=False)

    except Exception as e:
        # Pipe/teardown errors are expected at shutdown; report anything
        # else back to the parent. TemplateNotFound is special-cased,
        # presumably because it subclasses IOError — confirm.
        if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound):
            try:
                self._host.vars = dict()
                self._host.groups = []
                task_result = TaskResult(self._host, self._task, dict(failed=True, exception=to_unicode(traceback.format_exc()), stdout=''))
                self._rslt_q.put(task_result, block=False)
            except:
                debug(u"WORKER EXCEPTION: %s" % to_unicode(e))
                debug(u"WORKER TRACEBACK: %s" % to_unicode(traceback.format_exc()))

    debug("WORKER PROCESS EXITING")
def decrypt(self, value):
    """Codec-decode *value* and decrypt it with the RSA key.

    Returns the plaintext as text, or the literal string
    'decript error' on any failure.
    """
    if isinstance(value, six.text_type):
        value = value.encode('utf-8')
    # import inspect
    try:
        # PyCrypto's RNG must be re-seeded after a fork.
        atfork()
        return six.text_type(self._rsa.decrypt(value.decode(CryptoManager.CODEC)).decode('utf-8'))
    except Exception:
        logger.exception('Decripting: {0}'.format(value))
        # logger.error(inspect.stack())
        return 'decript error'
def on_process_worker_init(*args, **kwargs):  #IGNORE:W0613
    '''
    Ran at celery worker init.
    '''
    from labinventory.notifications import email
    email.connect_signals()
    # PyCrypto is optional: re-seed its RNG in the worker if installed.
    try:
        from Crypto.Random import atfork
        atfork()
    except ImportError:
        pass
def run_action_on_master(action, master): atfork() try: action_func = getattr(master_fabric, action) with settings(host_string=master['hostname']): action_func(master) return True except: import traceback print "Failed to run", action, "on", master['name'] print traceback.format_exc() return False
def decrypt(self, value):
    """Codec-decode *value* and decrypt it with the RSA key.

    Returns the plaintext as text, or the literal string
    'decript error' on any failure.
    """
    if isinstance(value, six.text_type):
        value = value.encode('utf-8')
    # import inspect
    try:
        # PyCrypto's RNG must be re-seeded after a fork.
        atfork()
        return six.text_type(
            self._rsa.decrypt(value.decode(
                CryptoManager.CODEC)).decode('utf-8'))
    except Exception:
        logger.exception('Decripting: {0}'.format(value))
        # logger.error(inspect.stack())
        return 'decript error'
def _executor_hook(job_queue, result_queue):
    # attempt workaround of https://github.com/newsapps/beeswithmachineguns/issues/17
    # this function also not present in CentOS 6
    if HAS_ATFORK:
        atfork()

    # Workers ignore SIGINT; the parent handles interruption.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    while not job_queue.empty():
        try:
            host = job_queue.get(block=False)
            result_queue.put(multiprocessing_runner._executor(host))
        except Queue.Empty:
            # Another worker drained the queue between empty() and get().
            pass
        except:
            traceback.print_exc()
def _createIV():
    """Create a 16-byte initialization vector.

    @return: a C{str} initialization vector.
    """
    try:
        return randpool.RandomPool(512).get_bytes(16)
    except AssertionError:
        # An AssertionError can come from Crypto/Random/_UserFriendlyRNG.py,
        # which produces an error "PID check failed. RNG must be re-initialized
        # after fork(). Hint: Try Random.atfork()". This seems to only happen
        # when running locally (in development mode).
        atfork()
        return randpool.RandomPool(512).get_bytes(16)
def _block_check(self, input_queue, output_val, num_blocks, passwd, PRIVkey): atfork() # https://stackoverflow.com/questions/16981503/pycrypto-assertionerrorpid-check-failed-rng-must-be-re-initialized-after-fo global found_block for i in iter(input_queue.get, None): if found_block.is_set(): break # Get RSA block and its hash rsa_block = b"".join([self.blocks[i+j] for j in range(num_blocks)]) try: block_hash = SHA256.new(PRIVkey.decrypt(rsa_block) + bytes(passwd, encoding = 'utf-8')).digest() except ValueError: continue if SHA.new(block_hash).digest() == self.info['challenge']: output_val.value = i found_block.set()
def createKey(keySize=32):
    """Create a random key.

    @param keySize: a positive C{int} key length.
    @return: a random string of length C{keySize}.
    """
    for retried in (False, True):
        try:
            return randpool.RandomPool(512).get_bytes(keySize)
        except AssertionError:
            # PyCrypto's _UserFriendlyRNG asserts "PID check failed.
            # RNG must be re-initialized after fork(). Hint: Try
            # Random.atfork()" (seen in development mode). Re-seed and
            # retry once; a second failure propagates.
            if retried:
                raise
            atfork()
def run_action_on_foopy(action, foopy):
    # Re-seed PyCrypto's RNG in the forked fabric worker.
    atfork()
    try:
        action_func = getattr(foopy_fabric, action)
        with settings(host_string="%s.build.mozilla.org" % foopy):
            action_func(foopy)
        return True
    except AttributeError:
        # No function named *action* exists on foopy_fabric.
        print FAIL, "[%s] %s action is not defined." % (foopy, action)
        return False
    except:
        import traceback
        print "Failed to run", action, "on", foopy
        print traceback.format_exc()
        return False
def run_action_on_master(action, master):
    # Re-seed PyCrypto's RNG in the forked fabric worker.
    atfork()
    try:
        action_func = getattr(util.fabric.actions, "action_%s" % action)
        # Prefer the IP address when present, falling back to hostname.
        with settings(host_string=master.get('ip_address', master['hostname'])):
            action_func(master)
        return True
    except AttributeError:
        # No action_<name> function exists on util.fabric.actions.
        print "[%s] %s action is not defined." % (master['hostname'], action)
        return False
    except:
        import traceback
        print "Failed to run", action, "on", master['name']
        print traceback.format_exc()
        return False
def run_action_on_master(action, master): atfork() try: action_func = getattr(util.fabric.actions, "action_%s" % action) with settings( host_string=master.get('ip_address', master['hostname'])): action_func(master) return True except AttributeError: print "[%s] %s action is not defined." % (master['hostname'], action) return False except: import traceback print "Failed to run", action, "on", master['name'] print traceback.format_exc() return False
def open(self, timeout = 120):
    # Load the private key once, then retry the SSH connection every
    # 2 seconds until it succeeds or *timeout* seconds have elapsed.
    key = paramiko.RSAKey.from_private_key_file(self.key_path)
    connected = False
    t_start = time.time()
    while not connected:
        try:
            # Re-seed PyCrypto's RNG in case this runs after a fork.
            atfork()
            self.client = paramiko.SSHClient()
            self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            self.client.connect(self.hostname, self.port, self.username, timeout=5, pkey=key)
            connected = True
        except Exception, e:  # Python 2 except syntax
            t_now = time.time()
            if t_now - t_start > timeout:
                # Out of retries: surface the last connection error.
                raise e
            else:
                time.sleep(2)
def run_action_on_devices(action, foopy_dict): atfork() action_func = getattr(foopy_fabric, action) try: with settings(host_string="%s.build.mozilla.org" % foopy_dict['host']): for device in foopy_dict['devices']: action_func(device) return True return False except AttributeError: print FAIL, "[%s] %s action is not defined." % (foopy_dict['host'], action) return False except: import traceback print "Failed to run", action, "on", foopy_dict['host'] print traceback.format_exc() return False
def get_key_and_internalname(self, public_id):
    # Fetch the record; the 'aeskey' field holds either a 32-char hex
    # AES key (plaintext) or a base64-encoded encrypted blob that must
    # be decrypted before returning.
    data = DBHandler.get_key_and_internalname(self, public_id)
    if data:
        try:
            # AES keys are 16 bytes (hex)
            if len(data['aeskey']) != 32:
                raise ValueError
            else:
                # Raises ValueError if not valid hex → encrypted blob.
                int(data['aeskey'], 16)
        except ValueError:
            # Re-initialize RNG to deal with forks in uwsgi
            atfork()
            # Encrypted ciphertext with base64 encoding
            ciphertext = base64.b64decode(data['aeskey'])
            data['aeskey'] = CRYPTER.decrypt(ciphertext)
    return data
def _executor_hook(job_queue, result_queue): """ callback used by multiprocessing pool """ # attempt workaround of https://github.com/newsapps/beeswithmachineguns/issues/17 # does not occur for everyone, some claim still occurs on newer paramiko # this function not present in CentOS 6 if HAS_ATFORK: atfork() signal.signal(signal.SIGINT, signal.SIG_IGN) while not job_queue.empty(): try: job = job_queue.get(block=False) runner, host = job result_queue.put(runner._executor(host)) except Queue.Empty: pass except: traceback.print_exc()
def _executor_hook(job_queue, result_queue, new_stdin):
    # attempt workaround of https://github.com/newsapps/beeswithmachineguns/issues/17
    # this function also not present in CentOS 6
    if HAS_ATFORK:
        atfork()

    # Workers ignore SIGINT; the parent handles interruption.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    while not job_queue.empty():
        try:
            host = job_queue.get(block=False)
            return_data = multiprocessing_runner._executor(host, new_stdin)
            result_queue.put(return_data)

            if 'LEGACY_TEMPLATE_WARNING' in return_data.flags:
                # pass data back up across the multiprocessing fork boundary
                template.Flags.LEGACY_TEMPLATE_WARNING = True
        except Queue.Empty:
            # Another worker drained the queue between empty() and get().
            pass
        except:
            traceback.print_exc()
def __init__(self, receiver_desc):
    """
    every time is needed, a new keyring is created here.

    :param receiver_desc: dict describing the receiver; must have GPG
        enabled (gpg_key_status == Receiver._gpg_types[1]).
    :raise Exception: when the receiver has no GPG configured.
    """
    # Re-seed PyCrypto's RNG (this may run in a forked worker).
    atfork()

    # Python 2 dict.has_key()
    if receiver_desc.has_key('gpg_key_status') and \
            receiver_desc['gpg_key_status'] != Receiver._gpg_types[1]:  # Enabled
        log.err("Requested GPG initialization for a receiver without GPG configured! %s" %
                receiver_desc['username'])
        raise Exception("Requested GPG init for user without GPG [%s]" % receiver_desc['username'])

    try:
        # Each keyring gets its own randomly-named gnupg home so
        # concurrent handlers do not share state.
        temp_gpgroot = os.path.join(GLSetting.gpgroot, "%s" % random.randint(0, 0xFFFF))
        os.makedirs(temp_gpgroot, mode=0700)
        self.gpgh = GPG(gnupghome=temp_gpgroot, options="--trust-model always")
    except Exception as excep:
        log.err("Unable to instance GPG object: %s" % excep)
        raise excep

    self.receiver_desc = receiver_desc
    log.debug("GPG object initialized for receiver %s" % receiver_desc['username'])
def dump_file_fs(uploaded_file):
    """
    @param uploaded_file: dict with 'body' (seekable file-like),
        'body_len' and 'filename'
    @return: three variables:
        #0 random filename the file is saved under on disk
        #1 SHA256 checksum of the file
        #2 size in bytes of the file
    """
    from Crypto.Random import atfork

    atfork()

    saved_name = rstr.xeger(r"[A-Za-z]{26}")
    filelocation = os.path.join(GLSetting.submission_path, saved_name)

    log.debug(
        "Start saving %d bytes from file [%s]"
        % (uploaded_file["body_len"], uploaded_file["filename"].encode("utf-8"))
    )

    # checksum is computed here, because don't slow down the operation
    # enough to postpone in a scheduled job.
    # https://github.com/globaleaks/GlobaLeaks/issues/600
    sha = SHA256.new()
    with open(filelocation, "w+") as fd:
        uploaded_file["body"].seek(0, 0)
        total_length = 0
        # Stream in 4 KiB chunks. The original's first read() had no
        # size argument, so it slurped the entire body into memory and
        # the chunked loop effectively never ran.
        data = uploaded_file["body"].read(4096)  # 4kb
        while data != "":
            total_length = total_length + len(data)
            sha.update(data)
            # Write through the buffered file object instead of raw
            # os.write on its fileno (mixing the two risks reordering).
            fd.write(data)
            data = uploaded_file["body"].read(4096)  # 4kb

    return (saved_name, sha.hexdigest(), total_length)
def handle(self):
    # SRP-like authentication oracle (Python 2 CTF service).
    # Re-seed PyCrypto's RNG in the forked handler.
    atfork()
    req = self.request
    req.sendall(msg)
    username = req.recv(512)[:-1]
    if username not in permitted_users:
        req.sendall('Sorry, not permitted.\n')
        req.close()
        return
    public_client = int(req.recv(512).strip('\n'), 16) % N
    c = (public_client * permitted_users[username][1]) % N
    # Reject degenerate client values that would fix the session secret.
    if c in [N-g, N-1, 0, 1, g]:
        req.sendall('Sorry, not permitted.\n')
        req.close()
        return
    random_server = random.randint(2, N-3)
    public_server = pow(g, random_server, N)
    # Server's public value is blinded with the stored verifier.
    residue = (public_server + permitted_users[username][1]) % N
    req.sendall(tostr(permitted_users[username][0]) + '\n')
    req.sendall(tostr(residue) + '\n')
    session_secret = (public_client * permitted_users[username][1]) % N
    session_secret = pow(session_secret, random_server, N)
    session_key = H(tostr(session_secret))
    # Client must prove knowledge of the session key first.
    proof = req.recv(512).strip('\n')
    if (proof != H(tostr(residue) + session_key)):
        req.sendall('Sorry, not permitted.\n')
        req.close()
        return
    our_verifier = H(tostr(public_client) + session_key)
    req.sendall(our_verifier + '\n')
    req.sendall('Congratulations! The flag is ' + flag + '\n')
    req.close()
def _multirun(
    self, command, settings_list, shell, pty, combine_stderr, dir,
    format, warn_only, condensed, quiet_exit, laggards_timeout, wait_for
):
    # Fan *command* out over a pool of forked children, one settings
    # dict per invocation. Children receive 1-based job indices over a
    # pipe and send pickled results back over a unix socket.
    callable_command = hasattr(command, '__call__')
    done = 0
    idx = 0
    ctx = self.ctx
    processes = {}
    total = len(settings_list)
    pool_size = env.multirun_pool_size
    socket_path = '/tmp/fab.%s' % uuid4()
    server = socket(AF_UNIX, SOCK_STREAM)
    server.bind(socket_path)
    server.listen(pool_size)
    for client_id in range(min(pool_size, total)):
        from_parent, to_child = pipe()
        pid = fork()
        if pid:
            # Parent: remember the child and hand it its first job.
            # Indices on the wire are 1-based (0 is the kill signal).
            processes[client_id] = [from_parent, to_child, pid, idx]
            idx += 1
            write(to_child, pack('H', idx))
        else:
            # Child: re-seed PyCrypto's RNG after fork.
            atfork()
            def die(*args):
                if quiet_exit:
                    output.status = False
                sys.exit()
            signal(SIGALRM, die)
            if condensed:
                sys.__ori_stdout__ = sys.stdout
                sys.__ori_stderr__ = sys.stderr
                sys.stdout = sys.stderr = dev_null
            while 1:
                # Abort if the parent goes silent for too long.
                alarm(env.multirun_child_timeout)
                data = read(from_parent, index_header_size)
                alarm(0)
                idx = unpack('H', data)[0] - 1
                if idx == -1:
                    # Index 0 on the wire means "no more work".
                    die()
                try:
                    if callable_command:
                        with settings(
                            ctx=ctx, warn_only=warn_only,
                            **settings_list[idx]
                        ):
                            try:
                                response = command()
                            except Exception, error:  # Python 2 syntax
                                handle_failure(command, warn_only)
                                response = error
                    else:
                        with settings(
                            ctx=ctx, warn_only=warn_only,
                            **settings_list[idx]
                        ):
                            response = run(
                                command, shell, pty, combine_stderr, dir,
                                format
                            )
                except BaseException, error:  # Python 2 syntax
                    response = error
                # Report the result to the parent over the unix socket.
                client = socket(AF_UNIX, SOCK_STREAM)
                client.connect(socket_path)
                client.send(dumps((client_id, idx, response)))
                client.close()
def encrypt(self, value):
    """RSA-encrypt *value* (text is UTF-8 encoded first) and return the
    CryptoManager.CODEC-encoded ciphertext as text."""
    if isinstance(value, six.text_type):
        value = value.encode('utf-8')
    atfork()  # re-seed PyCrypto's RNG after fork
    raw = self._rsa.encrypt(value, six.b(''))[0]
    return six.text_type(codecs.encode(raw, CryptoManager.CODEC))
def handle(self):
    # Betting-game oracle (Python 2 CTF service).
    # Re-seed PyCrypto's RNG in the forked handler.
    atfork()
    self.monies = 100
    self.request.send(msg)
    self.request.send("\nBefore we begin, please send us an encrypted version of your public DSA key.\n")
    self.request.send("This will be used to ensure no one attempts to intercept our communications\n")
    pk = recv(self.request,4096)
    sig = self.process(pk)
    if not sig:
        self.request.send("There was an error processing your key. For your safety, we are aborting this session\n")
        return 0
    self.request.send("We received your key as (%s), with our signature (%s)\n" % (str(self.client_pk.__getstate__()), ','.join(map(str,sig))))
    self.request.send("Please verify this information before proceeding, to ensure your safety\n")
    time.sleep(0.5)
    self.request.send("Shall we play a game?\n")
    for _ in xrange(100):
        if self.monies >= 1000000000:
            self.request.send("Holy shit you have a lot of money. Here's a flag: XXXXXXXXXXXXXXX\n")
        while True:
            self.request.send("How much money would you like to bet? (You have $%d)\n"%self.monies)
            m = getn(self.request)
            if m > self.monies:
                self.request.send("You don't have that much money...\n")
            # NOTE(review): an over-bet only prints a warning — any
            # non-negative m still breaks out of this loop; confirm.
            if m < 0:
                self.request.send(" :| \n")
                return 0
            else:
                break
        self.monies -= m
        while True:
            self.request.send("At what odds would you like to play?\n")
            o = getn(self.request)
            if o > 1000000:
                self.request.send("Sorry, due to the expense of generating random numbers, please keep odds below 1000000\n")
            if o <= 0:
                self.request.send(" :| \n")
                return 0
            else:
                break
        self.request.send("Alright, what is your encrypted guess for the prefix\n")
        g = recv(self.request, 4096)
        if not self.verify_guess(g):
            # (string literal was split across source lines; rejoined)
            self.request.send("We had an issue validating your encrypted guess. Please ensure your connection is secure\n")
            return 0
        self.request.send("Ok, generating the secure random number now....\n")
        time.sleep(0.2)
        self.request.send("Beep ")
        time.sleep(0.2)
        self.request.send("Boop ")
        time.sleep(0.2)
        self.request.send("Beep\n")
        time.sleep(0.2)
        r = int(os.urandom(5).encode("hex"),16)
        self.request.send("Okay, the secure RNG is %d\n"%r)
        self.request.send("Now what is your secret key?\n")
        k = recv(self.request, 4096)
        if not self.decrypt_guess(k):
            self.request.send("We had an issue verifying your encrypted guess...\n")
            self.request.send("Either your connection is insecure, or your trying to cheat us D:\n")
            return 0
        if self.guess == r%o:
            self.request.send("Congratulations! You have won $%d!\n"%(o*m))
            self.monies += o*m
        else:
            self.request.send("Sorry, %d %% %d != %d\n"%(r,o,self.guess))