def read_specified(self, length):
    """Read exactly *length* bytes from ``self.rfile``.

    Loops until the requested byte count has been consumed.  If the
    stream ends early, raises ``IncompleteRead`` carrying the bytes
    gathered so far and the number of bytes still outstanding.
    """
    pieces = []
    remaining = length
    while remaining > 0:
        piece = self.rfile.read(remaining)
        if not piece:
            # EOF before the full payload arrived
            raise IncompleteRead(b''.join(pieces), remaining)
        pieces.append(piece)
        remaining -= len(piece)
    return b''.join(pieces)
def _readall_chunked(self):
    """Drain every remaining chunk of a chunked response body.

    Each raw chunk is run through ``stego.extract`` before being
    collected; the decoded pieces are concatenated and returned.  If the
    stream ends mid-chunk, ``IncompleteRead`` is re-raised carrying
    whatever was decoded so far.
    """
    assert self.chunked != _UNKNOWN
    decoded_parts = []
    try:
        chunk_size = self._get_chunk_left()
        # _get_chunk_left() returns None once the terminating 0-chunk
        # has been consumed.
        while chunk_size is not None:
            raw = self._safe_read(chunk_size)
            decoded_parts.append(stego.extract(io.BytesIO(raw)))
            self.chunk_left = 0
            chunk_size = self._get_chunk_left()
        return b"".join(decoded_parts)
    except IncompleteRead:
        # Surface the partially-decoded payload to the caller.
        raise IncompleteRead(b"".join(decoded_parts))
def _readall_chunked(self):
    """Drain every remaining chunk of a chunked response body.

    Collects the raw chunk payloads and, when ``self.send_socket`` is
    attached, forwards each one re-encoded via ``self.chunkize`` followed
    by the chunked-transfer terminator ``0\\r\\n\\r\\n``.  On a truncated
    stream, ``IncompleteRead`` is re-raised with the bytes gathered so far.
    """
    assert self.chunked != _UNKNOWN
    collected = []
    try:
        size = self._get_chunk_left()
        # _get_chunk_left() returns None once the final 0-chunk is seen.
        while size is not None:
            body = self._safe_read(size)
            collected.append(body)
            if self.send_socket:
                # mirror the chunk downstream in wire format
                self.send_socket.sendall(self.chunkize(body))
            self.chunk_left = 0
            size = self._get_chunk_left()
        if self.send_socket:
            # close out the forwarded chunked stream
            self.send_socket.sendall(b"0\r\n\r\n")
        return b''.join(collected)
    except IncompleteRead:
        raise IncompleteRead(b''.join(collected))
def _safe_read(self, amt):
    """Read exactly *amt* bytes from ``self.fp`` in <= MAXAMOUNT slices.

    Each slice is additionally forwarded to ``self.send_socket`` when one
    is attached.  Raises ``IncompleteRead`` (partial data + bytes still
    missing) if the upstream connection ends early.
    """
    pieces = []
    remaining = amt
    while remaining > 0:
        piece = self.fp.read(min(remaining, MAXAMOUNT))
        if not piece:
            raise IncompleteRead(b''.join(pieces), remaining)
        pieces.append(piece)
        remaining -= len(piece)
        if self.send_socket:
            try:
                self.send_socket.sendall(piece)
            except ConnectionResetError:
                # Even when the client drops the connection, we continue to
                # receive data from the remote server -- e.g. chromium-based
                # browsers disconnect before buffering a whole video.
                logging.debug('Client drop the connection')
                self.send_socket = None
    return b"".join(pieces)
def test_monitor_sends_exception_data_and_hb_on_expected_exceptions(
        self, mock_get_data) -> None:
    """For each "expected" network/parsing error raised by the mocked data
    getter, the monitor must publish a raw-data *error* payload and a
    healthy heartbeat (is_alive=True) instead of crashing.
    """
    # NOTE(review): the expected timestamps assume the clock is frozen to
    # 2012-01-01 by a fixture elsewhere in this test class -- confirm.
    json_decode_error = json.JSONDecodeError(msg='test error', doc='test',
                                             pos=2)
    # Maps the exception injected into get_data -> the domain exception
    # whose message/code must appear in the published error payload.
    errors_exceptions_dict = {
        ReqConnectionError('test'): CannotAccessGitHubPageException(
            self.repo_config.releases_page),
        ReadTimeout('test'): CannotAccessGitHubPageException(
            self.repo_config.releases_page),
        IncompleteRead('test'): DataReadingException(
            self.monitor_name, self.repo_config.releases_page),
        ChunkedEncodingError('test'): DataReadingException(
            self.monitor_name, self.repo_config.releases_page),
        ProtocolError('test'): DataReadingException(
            self.monitor_name, self.repo_config.releases_page),
        json_decode_error: JSONDecodeException(json_decode_error)
    }
    try:
        self.test_monitor._initialise_rabbitmq()
        for error, data_ret_exception in errors_exceptions_dict.items():
            # Make the next _monitor() call hit this particular error.
            mock_get_data.side_effect = error
            expected_output_data = {
                'error': {
                    'meta_data': {
                        'monitor_name': self.test_monitor.monitor_name,
                        'repo_name': self.test_monitor.repo_config.repo_name,
                        'repo_id': self.test_monitor.repo_config.repo_id,
                        'repo_parent_id':
                            self.test_monitor.repo_config.parent_id,
                        'time': datetime(2012, 1, 1).timestamp()
                    },
                    'message': data_ret_exception.message,
                    'code': data_ret_exception.code,
                }
            }
            expected_output_hb = {
                'component_name': self.test_monitor.monitor_name,
                'is_alive': True,
                'timestamp': datetime(2012, 1, 1).timestamp()
            }
            # Delete the queue before to avoid messages in the queue on
            # error.
            self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
            res = self.test_monitor.rabbitmq.queue_declare(
                queue=self.test_queue_name, durable=True, exclusive=False,
                auto_delete=False, passive=False)
            # Freshly declared queue must start empty.
            self.assertEqual(0, res.method.message_count)
            self.test_monitor.rabbitmq.queue_bind(
                queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
                routing_key='github')
            self.test_monitor.rabbitmq.queue_bind(
                queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
                routing_key='heartbeat.worker')
            self.test_monitor._monitor()
            # By re-declaring the queue again we can get the number of
            # messages in the queue.
            res = self.test_monitor.rabbitmq.queue_declare(
                queue=self.test_queue_name, durable=True, exclusive=False,
                auto_delete=False, passive=True)
            # There must be 2 messages in the queue, the heartbeat and the
            # processed data
            self.assertEqual(2, res.method.message_count)
            # Check that the message received is actually the processed data
            _, _, body = self.test_monitor.rabbitmq.basic_get(
                self.test_queue_name)
            self.assertEqual(expected_output_data, json.loads(body))
            # Check that the message received is actually the HB
            _, _, body = self.test_monitor.rabbitmq.basic_get(
                self.test_queue_name)
            self.assertEqual(expected_output_hb, json.loads(body))
    except Exception as e:
        self.fail("Test failed: {}".format(e))
def get_file_via_http(base_url='', file_name='', full_url='', data=None,
                      headers=None, certfile=None, keyfile=None, method=None,
                      filename_to_upload=None, force_access=False):
    """Fetch a file over HTTP(S) into the current working directory.

    The target URL is *full_url* if given, else "<base_url>/cache/<file_name>".
    Unless *force_access* is set, an existing copy in the current directory,
    $HOME, or the payload work directory short-circuits the download (the
    latter two via a symlink).  When *filename_to_upload* is given the
    request body becomes a multipart upload instead of urlencoded *data*.
    Up to 3 attempts are made; the downloaded size is validated against the
    Content-Length header.  Returns (True, None) on success or
    (False, error_string) on failure.
    """
    if full_url == '':
        url = "%s/cache/%s" % (base_url, file_name)
    else:
        url = full_url
    if file_name == '':
        # derive the local name from the last URL path component
        file_name = url.split('/')[-1]
    tmpMsg = "--- Access to %s" % url
    if headers is None:
        headers = {}
    if filename_to_upload is not None:
        # multipart upload: replace *data* with the encoded body and set
        # the matching headers
        data, content_type = encode_multipart_form_data(
            'file', filename_to_upload)
        headers.update({
            'Content-Type': content_type,
            'Content-Length': len(data)
        })
    else:
        if data is not None:
            tmpMsg += ' {0}'.format(str(data))
        if data is not None:
            # urlencode for a regular POST body
            data = urlencode(data).encode()
    print(tmpMsg)
    # the file already exists in the current directory
    if not force_access:
        if os.path.exists(file_name):
            print(
                "skip since the file already exists in the current directory")
            return True, None
        # the file exists in the home directory or payload working directory
        for tmpEnv in [ENV_HOME, ENV_WORK_DIR]:
            if tmpEnv in os.environ:
                fileInHome = os.path.join(os.environ[tmpEnv], file_name)
                if os.path.exists(fileInHome):
                    # make symlink
                    os.symlink(fileInHome, file_name)
                    print("skip since the file is available in {0}".format(
                        os.environ[tmpEnv]))
                    return True, None
    isOK = False
    errStr = None
    for i in range(3):
        try:
            if method is None:
                req = Request(url, data=data, headers=headers)
            else:
                try:
                    req = Request(url, data=data, headers=headers,
                                  method=method)
                except Exception:
                    # for python 2
                    class MyRequest(Request):
                        def get_method(self, *args, **kwargs):
                            return method
                    req = MyRequest(url, data=data, headers=headers)
            try:
                # NOTE(review): SSLContext(PROTOCOL_SSLv23) performs no
                # certificate verification by default -- presumably
                # intentional for this environment, but worth confirming.
                context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                if certfile is not None:
                    context.load_cert_chain(certfile, keyfile)
            except Exception:
                # for old python
                res = urlopen(req)
            else:
                res = urlopen(req, context=context)
            with open(file_name, 'wb') as f:
                full_read = res.read()
                f.write(full_read)
            # size check
            try:
                cont_size = res.headers.get('content-length', None)
            except Exception:
                cont_size = None
            if cont_size is None:
                print('skip size check since content-length is missing')
            else:
                cont_size = int(cont_size)
                act_size = os.stat(file_name).st_size
                print('content-length={0} actual-size={1}'.format(
                    cont_size, act_size))
                if cont_size != act_size:
                    # truncated download -> retried via the except path below
                    raise IncompleteRead(full_read, cont_size - act_size)
            isOK = True
            break
        except HTTPError as e:
            errStr = 'HTTP code: {0} - Reason: {1}'.format(e.code, e.reason)
            # doesn't exist
            if e.code == 404:
                break
            # NOTE(review): non-404 HTTP errors retry immediately (no sleep),
            # unlike the generic path below -- confirm this is intended.
        except Exception as e:
            errStr = str(e)
            time.sleep(30)
    if not isOK:
        return False, "Failed with {0}".format(errStr)
    if not os.path.exists(file_name):
        return False, 'Unable to fetch %s from web' % file_name
    print("succeeded")
    return True, None