def test_filter_frames(self):
    # Get all the protocols
    protocols = Dissector.get_implemented_protocols()

    # Filter on each protocol
    for protocol in protocols:

        # Filter the frames on this protocol
        filtered, ignored = Frame.filter_frames(self.frames, protocol)

        # Get the id of frames with this protocol
        ids = self.frames_with_protocol(protocol)

        # Check the two lists received
        self.assertEqual(type(filtered), list)
        for f in filtered:
            self.assertEqual(type(f), Frame)
        self.assertEqual(type(ignored), list)
        for i in ignored:
            self.assertEqual(type(i), Frame)
        self.assertEqual(len(filtered) + len(ignored), len(self.frames))

        # Check the length of filtered
        self.assertEqual(len(filtered), len(ids))

        # Check that each element goes together
        for frame in filtered:
            dictionary = frame.dict()
            self.assertIn(dictionary['id'], ids)
def test___contains__(self):
    # Check the provided structure
    self.assertEqual(len(self.FRAMES_PROTOCOL), len(self.frames))

    # Get all the protocols
    protocols = Dissector.get_implemented_protocols()

    # Check that the protocols are in the correct frame
    i = 1
    for frame in self.frames:

        # Get the elements that shouldn't be in it
        should_not_be = self.list_diff(self.FRAMES_PROTOCOL[i], protocols)

        # Check that those which shouldn't be present really aren't
        for non_present in should_not_be:
            self.assertNotIn(non_present, frame)

        # Check that those which are present really are
        for prot in self.FRAMES_PROTOCOL[i]:
            self.assertIn(prot, frame)

        # Increment counter
        i += 1
def test_filter_frames_list_of_non_frame(self):
    # Get all the protocols
    protocols = Dissector.get_implemented_protocols()

    # Filter on each protocol
    for protocol in protocols:

        # Filter on a list that does not contain Frame objects
        with self.assertRaises(TypeError):
            filtered, ignored = Frame.filter_frames(protocols, protocol)
def test_filter_frames_only_single_frame(self):
    # Get all the protocols
    protocols = Dissector.get_implemented_protocols()

    # Filter on each protocol
    for protocol in protocols:

        # Filter on a single Frame instead of a list of frames
        with self.assertRaises(InputParameterError):
            filtered, ignored = Frame.filter_frames(
                self.frames[0],
                protocol
            )
def test_filter_frames_list_of_frame_with_a_non_frame(self):
    # Get all the protocols
    protocols = Dissector.get_implemented_protocols()

    # Insert a non frame object
    self.frames.insert(1, protocols[1])

    # Filter on each protocol
    for protocol in protocols:

        # Filter on a list containing a non-Frame object
        with self.assertRaises(TypeError):
            filtered, ignored = Frame.filter_frames(self.frames, protocol)
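# The tests above exercise the contract assumed for Frame.filter_frames: it
# takes a list of Frame objects plus a protocol class and returns a pair of
# lists (filtered, ignored) that partition the input. The sketch below is only
# an illustration of that contract, not part of the test suite; the 'frames'
# argument stands in for a fixture like self.frames.
def _example_filter_frames_usage(frames):
    # Pick any implemented protocol class to filter on
    protocol = Dissector.get_implemented_protocols()[0]
    filtered, ignored = Frame.filter_frames(frames, protocol)
    # The two lists together cover exactly the input frames
    assert len(filtered) + len(ignored) == len(frames)
    return filtered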
def get_protocol(
    protocol: optional(str) = None
) -> either(OrderedDict, type(None)):
    """
    Function to get the protocols

    :param protocol: The name of the protocol
    :type protocol: str

    :return: The implemented protocols, or a single one
    :rtype: OrderedDict
    """

    # Get the global variable to store the protocols
    global PROTOCOLS

    # If empty for the moment
    if len(PROTOCOLS) == 0:

        # Put the 'None' protocol which gets everything
        PROTOCOLS['None'] = OrderedDict()
        PROTOCOLS['None']['_type'] = 'implemented_protocol'
        PROTOCOLS['None']['name'] = 'None'
        PROTOCOLS['None']['description'] = ''

        # Get the protocol classes from the dissector
        prot_classes = Dissector.get_implemented_protocols()

        # Build the clean results list
        for prot_class in prot_classes:

            # Put the protocol
            prot = OrderedDict()
            prot['_type'] = 'implemented_protocol'
            prot['name'] = prot_class.__name__
            prot['description'] = ''

            # Add it to the protocol variable
            PROTOCOLS[prot['name']] = prot

    # If a single protocol is asked
    if protocol is not None:
        if protocol in PROTOCOLS:
            return PROTOCOLS[protocol]
        else:
            return None

    # Return all the protocols
    else:
        return PROTOCOLS
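# A minimal sketch of how the handlers below are expected to use get_protocol():
# a lookup by name returns one OrderedDict describing the protocol, an unknown
# name returns None, and calling it without arguments returns the whole table.
# The 'CoAP' name is an assumption made for illustration only.
def _example_get_protocol_usage():
    all_protocols = get_protocol()   # OrderedDict with every known entry
    coap = get_protocol('CoAP')      # single entry, or None if unknown
    if coap is None:
        return None, len(all_protocols)
    return coap['name'], len(all_protocols)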
def do_POST(self):
    # The job counter
    global job_id
    job_id += 1

    # ########################## ttproto API ########################### #

    # POST handler for the analyzer_testCaseAnalyze uri
    # It will allow users to analyze a pcap file corresponding to a TC
    #
    # \param pcap_file => The pcap file that we want to analyze
    # \param token => The token previously provided
    # \param testcase_id => The id of the corresponding test case
    # The pcap_file or the token is required, having both is also forbidden
    #
    if self.path == '/api/v1/analyzer_testCaseAnalyze':

        # Send the header
        self.send_response(200)
        self.send_header('Content-Type', 'application/json;charset=utf-8')
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # Get the content type
        try:
            content_type = cgi.parse_header(self.headers['Content-Type'])
        except TypeError:
            self.api_error(
                "Non empty POST data and format of 'multipart/form-data' expected"
            )
            return

        # Get post values
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            keep_blank_values=True,
            environ={
                'REQUEST_METHOD': 'POST',
                'CONTENT_TYPE': content_type[0]
            })

        # Check that we have the two values
        if any((
            len(form) != 2,
            'testcase_id' not in form,
            all((  # None of the two required => Error
                'pcap_file' not in form,
                'token' not in form
            )),
            all((  # Both of them => Error
                'pcap_file' in form,
                'token' in form
            ))
        )):
            self.api_error(
                'Expected POST=([pcap_file={file}|token={text}], testcase_id={text})'
            )
            return

        # Get the test case and its information
        testcase_id = form.getvalue('testcase_id')
        if not type(testcase_id) == str:
            self.api_error(
                'The value of the testcase_id should be a string from text input'
            )
            return

        # Try to get the test case
        try:
            test_case = get_test_cases(testcase_id)
        except FileNotFoundError:
            self.api_error('Test case %s not found' % testcase_id)
            return

        # Get the token
        token = form.getvalue('token')

        # Get analysis results from the token
        if token:

            # Just get the path
            pcap_path = os.path.join(TMPDIR, token + '.dump')

        # Get analysis results from the pcap file
        else:

            # Check headers
            if any((
                len(content_type) == 0,
                content_type[0] is None,
                content_type[0] != 'multipart/form-data'
            )):
                self.api_error(
                    "POST format of 'multipart/form-data' expected, no file input 'pcap_file' found"
                )
                return

            # Get the same token or generate a new one
            token = get_token(token)

            # Get and check the pcap file entered
            pcap_file = form.getvalue('pcap_file')

            # Path to save the file
            pcap_path = os.path.join(TMPDIR, token + '.dump')

            # Write the pcap file to a temporary destination
            try:
                with open(pcap_path, 'wb') as f:
                    f.write(pcap_file)
            except:
                self.api_error(
                    "Couldn't write the temporary file %s" % pcap_path
                )
                return

            # Get the dissection from analysis tool
            try:
                dissection = Dissector(pcap_path).dissect()
            except pure_pcapy.PcapError:
                self.api_error(
                    "Expected 'pcap_file' to be a non empty pcap file"
                )
                return
            except:
                self.api_error(
                    "Couldn't read the temporary file %s" % pcap_path
                )
                return

            # Save the json dissection result into a file
            json_save = os.path.join(TMPDIR, token + '.json')
            try:
                with open(json_save, 'w') as f:
                    json.dump(dissection, f)
            except:
                self.api_error("Couldn't write the json file")
                return

        # Get the result of the analysis
        analysis_results = Analyzer('tat_coap').analyse(pcap_path, testcase_id)

        # self.log_message("###############################################")
        # self.log_message("Verdict description is : %s", analysis_results[0][3])
        # self.log_message("###############################################")
        # print(analysis_results)

        # Guard against analysis results that don't have the expected
        # structure (a 6-tuple whose last element is the list of exceptions)
        try:
            assert type(analysis_results) == tuple
            assert len(analysis_results) == 6
            assert type(analysis_results[0]) == str
            assert type(analysis_results[1]) == str
            assert type(analysis_results[2]) == list
            assert type(analysis_results[3]) == str
            assert type(analysis_results[5]) == list
            for exception_tuple in analysis_results[5]:
                assert type(exception_tuple) == tuple
                assert len(exception_tuple) == 3
                assert isinstance(exception_tuple[0], type)
                assert isinstance(exception_tuple[1], Exception)
                assert isinstance(exception_tuple[2], object)
            assert analysis_results[0] == test_case['tc_basic']['id']
        except AssertionError:
            self.api_error(
                'Problem with the analysis of TC %s, wrong result received' % testcase_id
            )
            return

        # Build the verdict object
        verdict = OrderedDict()
        verdict['_type'] = 'verdict'
        verdict['verdict'] = analysis_results[1]
        verdict['description'] = analysis_results[3]
        verdict['review_frames'] = analysis_results[2]

        token_res = OrderedDict()
        token_res['_type'] = 'token'
        token_res['value'] = token

        # Prepare the result to return
        json_result = OrderedDict()
        json_result['_type'] = 'response'
        json_result['ok'] = True
        json_result['content'] = [
            token_res,
            test_case['tc_basic'],
            verdict
        ]

        # Dump the json result of the analysis
        print(json.dumps(json_result))
        return

    # POST handler for the analyzer_allMightyAnalyze uri
    # It will allow users to analyze a pcap file without giving
    # a corresponding test case
    #
    # \param pcap_file => The pcap file that we want to analyze
    # \param token => The token previously provided
    # The pcap_file or the token is required, having both is also forbidden
    #
    elif self.path == '/api/v1/analyzer_allMightyAnalyze':

        # Send the header
        self.send_response(200)
        self.send_header('Content-Type', 'application/json;charset=utf-8')
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # Not implemented for the moment
        self.api_error(
            "This method is not implemented yet, please come back later"
        )
        return

    # POST handler for the dissector_dissectFile uri
    # It will allow users to dissect a pcap file
    #
    # \param pcap_file => The pcap file that we want to dissect
    # \param protocol_selection => The protocol name
    #
    elif self.path == '/api/v1/dissector_dissectFile':

        # Send the header
        self.send_response(200)
        self.send_header('Content-Type', 'application/json;charset=utf-8')
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # Get the content type
        try:
            content_type = cgi.parse_header(self.headers['Content-Type'])
        except TypeError:
            self.api_error(
                "Non empty POST data and format of 'multipart/form-data' expected"
            )
            return

        # Check headers
        if any((
            len(content_type) == 0,
            content_type[0] is None,
            content_type[0] != 'multipart/form-data'
        )):
            self.api_error(
                "POST format of 'multipart/form-data' expected, no file input 'pcap_file' found"
            )
            return

        # Get post values
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={
                'REQUEST_METHOD': 'POST',
                'CONTENT_TYPE': content_type[0]
            })

        # Check the parameters passed
        if any((
            len(form) != 2,
            'pcap_file' not in form,
            'protocol_selection' not in form
        )):
            self.api_error(
                'Expected POST=(pcap_file={file}, protocol_selection={text})'
            )
            return

        # Check the protocol_selection value
        protocol_selection = form.getvalue('protocol_selection')
        if not type(protocol_selection) == str:
            self.api_error(
                'Expected protocol_selection post value to be a text (i.e. a string)'
            )
            return

        # Resolve the requested protocol
        prot = get_protocol(protocol_selection)
        if prot is None:
            self.api_error('Unknown protocol %s' % protocol_selection)
            return

        # Generate a new token
        token = get_token()

        # Get the pcap file
        pcap_file = form.getvalue('pcap_file')

        # Path to save the file
        pcap_path = os.path.join(TMPDIR, token + '.dump')

        # Write the pcap file to a temporary destination
        try:
            with open(pcap_path, 'wb') as f:
                f.write(pcap_file)
        except:
            self.api_error("Couldn't write the temporary file")
            return

        # Prepare the result to return
        json_result = OrderedDict()
        json_result['_type'] = 'response'
        json_result['ok'] = True

        token_res = OrderedDict()
        token_res['_type'] = 'token'
        token_res['value'] = token

        # Get the dissection from dissector tool
        try:
            dissection = Dissector(pcap_path).dissect(eval(prot['name']))
        except TypeError as e:
            self.api_error('Dissector error: ' + str(e))
            return
        except pure_pcapy.PcapError:
            self.api_error(
                "Expected 'pcap_file' to be a non empty pcap file"
            )
            return
        except:
            self.api_error(
                "Couldn't read the temporary file %s and protocol is %s"
                % (pcap_path, prot['name'])
            )
            return

        # Save the json dissection result into a file
        json_save = os.path.join(TMPDIR, token + '.json')
        try:
            with open(json_save, 'w') as f:
                json.dump(dissection, f)
        except:
            self.api_error("Couldn't write the json file")
            return

        # Add the token to the results
        dissection.insert(0, token_res)

        # The json result to return
        json_result['content'] = dissection

        # Dump the json result of the dissection
        print(json.dumps(json_result))
        return

    # ######################## End of API part ######################### #

    # DEPRECATED
    # elif (self.path == "/submit"):
    #     if os.fork():
    #         # close the socket right now(because the
    #         # requesthandler may do a shutdown(), which triggers a
    #         # SIGCHLD in the child process)
    #         self.connection.close()
    #         return
    #     parser = BytesFeedParser()
    #     ct = self.headers.get("Content-Type")
    #     if not ct.startswith("multipart/form-data;"):
    #         self.send_error(400)
    #         return
    #     parser.feed(bytes("Content-Type: %s\r\n\r\n" % ct, "ascii"))
    #     parser.feed(self.rfile.read(int(self.headers['Content-Length'])))
    #     msg = parser.close()
    #
    #     # agree checkbox is selected
    #     for part in msg.get_payload():
    #         if isinstance(part, email.message.Message):
    #             disposition = part.get("content-disposition")
    #             if disposition and 'name="agree"' in disposition:
    #                 agree = True
    #                 break
    #     else:
    #         agree = False
    #
    #     # urifilter checkbox is selected
    #     for part in msg.get_payload():
    #         if isinstance(part, email.message.Message):
    #             disposition = part.get("content-disposition")
    #             if disposition and 'name="urifilter"' in disposition:
    #                 urifilter = True
    #                 break
    #     else:
    #         urifilter = False
    #
    #     # content of the regex box
    #     for part in msg.get_payload():
    #         if isinstance(part, email.message.Message):
    #             disposition = part.get("content-disposition")
    #             if disposition and 'name="regex"' in disposition:
    #                 regex = part.get_payload()
    #                 if not regex:
    #                     regex = None
    #                 break
    #     else:
    #         regex = None
    #
    #     # profile radio buttons
    #     for part in msg.get_payload():
    #         if isinstance(part, email.message.Message):
    #             disposition = part.get("content-disposition")
    #             if disposition and 'name="profile"' in disposition:
    #                 profile = part.get_payload()
    #                 break
    #     else:
    #         profile = "client"
    #
    #     # receive the pcap file
    #     for part in msg.get_payload():
    #         if isinstance(part, email.message.Message):
    #             disposition = part.get("content-disposition")
    #             if disposition and 'name="file"' in disposition:
    #                 mo = re.search('filename="([^"]*)"', disposition)
    #                 orig_filename = mo.group(1) if mo else None
    #                 timestamp = time.strftime("%y%m%d_%H%M%S")
    #                 pcap_file = os.path.join(
    #                     (DATADIR if agree else TMPDIR),
    #                     "%s_%04d.dump" % (timestamp, job_id)
    #                 )
    #                 self.log_message("uploading %s(urifilter=%r, regex=%r)", pcap_file, urifilter, regex)
    #                 with open(pcap_file, "wb") as fd:
    #                     # FIXME: using hidden API(._payload) because it seems that there is something broken with the encoding when getting the payload using .get_payload()
    #                     fd.write(part._payload.encode("ascii", errors="surrogateescape"))
    #                 break
    #     else:
    #         self.send_error(400)
    #         return
    #
    #     self.send_response(200)
    #     self.send_header("Content-Type", "text/html;charset=utf-8")
    #     self.end_headers()
    #
    #     out = UTF8Wrapper(self.wfile)
    #     self.wfile.flush()
    #     os.dup2(self.wfile.fileno(), sys.stdout.fileno())
    #
    #     try:
    #         exceptions = []
    #         analysis.analyse_file_html(pcap_file, orig_filename, urifilter, exceptions, regex, profile)
    #         for tc in exceptions:
    #             self.log_message("exception in %s", type(tc).__name__, append=tc.exception)
    #     except pure_pcapy.PcapError:
    #         print("Bad file format!")
    #
    #     shutdown()

    # If we didn't manage to bind the request
    else:
        self.send_error(404)
        return
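# A hedged client-side sketch of the analyzer_testCaseAnalyze POST handled
# above. It assumes the third-party 'requests' package and a server reachable
# at http://127.0.0.1:2080 (host, port and pcap path are illustrative only);
# the handler itself only requires multipart/form-data carrying either a
# 'pcap_file' upload or a previously obtained 'token', plus a 'testcase_id'.
def _example_analyze_request():
    import requests  # assumption: not a dependency of this module
    with open('traces/TD_COAP_CORE_01.pcap', 'rb') as pcap:
        response = requests.post(
            'http://127.0.0.1:2080/api/v1/analyzer_testCaseAnalyze',
            data={'testcase_id': 'TD_COAP_CORE_01'},
            files={'pcap_file': pcap}
        )
    # Expected shape: {'_type': 'response', 'ok': True, 'content': [...]}
    return response.json()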
def do_GET(self):
    # Get the url and parse it
    url = urlparse(self.path)

    if url.path == "/coap-tool.sh":
        fp = open("coap-tool.sh", "rb")
        if not fp:
            self.send_response(500)
            return
        self.send_response(200)
        self.send_header("Content-Type", "text/x-sh")
        self.end_headers()
        self.wfile.write(fp.read())
        return
    elif url.path == "/doc/ETSI-CoAP4-test-list.pdf":
        fp = open("doc/ETSI-CoAP4-test-list.pdf", "rb")
        if not fp:
            self.send_response(500)
            return
        self.send_response(200)
        self.send_header("Content-Type", "application/pdf")
        self.end_headers()
        self.wfile.write(fp.read())
        return
    elif url.path == "/doc/Additive-IRISA-CoAP-test-list.pdf":
        fp = open("doc/Additive-IRISA-CoAP-test-list.pdf", "rb")
        if not fp:
            self.send_response(500)
            return
        self.send_response(200)
        self.send_header("Content-Type", "application/pdf")
        self.end_headers()
        self.wfile.write(fp.read())
        return
    elif url.path == "/doc/Additive-IRISA-CoAP-test-description.pdf":
        fp = open("doc/Additive-IRISA-CoAP-test-description.pdf", "rb")
        if not fp:
            self.send_response(500)
            return
        self.send_response(200)
        self.send_header("Content-Type", "application/pdf")
        self.end_headers()
        self.wfile.write(fp.read())
        return

    # ########################## ttproto API ########################### #
    #
    # ##### Personal remarks
    #
    # For the moment, using this webserver is fine, but for scaling a more
    # robust web platform using a framework would be better. This one is
    # sufficient for the moment.
    #
    # We check on the path for the whole uri; maybe we should bind a version
    # prefix like "/api/v1" and then dispatch on the methods behind it.
    #
    # ##### End of remarks

    # GET handler for the analyzer_getTestCases uri
    # It will give to the gui the list of the test cases
    #
    elif url.path == '/api/v1/analyzer_getTestCases':

        # Send the header
        self.send_response(200)
        self.send_header("Content-Type", "application/json;charset=utf-8")
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # Get the list of test cases
        try:
            test_cases = get_test_cases()
        except FileNotFoundError as fnfe:
            self.api_error(
                'Problem during fetching the test cases list:\n' + str(fnfe)
            )
            return

        clean_test_cases = []
        for tc in test_cases:
            clean_test_cases.append(test_cases[tc]['tc_basic'])

        # If no test case found
        if len(clean_test_cases) == 0:
            self.api_error('No test cases found')
            return

        # The result to return
        json_result = OrderedDict()
        json_result['_type'] = 'response'
        json_result['ok'] = True
        json_result['content'] = clean_test_cases

        # Just give the json representation of the test cases list
        print(json.dumps(json_result))
        return

    # GET handler for the analyzer_getTestcaseImplementation uri
    # It will allow developers to get the implementation script of a TC
    #
    # /param testcase_id => The unique id of the test case
    #
    elif url.path == '/api/v1/analyzer_getTestcaseImplementation':

        # Send the header
        self.send_response(200)
        self.send_header("Content-Type", "application/json;charset=utf-8")
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # Get the parameters
        params = parse_qs(url.query)
        try:
            # Check parameters
            if any((
                len(params) != 1,
                'testcase_id' not in params,
                not correct_get_param(params['testcase_id'])
            )):
                raise

        # Catch errors (key mostly) or if wrong parameter
        except:
            self.api_error(
                "Incorrect GET parameters, expected '?testcase_id={string}'"
            )
            return

        # Get the test case
        try:
            test_case = get_test_cases(params['testcase_id'][0])
        except FileNotFoundError:
            self.api_error(
                'Test case %s not found' % params['testcase_id'][0]
            )
            return

        # The result to return
        json_result = OrderedDict()
        json_result['_type'] = 'response'
        json_result['ok'] = True
        json_result['content'] = [
            test_case['tc_basic'],
            test_case['tc_implementation']
        ]

        # Here the process from ttproto core
        print(json.dumps(json_result))
        return

    # GET handler for the analyzer_getProtocols uri
    # It will give to the gui the list of the protocols implemented
    #
    elif url.path in ('/api/v1/analyzer_getProtocols',
                      '/api/v1/dissector_getProtocols'):

        # Send the header
        self.send_response(200)
        self.send_header("Content-Type", "application/json;charset=utf-8")
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # The result to return
        json_result = OrderedDict()
        json_result['_type'] = 'response'
        json_result['ok'] = True
        json_result['content'] = []

        # Get the protocols
        protocols = get_protocol()
        for prot in protocols:
            json_result['content'].append(protocols[prot])

        # Just give the json representation of the protocols list
        print(json.dumps(json_result))
        return

    # GET handler for the analyzer_getFrames and dissector_getFrames uri
    # It will allow a user to get a single frame from the previous pcap
    # if a frame_id is provided, otherwise it will return all the frames
    #
    # /param token => The token of the corresponding pcap file
    # /param protocol_selection => The protocol we want to filter on
    # /param frame_id (optional) => The id of the wanted frame
    #
    # /remark We redirect to the same function but later those two
    # may diverge
    #
    elif url.path in ('/api/v1/analyzer_getFrames',
                      '/api/v1/dissector_getFrames'):

        # Send the header
        self.send_response(200)
        self.send_header("Content-Type", "application/json;charset=utf-8")
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # Get the parameters
        params = parse_qs(url.query)
        try:
            # Only token and protocol_selection
            if len(params) == 2:
                if any((
                    'token' not in params,
                    'protocol_selection' not in params,
                    not correct_get_param(params['token']),
                    not correct_get_param(params['protocol_selection'])
                )):
                    raise

            # token, protocol_selection and frame_id
            elif len(params) == 3:
                if any((
                    'token' not in params,
                    'protocol_selection' not in params,
                    'frame_id' not in params,
                    not correct_get_param(params['token']),
                    not correct_get_param(params['frame_id'], is_number=True),
                    not correct_get_param(params['protocol_selection'])
                )):
                    raise

            # Wrong number of parameters
            else:
                raise

        # Catch errors (key mostly) or if wrong parameter
        except:
            self.api_error(
                "Incorrect parameters, expected '?token={string}&protocol_selection={string}(&frame_id={integer})?'"
            )
            return

        # Format the id before passing it to the process function
        token = params['token'][0]
        protocol_selection = params['protocol_selection'][0]

        # Check the protocol
        protocol = get_protocol(protocol_selection)
        if protocol is None:
            self.api_error('Unknown %s protocol' % protocol_selection)
            return

        # Create the frames object
        frames = []

        # Get the json file
        try:
            json_path = os.path.join(TMPDIR, "%s.json" % token)
            with open(json_path, 'r') as json_fp:
                frames = json.load(json_fp, object_pairs_hook=OrderedDict)
        except:
            self.api_error('Session identified by token %s not found' % token)
            return

        # If a single frame is asked
        if len(params) == 3:
            frame_id = int(params['frame_id'][0])
            for frame in frames:
                if frame['id'] == frame_id:
                    frames = [frame]

            # If no frame with this id
            if len(frames) != 1:
                self.api_error('No frame with id=%u found' % frame_id)
                return

        # The result to return
        json_result = OrderedDict()
        json_result['_type'] = 'response'
        json_result['ok'] = True

        token_res = OrderedDict()
        token_res['_type'] = 'token'
        token_res['value'] = token

        frames.insert(0, token_res)
        json_result['content'] = frames

        # Dump the json result
        print(json.dumps(json_result))
        return

    # GET handler for the dissector_getFramesSummary uri
    # It will allow a user to get the summary of all the frames
    #
    # /param token => The token of the corresponding pcap file
    # /param protocol_selection => The protocol we want to filter on
    #
    elif url.path == '/api/v1/dissector_getFramesSummary':

        # Send the header
        self.send_response(200)
        self.send_header("Content-Type", "application/json;charset=utf-8")
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # Get the parameters
        params = parse_qs(url.query)
        try:
            # Only token and protocol_selection
            if any((
                len(params) != 2,
                'token' not in params,
                'protocol_selection' not in params,
                not correct_get_param(params['token']),
                not correct_get_param(params['protocol_selection'])
            )):
                raise

        # Catch errors (key mostly) or if wrong parameter
        except:
            self.api_error(
                "Incorrect parameters, expected '?token={string}&protocol_selection={string}'"
            )
            return

        # Format the id before passing it to the process function
        token = params['token'][0]
        protocol_selection = params['protocol_selection'][0]

        # Check the protocol
        protocol = get_protocol(protocol_selection)
        if protocol is None:
            self.api_error('Unknown %s protocol' % protocol_selection)
            return

        # Get the dump file
        pcap_path = os.path.join(TMPDIR, "%s.dump" % token)
        try:
            # Get summaries from it
            frames_summary = Dissector(pcap_path).summary(
                eval(protocol['name'])
            )
        except TypeError as e:
            self.api_error('Dissector error:\n' + str(e))
            return
        except:
            self.api_error('Session identified by token %s not found' % token)
            return

        # The result to return
        json_result = OrderedDict()
        json_result['_type'] = 'response'
        json_result['ok'] = True

        token_res = OrderedDict()
        token_res['_type'] = 'token'
        token_res['value'] = token

        json_result['content'] = []
        json_result['content'].append(token_res)

        # Add each frame summary
        for f_id, f_sum in frames_summary:
            summary = OrderedDict()
            summary['_type'] = 'frame_summary'
            summary['frame_id'] = f_id
            summary['frame_summary'] = f_sum
            json_result['content'].append(summary)

        # Dump the json result
        print(json.dumps(json_result))
        return

    # ######################## End of API part ######################### #

    elif url.path != "/":
        self.send_error(404)
        return

    self.send_response(200)
    self.send_header("Content-Type", "text/html;charset=utf-8")
    self.end_headers()

    with XHTML10Generator(output=UTF8Wrapper(self.wfile)) as g:

        with g.head:
            g.title("IRISA CoAP interoperability Testing Tool")
            g.style(
                "img {border-style: solid; border-width: 20px; border-color: white;}",
                type="text/css")

        g.h1("IRISA CoAP interoperability Testing Tool")

        g.b("Tool version: ")
        # g("%s" % Analyzer.TOOL_VERSION)
        with g.br():  # FIXME: bug generator
            pass

        with g.form(method="POST", action="submit",
                    enctype="multipart/form-data"):
            g("This tool(more details at ")
            g.a("www.irisa.fr/tipi",
                href="http://www.irisa.fr/tipi/wiki/doku.php/passive_validation_tool_for_coap")
            g(") allows executing CoAP interoperability test suites(see below Available Test Scenarios) on the provided traces of CoAP Client-Server interactions.")
            g.br()
            g.h3("Available Test Scenarios:")
            g("- ETSI COAP#4 Plugtest scenarios: ")
            g.a("ETSI-CoAP4-test-list", href="doc/ETSI-CoAP4-test-list.pdf")
            g(", ")
            g.a("ETSI-CoAP4-test-description",
                href="https://github.com/cabo/td-coap4/")
            with g.br():  # FIXME: bug in generation if we remove the with context
                pass
            g("- Additive test scenarios developed by IRISA/Tipi Group: ")
            g.a("Additive-IRISA-CoAP-test-list",
                href="doc/Additive-IRISA-CoAP-test-list.pdf")
            g(", ")
            g.a("Additive-IRISA-CoAP-test-description",
                href="doc/Additive-IRISA-CoAP-test-description.pdf")
            with g.br():  # FIXME: bug in generation if we remove the with context
                pass
            g.h3("IETF RFCs/Drafts covered:")
            g("- CoAP CORE(")
            g.a("RFC7252", href="http://tools.ietf.org/html/rfc7252")
            g(")")
            with g.br():  # FIXME: bug in generation if we remove the with context
                pass
            g("- CoAP OBSERVE(")
            g.a("draft-ietf-core-observe-16",
                href="http://tools.ietf.org/html/draft-ietf-core-observe-16")
            g(")")
            with g.br():  # FIXME: bug in generation if we remove the with context
                pass
            g("- CoAP BLOCK(")
            g.a("draft-ietf-core-block-17",
                href="http://tools.ietf.org/html/draft-ietf-core-block-17")
            g(")")
            g.br()
            with g.br():  # FIXME: bug in generation if we remove the with context
                pass
            g("==========================================================================================")
            with g.br():  # FIXME: bug in generation if we remove the with context
                pass
            g("Submit your traces(pcap format). \nWarning!! pcapng format is not supported; you should convert your pcapng file to pcap format.")
            with g.br():  # FIXME: bug in generation if we remove the with context
                pass
            g.input(name="file", type="file", size=60)
            with g.br():  # FIXME: bug in generation if we remove the with context
                pass
            g("Configuration")
            g.br()
            with g.select(name="profile"):
                g.option("Client <-> Server", value="client", selected="1")
                g.option("Reverse-Proxy <-> Server", value="reverse-proxy")
            with g.br():  # FIXME: bug in generation if we remove the with context
                pass
            g("Optional regular expression for selecting scenarios(eg: ")
            g.tt("CORE_0[1-2]")
            g(" will run only ")
            g.tt("TD_COAP_CORE_01")
            g(" and ")
            g.tt("TD_COAP_CORE_02")
            g(")")
            g.br()
            g.input(name="regex", size=60)
            g.br()
            with g.br():  # FIXME: bug in generation if we remove the with context
                pass
            with g.input(name="agree", type="checkbox", value="1"):
                pass
            g("I agree to leave a copy of this file on the server(for debugging purpose). Thanks")
            g.br()
            with g.input(name="urifilter", type="checkbox", value="1"):
                pass
            g("Filter conversations by URI(/test vs. /separate vs. /.well-known/core ...) to reduce verbosity")
            g.br()
            g.br()
            g.input(type="submit")

        with g.br():  # FIXME: bug generator
            pass
        g.b("Note:")
        g(" alternatively you can use the shell script ")
        g.a("coap-tool.sh", href="coap-tool.sh")
        g(" to capture and submit your traces to the server(requires tcpdump and curl installed on your system).")

        g.a(href="http://www.irisa.fr/tipi").img(
            src="http://www.irisa.fr/tipi/wiki/lib/tpl/tipi_style/images/irisa.jpg",
            height="40")
        g.a(href="http://www.irisa.fr/tipi").img(
            src="http://www.irisa.fr/tipi/wiki/lib/tpl/tipi_style/images/tipi_small.png",
            height="50")

        html_changelog(g)
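# A small usage sketch for the analyzer_getFrames / dissector_getFrames GET
# handler above, using only the standard library. The base URL, token value
# and 'CoAP' protocol name are placeholders; the token normally comes from a
# previous dissection or analysis response.
def _example_get_frames(token='0123456789abcdef', frame_id=None):
    from urllib.parse import urlencode
    from urllib.request import urlopen
    query = {'token': token, 'protocol_selection': 'CoAP'}
    if frame_id is not None:
        query['frame_id'] = frame_id
    url = 'http://127.0.0.1:2080/api/v1/dissector_getFrames?' + urlencode(query)
    with urlopen(url) as resp:
        return json.loads(resp.read().decode('utf-8'))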
def do_POST(self):
    # The job counter
    global job_id
    job_id += 1

    # ########################## ttproto API ########################### #

    # POST handler for the analyzer_testCaseAnalyze uri
    # It will allow users to analyze a pcap file corresponding to a TC
    #
    # \param pcap_file => The pcap file that we want to analyze
    # \param token => The token previously provided
    # \param testcase_id => The id of the corresponding test case
    # The pcap_file or the token is required, having both is also forbidden
    #
    if self.path == '/api/v1/analyzer_testCaseAnalyze':

        # Send the header
        self.send_response(200)
        self.send_header('Content-Type', 'application/json;charset=utf-8')
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # Get the content type
        try:
            content_type = cgi.parse_header(self.headers['Content-Type'])
        except TypeError:
            self.api_error(
                "Non empty POST data and format of 'multipart/form-data' expected"
            )
            return

        # Get post values
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            keep_blank_values=True,
            environ={
                'REQUEST_METHOD': 'POST',
                'CONTENT_TYPE': content_type[0]
            })

        # Check that we have the two values
        if any((
            len(form) != 2,
            'testcase_id' not in form,
            all((  # None of the two required => Error
                'pcap_file' not in form,
                'token' not in form
            )),
            all((  # Both of them => Error
                'pcap_file' in form,
                'token' in form
            ))
        )):
            self.api_error(
                'Expected POST=([pcap_file={file}|token={text}], testcase_id={text})'
            )
            return

        # Get the test case and its information
        testcase_id = form.getvalue('testcase_id')
        if not type(testcase_id) == str:
            self.api_error(
                'The value of the testcase_id should be a string from text input'
            )
            return

        # Try to get the test case
        try:
            test_case = get_test_cases(testcase_id)
        except FileNotFoundError:
            self.api_error('Test case %s not found' % testcase_id)
            return

        # Get the token
        token = form.getvalue('token')

        # Get analysis results from the token
        if token:

            # Just get the path
            pcap_path = os.path.join(TMPDIR, token + '.dump')

        # Get analysis results from the pcap file
        else:

            # Check headers
            if any((
                len(content_type) == 0,
                content_type[0] is None,
                content_type[0] != 'multipart/form-data'
            )):
                self.api_error(
                    "POST format of 'multipart/form-data' expected, no file input 'pcap_file' found"
                )
                return

            # Get the same token or generate a new one
            token = get_token(token)

            # Get and check the pcap file entered
            pcap_file = form.getvalue('pcap_file')

            # Path to save the file
            pcap_path = os.path.join(TMPDIR, token + '.dump')

            # Write the pcap file to a temporary destination
            try:
                with open(pcap_path, 'wb') as f:
                    f.write(pcap_file)
            except:
                self.api_error(
                    "Couldn't write the temporary file %s" % pcap_path
                )
                return

            # Get the dissection from analysis tool
            try:
                dissection = Dissector(pcap_path).dissect()
            except pure_pcapy.PcapError:
                self.api_error(
                    "Expected 'pcap_file' to be a non empty pcap file"
                )
                return
            except:
                self.api_error(
                    "Couldn't read the temporary file %s" % pcap_path
                )
                return

            # Save the json dissection result into a file
            json_save = os.path.join(TMPDIR, token + '.json')
            try:
                with open(json_save, 'w') as f:
                    json.dump(dissection, f)
            except:
                self.api_error("Couldn't write the json file")
                return

        # Get the result of the analysis
        analysis_results = Analyzer('tat_coap').analyse(pcap_path, testcase_id)
        self.log_message("Analysis result: " + str(analysis_results))

        # Guard against analysis results that don't have the expected
        # structure (a 6-tuple with partial verdicts and exceptions lists)
        try:
            assert type(analysis_results[4]) is list
            if len(analysis_results[4]) != 0:
                assert type(analysis_results[4][0]) is tuple
            assert type(analysis_results) == tuple
            assert len(analysis_results) == 6
            assert type(analysis_results[0]) == str
            assert type(analysis_results[1]) == str
            assert type(analysis_results[2]) == list
            assert type(analysis_results[3]) == str
            assert type(analysis_results[5]) == list
            for exception_tuple in analysis_results[5]:
                assert type(exception_tuple) == tuple
                assert len(exception_tuple) == 3
                assert isinstance(exception_tuple[0], type)
                assert isinstance(exception_tuple[1], Exception)
                assert isinstance(exception_tuple[2], object)
            assert analysis_results[0] == test_case['tc_basic']['id']
        except AssertionError:
            self.api_error(
                'Problem with the analysis of TC %s, wrong result received' % testcase_id
            )
            return

        # Build the verdict object
        verdict = OrderedDict()
        verdict['_type'] = 'verdict'
        verdict['verdict'] = analysis_results[1]
        verdict['description'] = analysis_results[3]
        verdict['review_frames'] = analysis_results[2]
        verdict['partial_verdicts'] = analysis_results[4]

        token_res = OrderedDict()
        token_res['_type'] = 'token'
        token_res['value'] = token

        # Prepare the result to return
        json_result = OrderedDict()
        json_result['_type'] = 'response'
        json_result['ok'] = True
        json_result['content'] = [
            token_res,
            test_case['tc_basic'],
            verdict
        ]

        self.log_message("Analysis response sent: " + str(analysis_results[4]))
        self.log_message("Analysis response sent: " + str(json.dumps(json_result)))

        # Dump the json result of the analysis
        print(json.dumps(json_result))
        return

    # POST handler for the analyzer_allMightyAnalyze uri
    # It will allow users to analyze a pcap file without giving
    # a corresponding test case
    #
    # \param pcap_file => The pcap file that we want to analyze
    # \param token => The token previously provided
    # The pcap_file or the token is required, having both is also forbidden
    #
    elif self.path == '/api/v1/analyzer_allMightyAnalyze':

        # Send the header
        self.send_response(200)
        self.send_header('Content-Type', 'application/json;charset=utf-8')
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # Not implemented for the moment
        self.api_error(
            "This method is not implemented yet, please come back later"
        )
        return

    # POST handler for the dissector_dissectFile uri
    # It will allow users to dissect a pcap file
    #
    # \param pcap_file => The pcap file that we want to dissect
    # \param protocol_selection => The protocol name
    #
    elif self.path == '/api/v1/dissector_dissectFile':

        # Send the header
        self.send_response(200)
        self.send_header('Content-Type', 'application/json;charset=utf-8')
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # Get the content type
        try:
            content_type = cgi.parse_header(self.headers['Content-Type'])
        except TypeError:
            self.api_error(
                "Non empty POST data and format of 'multipart/form-data' expected"
            )
            return

        # Check headers
        if any((
            len(content_type) == 0,
            content_type[0] is None,
            content_type[0] != 'multipart/form-data'
        )):
            self.api_error(
                "POST format of 'multipart/form-data' expected, no file input 'pcap_file' found"
            )
            return

        # Get post values
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={
                'REQUEST_METHOD': 'POST',
                'CONTENT_TYPE': content_type[0]
            })

        # Check the parameters passed
        if any((
            len(form) != 2,
            'pcap_file' not in form,
            'protocol_selection' not in form
        )):
            self.api_error(
                'Expected POST=(pcap_file={file}, protocol_selection={text})'
            )
            return

        # Check the protocol_selection value
        protocol_selection = form.getvalue('protocol_selection')
        if not type(protocol_selection) == str:
            self.api_error(
                'Expected protocol_selection post value to be a text (i.e. a string)'
            )
            return

        # Resolve the requested protocol
        prot = get_protocol(protocol_selection)
        if prot is None:
            self.api_error('Unknown protocol %s' % protocol_selection)
            return

        # Generate a new token
        token = get_token()

        # Get the pcap file
        pcap_file = form.getvalue('pcap_file')

        # Path to save the file
        pcap_path = os.path.join(TMPDIR, token + '.dump')

        # Write the pcap file to a temporary destination
        try:
            with open(pcap_path, 'wb') as f:
                f.write(pcap_file)
        except:
            self.api_error("Couldn't write the temporary file")
            return

        # Prepare the result to return
        json_result = OrderedDict()
        json_result['_type'] = 'response'
        json_result['ok'] = True

        token_res = OrderedDict()
        token_res['_type'] = 'token'
        token_res['value'] = token

        # Get the dissection from dissector tool
        try:
            dissection = Dissector(pcap_path).dissect(eval(prot['name']))
        except TypeError as e:
            self.api_error('Dissector error: ' + str(e))
            return
        except pure_pcapy.PcapError:
            self.api_error(
                "Expected 'pcap_file' to be a non empty pcap file"
            )
            return
        except:
            self.api_error(
                "Couldn't read the temporary file %s and protocol is %s"
                % (pcap_path, prot['name'])
            )
            return

        # Save the json dissection result into a file
        json_save = os.path.join(TMPDIR, token + '.json')
        try:
            with open(json_save, 'w') as f:
                json.dump(dissection, f)
        except:
            self.api_error("Couldn't write the json file")
            return

        # Add the token to the results
        dissection.insert(0, token_res)

        # The json result to return
        json_result['content'] = dissection

        # Dump the json result of the dissection
        print(json.dumps(json_result))
        return

    # If we didn't manage to bind the request
    else:
        self.send_error(404)
        return
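# A hedged sketch of a dissector_dissectFile request matching the handler
# above: multipart/form-data with a 'pcap_file' upload and a text
# 'protocol_selection'. The 'requests' dependency, base URL, protocol name and
# file path are assumptions made for the example only.
def _example_dissect_file():
    import requests  # assumption: not a dependency of this module
    with open('traces/sample.pcap', 'rb') as pcap:
        response = requests.post(
            'http://127.0.0.1:2080/api/v1/dissector_dissectFile',
            data={'protocol_selection': 'CoAP'},
            files={'pcap_file': pcap}
        )
    body = response.json()
    # content[0] is the token object, the remaining items are dissected frames
    return body['content'][0]['value'], body['content'][1:]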
def do_GET(self):
    # Get the url and parse it
    url = urlparse(self.path)

    # ##### Personal remarks
    #
    # For the moment, using this webserver is fine, but for scaling a more
    # robust web platform using a framework would be better. This one is
    # sufficient for the moment.
    #
    # We check on the path for the whole uri; maybe we should bind a version
    # prefix like "/api/v1" and then dispatch on the methods behind it.
    #
    # ##### End of remarks

    # GET handler for the analyzer_getTestCases uri
    # It will give to the gui the list of the test cases
    #
    if url.path == '/api/v1/analyzer_getTestCases':

        # Send the header
        self.send_response(200)
        self.send_header('Content-Type', 'application/json;charset=utf-8')
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # Get the list of test cases
        try:
            test_cases = get_test_cases()
        except FileNotFoundError as fnfe:
            self.api_error(
                'Problem during fetching the test cases list:\n' + str(fnfe)
            )
            return

        clean_test_cases = []
        for tc in test_cases:
            clean_test_cases.append(test_cases[tc]['tc_basic'])

        # If no test case found
        if len(clean_test_cases) == 0:
            self.api_error('No test cases found')
            return

        # The result to return
        json_result = OrderedDict()
        json_result['_type'] = 'response'
        json_result['ok'] = True
        json_result['content'] = clean_test_cases

        # Just give the json representation of the test cases list
        print(json.dumps(json_result))
        return

    # GET handler for the analyzer_getTestCaseImplementation uri
    # It will allow developers to get the implementation script of a TC
    #
    # /param testcase_id => The unique id of the test case
    #
    elif url.path == '/api/v1/analyzer_getTestCaseImplementation':

        # Send the header
        self.send_response(200)
        self.send_header('Content-Type', 'application/json;charset=utf-8')
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # Get the parameters
        params = parse_qs(url.query)
        try:
            # Check parameters
            if any((
                len(params) != 1,
                'testcase_id' not in params,
                not correct_get_param(params['testcase_id'])
            )):
                raise

        # Catch errors (key mostly) or if wrong parameter
        except:
            self.api_error(
                "Incorrect GET parameters, expected '?testcase_id={string}'"
            )
            return

        # Get the test case
        try:
            test_case = get_test_cases(params['testcase_id'][0], True)
        except FileNotFoundError:
            self.api_error(
                'Test case %s not found' % params['testcase_id'][0]
            )
            return

        # The result to return
        json_result = OrderedDict()
        json_result['_type'] = 'response'
        json_result['ok'] = True
        json_result['content'] = [
            test_case['tc_basic'],
            test_case['tc_implementation']
        ]

        # Here the process from ttproto core
        print(json.dumps(json_result))
        return

    # GET handler for the analyzer_getTestcaseSteps uri
    # It will give the different steps of a TC
    #
    # /param testcase_id => The unique id of the test case
    #
    elif url.path == '/api/v1/analyzer_getTestcaseSteps':

        # Send the header
        self.send_response(200)
        self.send_header('Content-Type', 'application/json;charset=utf-8')
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # Get the parameters
        params = parse_qs(url.query)
        try:
            # Check parameters
            if any((
                len(params) != 1,
                'testcase_id' not in params,
                not correct_get_param(params['testcase_id'])
            )):
                raise

        # Catch errors (key mostly) or if wrong parameter
        except:
            self.api_error(
                "Incorrect GET parameters, expected '?testcase_id={string}'"
            )
            return

        # Get the steps of the test case
        try:
            tc_id = params['testcase_id'][0]
            steps = get_test_steps(tc_id)
        except:
            self.api_error(
                'Steps of test case %s not found' % tc_id
            )
            return

        # The result to return
        json_result = OrderedDict()
        json_result['_type'] = 'response'
        json_result['ok'] = True
        json_result['content'] = steps

        # Here the process from ttproto core
        print(json.dumps(json_result))
        return

    # GET handler for the analyzer_getProtocols uri
    # It will give to the gui the list of the protocols implemented
    #
    elif url.path in (
        '/api/v1/analyzer_getProtocols',
        '/api/v1/dissector_getProtocols'
    ):

        # Send the header
        self.send_response(200)
        self.send_header('Content-Type', 'application/json;charset=utf-8')
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # The result to return
        json_result = OrderedDict()
        json_result['_type'] = 'response'
        json_result['ok'] = True
        json_result['content'] = []

        # Get the protocols
        protocols = get_protocol()
        for prot in protocols:
            json_result['content'].append(protocols[prot])

        # Just give the json representation of the protocols list
        print(json.dumps(json_result))
        return

    # GET handler for the analyzer_getFrames and dissector_getFrames uri
    # It will allow a user to get a single frame from the previous pcap
    # if a frame_id is provided, otherwise it will return all the frames
    #
    # /param token => The token of the corresponding pcap file
    # /param protocol_selection => The protocol we want to filter on
    # /param frame_id (optional) => The id of the wanted frame
    #
    # /remark We redirect to the same function but later those two
    # may diverge
    #
    elif url.path in (
        '/api/v1/analyzer_getFrames',
        '/api/v1/dissector_getFrames'
    ):

        # Send the header
        self.send_response(200)
        self.send_header('Content-Type', 'application/json;charset=utf-8')
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # Get the parameters
        params = parse_qs(url.query)
        try:
            # Only token and protocol_selection
            if len(params) == 2:
                if any((
                    'token' not in params,
                    'protocol_selection' not in params,
                    not correct_get_param(params['token']),
                    not correct_get_param(params['protocol_selection'])
                )):
                    raise

            # token, protocol_selection and frame_id
            elif len(params) == 3:
                if any((
                    'token' not in params,
                    'protocol_selection' not in params,
                    'frame_id' not in params,
                    not correct_get_param(params['token']),
                    not correct_get_param(params['frame_id'], is_number=True),
                    not correct_get_param(params['protocol_selection'])
                )):
                    raise

            # Wrong number of parameters
            else:
                raise

        # Catch errors (key mostly) or if wrong parameter
        except:
            self.api_error(
                "Incorrect parameters, expected '?token={string}&protocol_selection={string}(&frame_id={integer})?'"
            )
            return

        # Format the id before passing it to the process function
        token = params['token'][0]
        protocol_selection = params['protocol_selection'][0]

        # Check the protocol
        protocol = get_protocol(protocol_selection)
        if protocol is None:
            self.api_error('Unknown %s protocol' % protocol_selection)
            return

        # Create the frames object
        frames = []

        # Get the json file
        try:
            json_path = os.path.join(TMPDIR, "%s.json" % token)
            with open(json_path, 'r') as json_fp:
                frames = json.load(json_fp, object_pairs_hook=OrderedDict)
        except:
            self.api_error(
                'Session identified by token %s not found' % token
            )
            return

        # If a single frame is asked
        if len(params) == 3:
            frame_id = int(params['frame_id'][0])
            for frame in frames:
                if frame['id'] == frame_id:
                    frames = [frame]

            # If no frame with this id
            if len(frames) != 1:
                self.api_error('No frame with id=%u found' % frame_id)
                return

        # The result to return
        json_result = OrderedDict()
        json_result['_type'] = 'response'
        json_result['ok'] = True

        token_res = OrderedDict()
        token_res['_type'] = 'token'
        token_res['value'] = token

        frames.insert(0, token_res)
        json_result['content'] = frames

        # Dump the json result
        print(json.dumps(json_result))
        return

    # GET handler for the dissector_getFramesSummary uri
    # It will allow a user to get the summary of all the frames
    #
    # /param token => The token of the corresponding pcap file
    # /param protocol_selection => The protocol we want to filter on
    #
    elif url.path == '/api/v1/dissector_getFramesSummary':

        # Send the header
        self.send_response(200)
        self.send_header('Content-Type', 'application/json;charset=utf-8')
        self.end_headers()

        # Bind the stdout to the http output
        os.dup2(self.wfile.fileno(), sys.stdout.fileno())

        # Get the parameters
        params = parse_qs(url.query)
        try:
            # Only token and protocol_selection
            if any((
                len(params) != 2,
                'token' not in params,
                'protocol_selection' not in params,
                not correct_get_param(params['token']),
                not correct_get_param(params['protocol_selection'])
            )):
                raise

        # Catch errors (key mostly) or if wrong parameter
        except:
            self.api_error(
                "Incorrect parameters, expected '?token={string}&protocol_selection={string}'"
            )
            return

        # Format the id before passing it to the process function
        token = params['token'][0]
        protocol_selection = params['protocol_selection'][0]

        # Check the protocol
        protocol = get_protocol(protocol_selection)
        if protocol is None:
            self.api_error('Unknown %s protocol' % protocol_selection)
            return

        # Get the dump file
        pcap_path = os.path.join(TMPDIR, "%s.dump" % token)
        try:
            # Get summaries from it
            frames_summary = Dissector(pcap_path).summary(
                eval(protocol['name'])
            )
        except TypeError as e:
            self.api_error('Dissector error:\n' + str(e))
            return
        except:
            self.api_error(
                'Session identified by token %s not found' % token
            )
            return

        # The result to return
        json_result = OrderedDict()
        json_result['_type'] = 'response'
        json_result['ok'] = True

        token_res = OrderedDict()
        token_res['_type'] = 'token'
        token_res['value'] = token

        json_result['content'] = []
        json_result['content'].append(token_res)

        # Add each frame summary
        for f_id, f_sum in frames_summary:
            summary = OrderedDict()
            summary['_type'] = 'frame_summary'
            summary['frame_id'] = f_id
            summary['frame_summary'] = f_sum
            json_result['content'].append(summary)

        # Dump the json result
        print(json.dumps(json_result))
        return

    # If we didn't manage to bind the request
    else:
        self.send_error(404)
        return
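# A usage sketch for the dissector_getFramesSummary handler above, standard
# library only. The base URL, token and 'CoAP' protocol name are placeholders;
# the response 'content' holds the token object followed by one frame_summary
# entry per frame.
def _example_get_frames_summary(token='0123456789abcdef'):
    from urllib.parse import urlencode
    from urllib.request import urlopen
    query = urlencode({'token': token, 'protocol_selection': 'CoAP'})
    url = 'http://127.0.0.1:2080/api/v1/dissector_getFramesSummary?' + query
    with urlopen(url) as resp:
        result = json.loads(resp.read().decode('utf-8'))
    return [(f['frame_id'], f['frame_summary'])
            for f in result['content'] if f['_type'] == 'frame_summary']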