def updateReport(self, report_id, parsed_request):
    """
    Append new content to an existing report.

    :param report_id: identifier of the report to update; must resolve to
        files already present under ``self.report_dir``.
    :param parsed_request: dict carrying the data to append under
        'content', and an optional 'format' key ('yaml' or 'json',
        defaulting to 'yaml').
    :raises e.OONIBError: 406 when report_id would escape the report
        directory, 404 when the report files do not exist.
    :raises e.InvalidFormatField: when 'format' is neither 'yaml' nor
        'json'.
    """
    log.debug("Got this request %s" % parsed_request)
    # child() rejects path components that try to escape report_dir.
    try:
        report_path = self.report_dir.child(report_id)
        report_metadata_path = self.report_dir.child(report_id + METADATA_EXT)
    except InsecurePath:
        raise e.OONIBError(406, "Invalid report_id")

    fmt = parsed_request.get('format', 'yaml')
    if fmt == 'json':
        # JSON entries get serialised and newline-terminated before being
        # appended to the report file.
        chunk = json_dumps(parsed_request['content']) + "\n"
    elif fmt == 'yaml':
        # YAML content is taken verbatim from the request.
        chunk = parsed_request['content']
    else:
        raise e.InvalidFormatField

    # Both the report and its metadata file must already exist; updates
    # never create reports.
    if not (report_path.exists() and report_metadata_path.exists()):
        raise e.OONIBError(404, "Report not found")

    with report_path.open('a') as report_file:
        report_file.write(chunk)
    # NOTE(review): presumably the metadata mtime is used elsewhere to
    # track report activity — touch() refreshes it on every update.
    report_metadata_path.touch()
    self.write({'status': 'success'})
def post(self):
    """
    Create a new report from the client-supplied metadata.

    * Request

      {'software_name': 'XXX',
       'software_version': 'XXX',
       'test_name': 'XXX',
       'test_version': 'XXX',
       'probe_asn': 'XXX',
       'content': 'XXX'
      }

      Optional:

        'test_helper': 'XXX'
        'client_ip': 'XXX'
        (not implemented, nor in client, nor in backend)

        The idea behind these two fields is that it would be interesting
        to also collect how the request was observed from the collectors
        point of view.

        We use as a unique key the client_ip address and a time window.
        We then need to tell the test_helper that is selected the
        client_ip address and tell it to expect a connection from a probe
        in that time window.

        Once the test_helper sees a connection from that client_ip it
        will store for the testing session the data that it receives.
        When the probe completes the report (or the time window is over)
        the final report will include also the data collected from the
        collectors view point.

    * Response

      {'backend_version': 'XXX',
       'report_id': 'XXX',
       'supported_formats': ['yaml', 'json']
      }
    """
    # Request validation happens inside of parseNewReportRequest.
    report_data = parseNewReportRequest(self.request.body)
    log.debug("Parsed this data %s" % report_data)

    self.testName = str(report_data['test_name'])
    self.testVersion = str(report_data['test_version'])

    # When a collector policy is configured, the submission must declare
    # the input hashes it used so they can be checked against it.
    if self.policy_file:
        try:
            self.inputHashes = report_data['input_hashes']
        except KeyError:
            raise e.InputHashNotProvided
        self.checkPolicy()

    # `data` is the YAML header to seed the report file with; it stays
    # None for non-YAML submissions.
    data = None
    if report_data['format'] == 'yaml':
        if 'content' in report_data:
            # The client supplied its own header: validate it before
            # serialising it back out.
            header = yaml.safe_load(report_data['content'])
            data = "---\n" + yaml.dump(validateHeader(header)) + "...\n"
        else:
            # No header supplied: synthesise one from the request fields.
            header_fields = {
                'software_name': str(report_data['software_name']),
                'software_version': str(report_data['software_version']),
                'probe_asn': str(report_data['probe_asn']),
                'probe_cc': str(report_data['probe_cc']),
                'test_name': self.testName,
                'test_version': self.testVersion,
                'input_hashes': report_data.get('input_hashes', []),
                'test_start_time': str(report_data['test_start_time']),
                'data_format_version': str(
                    report_data.get('data_format_version', '0.1.0'))
            }
            data = "---\n" + yaml.dump(header_fields) + "...\n"

    # The report filename is the submission timestamp plus the probe's
    # ASN and a random nonce, which keeps it unique per submission.
    report_id = '_'.join([
        otime.timestamp(),
        report_data.get('probe_asn', 'AS0'),
        randomStr(50),
    ])
    report_path = self.report_dir.child(report_id)
    # Metadata about the report submission lives in a sibling file.
    report_metadata_path = self.report_dir.child(report_id + METADATA_EXT)

    response = {
        'backend_version': config.backend_version,
        'report_id': report_id,
        'supported_formats': ['yaml', 'json']
    }

    # If the client asked for a test helper, hand back its address or
    # fail loudly when no such helper is registered.
    requested_helper = report_data.get('test_helper')
    if requested_helper:
        try:
            response['test_helper_address'] = \
                self.helpers[requested_helper].address
        except KeyError:
            raise e.TestHelperNotFound

    with report_metadata_path.open('w') as fh:
        fh.write(json_dumps(report_data))
        fh.write("\n")

    # Create the report file even when there is no header to write, so
    # later updates find it present.
    report_path.touch()
    if data is not None:
        with report_path.open('w') as fh:
            fh.write(data)

    self.write(response)