def main():
    version = pkg_resources.require("gnss-benchmark")[0].version

    args = docopt.docopt(__doc__, version=version, options_first=False)

    logger.set_level(args['--log'])

    logger.debug("Start main, parsed args\n{}".format(args))

    dataset_path = args['--dataset'] if args['--dataset'] else report.DATASET_PATH

    if args['make_report']:
        report.make(jason.processing_engine,
                    description_files_root_path=dataset_path,
                    output_folder=args['--output-folder'],
                    report_name=args['--filename'],
                    runby=args['--runby'],
                    tests=args['--test'])

    if args['list_tests']:
        test_list = report.get_test_list(description_files_root_path=dataset_path)
        sys.stdout.write('\n'.join(test_list) + '\n')

    return 0
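# A minimal sketch of the docopt usage text that main() above parses. The
# real text lives in the module __doc__; the command and option names are
# inferred from the args[...] lookups above, and the exact wording is an
# assumption.
USAGE_SKETCH = """
Usage:
    gnss_benchmark make_report [--dataset <path>] [--output-folder <folder>]
                               [--filename <name>] [--runby <name>]
                               [--test <name>] [--log <level>]
    gnss_benchmark list_tests [--dataset <path>] [--log <level>]
    gnss_benchmark (-h | --help)
    gnss_benchmark --version
"""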
def run(self, rover_file, strategy, rover_dynamics,
        base_file=None, base_lonlathgt=None, label='gnss-benchmark'):
    """
    Definition of the processing engine to be used to compute the solution.
    You can use your own processing engine as long as it conforms to this
    signature.

    :param rover_file: The data file to process (e.g. RINEX file)
    :param strategy: Which processing strategy to use (SPP, PPP, PPK)
    :param rover_dynamics: Dynamics of the rover receiver ('static' or 'dynamic')
    :param base_file: (optional, only for PPK processing) name of the file
                      with the base GNSS data
    :param base_lonlathgt: (optional, only for PPK processing) position of
                           the base station (longitude, latitude and height)
    :param label: label to attach to the submitted process

    The method should return a named numpy array with at least the
    following fields:

    - 'GPSW': GPS week
    - 'GPSSoW': GPS seconds of the week
    - 'latitudedeg': Latitude in degrees
    - 'longitudedeg': Longitude in degrees
    - 'heightm': Height in meters
    """

    result_zip_file = jason_gnss.commands.process(
        rover_file=rover_file, strategy=strategy,
        rover_dynamics=rover_dynamics, base_file=base_file,
        base_lonlathgt=base_lonlathgt, label=label)

    if result_zip_file is None:
        logger.critical('Processing failed, no result file to parse')
        return None

    pos_estimates = None

    with zipfile.ZipFile(result_zip_file, 'r') as jason_zip:

        namelist = jason_zip.namelist()
        pattern = '{}.csv'.format(strategy)
        candidate_list = list(filter(lambda x: x.endswith(pattern), namelist))

        logger.debug('Files within the zip file: {}'.format(namelist))
        logger.debug('Result files from GNSS job: {}'.format(candidate_list))

        if candidate_list:
            with jason_zip.open(candidate_list[0]) as csv_fh:
                pos_estimates = np.genfromtxt(csv_fh, names=True, delimiter=",")
                # for one-row only cases
                pos_estimates = np.atleast_1d(pos_estimates)

    os.remove(result_zip_file)

    return pos_estimates
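# The docstring above states that any engine conforming to the run()
# signature can be plugged into the benchmark, and the report additionally
# calls processing_engine.version(). A minimal stand-in engine is sketched
# below; the class name, version string and the single hard-coded epoch are
# hypothetical and for illustration only.
import numpy as np

class DummyEngine:

    def version(self):
        return 'dummy-0.0.1'

    def run(self, rover_file, strategy, rover_dynamics,
            base_file=None, base_lonlathgt=None, label='gnss-benchmark'):
        # One fix, returned as a named numpy array with the required fields
        dtype = [('GPSW', 'i4'), ('GPSSoW', 'f8'), ('latitudedeg', 'f8'),
                 ('longitudedeg', 'f8'), ('heightm', 'f8')]
        return np.atleast_1d(
            np.array([(2086, 0.0, 39.469, -0.371, 50.0)], dtype=dtype))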
def process(rover_file, process_type="GNSS", base_file=None,
            base_lonlathgt=None, images_folder=None, timeout=None, **kwargs):
    """
    Submit a process to Jason and wait for it to end, so that the results
    file is also downloaded
    """

    logger.info('Process file [ {} ]'.format(rover_file))
    logger.debug('Timeout {}'.format(timeout))

    process_id = submit(rover_file, process_type=process_type,
                        base_file=base_file, base_lonlathgt=base_lonlathgt,
                        images_folder=images_folder, **kwargs)

    if process_id is None:
        logger.critical('Could not submit [ {} ] for processing'.format(rover_file))
        return None

    logger.info('Submitted process with ID {}'.format(process_id))

    start_time = time.time()

    spinner = __spinning_cursor__()

    while True:
        process_status = status(process_id)

        logger.debug('Processing status {}'.format(process_status))

        if process_status == 'FINISHED':
            logger.info('Completed process with ID {}'.format(process_id))
            return download(process_id)
        elif process_status == 'ERROR':
            logger.critical('An unexpected error occurred in the task!')
            return None

        # Spinner
        sys.stderr.write(next(spinner))
        sys.stderr.flush()
        time.sleep(1)
        sys.stderr.write('\b')

        if timeout and time.time() - start_time > timeout:
            logger.critical(
                "Time Out! The process did not end in " +
                "[ {} ] seconds, ".format(timeout) +
                "but might be available for download at a later stage.")
            return None
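# Usage sketch for process(): submit a rover file and wait (up to one hour)
# for the results. The filename 'rover.obs' and the timeout value are
# hypothetical; credentials are picked up from the environment by the
# submission step.
def _example_process():
    result_zip = process('rover.obs', process_type='GNSS', timeout=3600)
    if result_zip:
        print('Results downloaded to [ {} ]'.format(result_zip))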
def status(process_id, **kwargs):
    """
    Get the status of the given process_id
    """

    res = None

    ret, return_code = jason.get_status(process_id)

    logger.debug('Response {} (status code {})'.format(ret, return_code))

    if return_code == 200:
        res = ret['process']['status']

    return res
def main(): """ """ version = pkg_resources.require("jason-gnss")[0].version args = docopt.docopt(__doc__, version=version, options_first=False) logger.set_level(args['--debug']) logger.debug("Start main, parsed arg\n {}".format(args)) command, command_args = __get_command__(args) try: res = command(**command_args) if res: sys.stdout.write('{}\n'.format(res)) except (AuthenticationError, ValueError) as e: logger.critical(str(e)) return 0
def _render_report(descriptions, results, output_folder, report_name,
                   runby, processing_engine):

    statistics = _compute_statistics(descriptions, results)
    logger.debug('Computed statistics')

    statistic_tables = _build_markdown_tables(descriptions, statistics)
    logger.debug('Computed statistics table')

    output_abspath = os.path.abspath(output_folder)
    logger.debug(f'Output absolute path [ {output_abspath} ]')

    cwd = os.getcwd()

    with tempfile.TemporaryDirectory() as tempfolder:

        os.chdir(tempfolder)

        figure_path = os.path.join(tempfolder, 'figures')
        os.mkdir(figure_path)

        figures = {}
        for test_name, description in descriptions.items():
            result = results[test_name]
            figures[test_name] = _make_plots(test_name, description,
                                             result, figure_path)

        doc = None
        with open(os.path.join(TEMPLATES_PATH, 'report.md.jinja'), 'r') as fh:
            template = jinja2.Template(fh.read())

            render_values = {
                'tests': descriptions,
                'figures': figures,
                'date': datetime.datetime.utcnow(),
                'runby': runby,
                'statistic_tables': statistic_tables,
                'engine_version': processing_engine.version()
            }

            doc = template.render(render_values)

        markdown_filename = os.path.join(tempfolder, 'report.md')
        with open(markdown_filename, "w") as outfh:
            outfh.write(doc)

        logger.debug(f'Markdown report rendered {markdown_filename}')

        # hack: if read is not performed, the pandoc command does not work
        with open(markdown_filename, "r") as fh:
            _ = fh.read()

        output_filename = os.path.join(output_abspath, report_name)

        if output_filename.endswith('.md'):
            figure_dst_path = os.path.join(output_abspath, 'figures')
            shutil.rmtree(figure_dst_path, ignore_errors=True)
            shutil.copytree(figure_path, figure_dst_path)
            shutil.copy(markdown_filename, output_filename)
        else:
            cmd = ["pandoc", "-o", output_filename, markdown_filename]
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
            logger.debug(f'pandoc stdout: {stdout}')
            logger.debug(f'pandoc stderr: {stderr}')

        os.chdir(cwd)

    logger.debug(f'Written report: {output_filename}')

    return output_filename
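# A toy render mirroring how _render_report feeds render_values into the
# jinja2 template. The template text here is made up for illustration; the
# real one lives in TEMPLATES_PATH/report.md.jinja and consumes the
# variables listed in render_values above.
def _example_render():
    import datetime
    import jinja2
    toy_template = jinja2.Template(
        '# GNSS benchmark report\n\nGenerated on {{ date }} by {{ runby }}\n')
    return toy_template.render({'date': datetime.datetime.utcnow(),
                                'runby': 'example-user'})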
#!/usr/bin/env python3
import glob
import json

from roktools import logger

if __name__ == "__main__":

    logger.set_level('DEBUG')

    for test_case_description in glob.glob("**/description.json", recursive=True):

        desc = None
        with open(test_case_description, 'r') as fh:
            desc = json.load(fh)

        if not desc:
            continue

        logger.info(f'Processing test case [ {test_case_description} ]')
        logger.debug(f'Test case description {desc}')
def submit_process(rover_file, process_type="GNSS", base_file=None,
                   base_lonlathgt=None, camera_metadata_file=None,
                   api_key=None, secret_token=None,
                   rover_dynamics='dynamic', strategy='PPK/PPP',
                   label="jason-gnss"):
    """
    Submit a process to the Jason PaaS

    :param rover_file: Filename with the GNSS measurements of the rover receiver
    :param process_type: Type of process to submit to Jason (GNSS or CONVERSION)
    :param base_file: Filename with the GNSS measurements of the base receiver
    :param base_lonlathgt: Array with the longitude, latitude and height of
                           the base receiver
    :param camera_metadata_file: Filename with the exif data of the images
                                 in the folder
    :param api_key: Jason API key, if not provided it will be fetched from
                    the environment variables
    :param secret_token: Your Jason user secret token, if not provided it
                         will be fetched from the environment variables
    :param strategy: Force a processing strategy (e.g. PPP or PPK). If left
                     to None, Jason will work on a best-effort basis, trying
                     to pick the most accurate strategy given the data
                     provided by the user.
    :param rover_dynamics: Dynamics of the rover receiver ('static' or 'dynamic')
    :param label: Label for the process to submit
    """

    if not os.path.isfile(rover_file):
        logger.critical("Rover file [ {} ] does not exist!".format(rover_file))
        return None, None
    elif base_file and not os.path.isfile(base_file):
        logger.critical("Base file [ {} ] specified but does not exist!".format(base_file))
        return None, None

    rover_file_fh = open(rover_file, 'rb')
    base_file_fh = open(base_file, 'rb') if base_file else None
    camera_metadata_file_fh = open(camera_metadata_file, 'rb') if camera_metadata_file else None

    api_key, secret_token = __fetch_credentials__(api_key, secret_token)

    logger.debug('Submitting job to end-point {}'.format(API_URL))

    url = '{}/processes'.format(API_URL)

    headers = __build_headers__(api_key)

    files = {
        'type': (None, process_type),
        'token': (None, secret_token),
        'rover_file': (rover_file, rover_file_fh),
        'rover_dynamics': (None, rover_dynamics),
        'label': (None, label)
    }

    if base_file:
        files.update({'base_file': (base_file, base_file_fh)})

    config_file, config_file_fh = __create_config_file__(base_lonlathgt)
    if config_file:
        files.update({'config_file': ('config_file', config_file_fh)})

    # Attach the camera metadata only if a file has been provided
    if camera_metadata_file:
        files.update({
            'camera_metadata_file': (camera_metadata_file, camera_metadata_file_fh)
        })

    if base_lonlathgt:
        lon = base_lonlathgt[0]
        lat = base_lonlathgt[1]
        hgt = base_lonlathgt[2]
        pos_str = '{},{},{}'.format(lat, lon, hgt)
        files.update({'external_base_station_position': (None, pos_str)})

    if strategy:
        files.update({'user_strategy': (None, strategy)})

    logger.debug('Query parameters {}'.format(files))

    r = requests.post(url, headers=headers, files=files)

    rover_file_fh.close()
    if base_file_fh:
        base_file_fh.close()
    if config_file_fh:
        config_file_fh.close()
        os.remove(config_file)
    if camera_metadata_file_fh:
        camera_metadata_file_fh.close()

    return r.json(), r.status_code
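# Usage sketch for submit_process(): PPK processing of a rover file against
# a base receiver with a known position. Filenames and coordinates are
# hypothetical; api_key and secret_token are taken from the environment
# when omitted.
def _example_submit():
    response, status_code = submit_process(
        'rover.obs', base_file='base.obs',
        base_lonlathgt=[-0.371, 39.469, 50.0],  # longitude, latitude, height [m]
        strategy='PPK', rover_dynamics='dynamic')
    if status_code == 200:
        print('Submitted process, response: {}'.format(response))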