def output(id, url, download):
    """
    Shows the output url of the run.
    By default opens the output page in your default browser.
    """
    experiment = ExperimentClient().get(id)
    task_instance = TaskInstanceClient().get(
        get_module_task_instance_id(experiment.task_instances))
    if "output" in task_instance.output_ids:
        resource = ResourceClient().get(task_instance.output_ids["output"])
        output_dir_url = "{}/viewer/{}".format(floyd.floyd_host, resource.uri)
        if url:
            floyd_logger.info(output_dir_url)
        else:
            if download:
                output_dir_url = "{}&download=true".format(output_dir_url)
                ExperimentClient().download_tar(url=output_dir_url,
                                                untar=True,
                                                delete_after_untar=True)
            else:
                floyd_logger.info(
                    "Opening output directory in your browser ...")
                webbrowser.open(output_dir_url)
    else:
        floyd_logger.error("Output directory not available")

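
# Hedged sketch (not part of the original snippet): output() above is a plain
# callable here, but in a Click-based CLI such as floyd-cli it would typically
# be exposed as a command roughly like the wrapper below. The option names,
# flags, and help text are assumptions for illustration only, not the actual
# floyd-cli command definition.
import click


@click.command()
@click.argument('id', nargs=1)
@click.option('-u', '--url', is_flag=True, default=False,
              help='Only print the output url instead of opening a browser')
@click.option('-d', '--download', is_flag=True, default=False,
              help='Download the output directory as a tar archive')
def output_command(id, url, download):
    # Delegate to the output() function defined above
    output(id, url, download)
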
def follow_logs(instance_log_id, sleep_duration=1):
    """
    Poll the log resource and print new lines as they arrive.
    """
    cur_idx = 0
    while True:
        # Get the logs in a loop and log the new lines
        log_file_contents = ResourceClient().get_content(instance_log_id)
        print_output = log_file_contents[cur_idx:]
        cur_idx += len(print_output)
        sys.stdout.write(print_output)
        sleep(sleep_duration)

def logs(id, url, tail, follow, sleep_duration=1):
    """
    Print the logs of the run.
    """
    tail = tail or follow
    log_msg_printed = False
    while True:
        try:
            experiment = ExperimentClient().get(normalize_job_name(id))
        except FloydException:
            experiment = ExperimentClient().get(id)

        instance_log_id = experiment.instance_log_id
        if instance_log_id:
            break
        elif not log_msg_printed:
            floyd_logger.info("Waiting for logs ...\n")
            log_msg_printed = True

        sleep(1)

    log_url = "{}/api/v1/resources/{}?content=true".format(
        floyd.floyd_host, instance_log_id)
    if url:
        floyd_logger.info(log_url)
        return

    if tail:
        floyd_logger.info("Launching job ...")
        current_shell_output = ""
        while True:
            # Get the logs in a loop and log the new lines
            log_file_contents = ResourceClient().get_content(instance_log_id)
            print_output = log_file_contents[len(current_shell_output):]
            if len(print_output.strip()):
                floyd_logger.info(print_output)
                current_shell_output = log_file_contents
            sleep(sleep_duration)
    else:
        log_file_contents = ResourceClient().get_content(instance_log_id)
        if len(log_file_contents.strip()):
            floyd_logger.info(log_file_contents)
        else:
            floyd_logger.info("Launching job now. Try after a few seconds.")

def logs(id, url, tail, sleep_duration=1):
    """
    Print the logs of the run.
    """
    try:
        experiment = ExperimentClient().get(normalize_job_name(id))
    except FloydException:
        experiment = ExperimentClient().get(id)

    if experiment.state == 'queued':
        floyd_logger.info("Job is currently in a queue")
        return

    instance_log_id = experiment.instance_log_id
    if not instance_log_id:
        floyd_logger.info("Job not started yet, no log to show.")
        sys.exit(1)

    log_url = "{}/api/v1/resources/{}?content=true".format(
        floyd.floyd_host, instance_log_id)
    if url:
        floyd_logger.info(log_url)
        return

    if tail:
        floyd_logger.info("Launching job ...")
        current_shell_output = ""
        while True:
            # Get the logs in a loop and log the new lines
            log_file_contents = ResourceClient().get_content(instance_log_id)
            print_output = log_file_contents[len(current_shell_output):]
            if len(print_output.strip()):
                floyd_logger.info(print_output)
                current_shell_output = log_file_contents
            sleep(sleep_duration)
    else:
        log_file_contents = ResourceClient().get_content(instance_log_id)
        if len(log_file_contents.strip()):
            floyd_logger.info(log_file_contents)
        else:
            floyd_logger.info("Launching job now. Try after a few seconds.")

def logs(id, url, follow, sleep_duration=1):
    """
    View the logs of a job.

    To follow along a job in real time, use the --follow flag
    """
    instance_log_id = get_log_id(id)

    if url:
        log_url = "{}/api/v1/resources/{}?content=true".format(
            floyd.floyd_host, instance_log_id)
        floyd_logger.info(log_url)
        return

    if follow:
        floyd_logger.info("Launching job ...")
        follow_logs(instance_log_id, sleep_duration)
    else:
        log_file_contents = ResourceClient().get_content(instance_log_id)
        if len(log_file_contents.strip()):
            floyd_logger.info(log_file_contents.rstrip())
        else:
            floyd_logger.info("Launching job now. Try after a few seconds.")

def logs(id, url, tail, follow, sleep_duration=1):
    """
    Print the logs of the run.
    """
    tail = tail or follow
    instance_log_id = get_log_id(id)

    if url:
        log_url = "{}/api/v1/resources/{}?content=true".format(
            floyd.floyd_host, instance_log_id)
        floyd_logger.info(log_url)
        return

    if tail:
        floyd_logger.info("Launching job ...")
        follow_logs(instance_log_id, sleep_duration)
    else:
        log_file_contents = ResourceClient().get_content(instance_log_id)
        if len(log_file_contents.strip()):
            floyd_logger.info(log_file_contents.rstrip())
        else:
            floyd_logger.info("Launching job now. Try after a few seconds.")

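
# get_log_id() is called by the two logs() variants above but is not defined
# in this snippet. Below is a minimal sketch of what it plausibly does,
# reconstructed from the older logs() versions (look up by normalized job
# name, fall back to the raw id, then read instance_log_id). Treat it as an
# assumption, not the actual floyd-cli implementation.
def get_log_id(job_id):
    try:
        experiment = ExperimentClient().get(normalize_job_name(job_id))
    except FloydException:
        experiment = ExperimentClient().get(job_id)

    instance_log_id = experiment.instance_log_id
    if not instance_log_id:
        floyd_logger.info("Job not started yet, no log to show.")
        sys.exit(1)

    return instance_log_id
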
def follow_logs(instance_log_id, sleep_duration=1):
    """
    Follow the logs until Job termination.
    """
    cur_idx = 0
    job_terminated = False

    while not job_terminated:
        # Get the logs in a loop and log the new lines
        log_file_contents = ResourceClient().get_content(instance_log_id)
        print_output = log_file_contents[cur_idx:]

        # Get the status of the Job from the current log line
        job_terminated = any(terminal_output in print_output
                             for terminal_output in TERMINATION_OUTPUT_LIST)

        cur_idx += len(print_output)
        sys.stdout.write(print_output)
        sleep(sleep_duration)

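
# TERMINATION_OUTPUT_LIST is referenced above but not defined in this snippet.
# It is presumably a list of sentinel strings that the platform appends to the
# log stream when a job reaches a terminal state. The values below are
# illustrative placeholders only, not the real constants.
TERMINATION_OUTPUT_LIST = [
    "[success] Finished execution",
    "[failed] Execution failed",
    "[cancelled] Execution cancelled",
]
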
def output(id, url):
    """
    Shows the url of the dataset. You can use id or a friendly URI.
    By default opens the output page in your default browser.
    """
    data_source = DataClient().get(id)
    if not data_source:
        sys.exit()

    resource = ResourceClient().get(data_source.resource_id)
    data_url = "{}/viewer/{}".format(floyd.floyd_host, resource.uri)
    if url:
        floyd_logger.info(data_url)
    else:
        floyd_logger.info("Opening output directory in your browser ...")
        webbrowser.open(data_url)

def __iter__(self):
    for c in ResourceClient().wait_for_ready(self.resource_id):
        yield c
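
# The __iter__ above belongs to a class that wraps a resource id and streams
# its content once the resource is ready. A minimal, self-contained sketch of
# how such a wrapper might look and be consumed; the class name and
# constructor are assumptions for illustration only.
class ResourceWaitIter(object):
    def __init__(self, resource_id):
        self.resource_id = resource_id

    def __iter__(self):
        # Yield chunks as ResourceClient streams them once the resource is ready
        for c in ResourceClient().wait_for_ready(self.resource_id):
            yield c


# Example usage: stream log chunks to stdout as they arrive
# for chunk in ResourceWaitIter(instance_log_id):
#     sys.stdout.write(chunk)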