def restore_monitor_stack(test_id, date_time=None):
    """Restore a monitoring stack (Prometheus data + monitor stack) for *test_id*.

    Downloads the matching monitoring archive from S3, extracts it into a
    temporary directory, starts the monitoring docker containers and uploads
    the SCT dashboards/annotations into the restored Grafana.

    :param test_id: test id whose monitoring archives should be looked up
    :param date_time: optional archive timestamp to pick a specific archive
    :returns: truthy status on success, False on any failure
    """
    if not is_docker_available():
        return False
    monitor_stack_archives = get_monitor_set_archives(test_id)
    arch = get_monitor_stack_archive(monitor_stack_archives, date_time)
    if not arch:
        return False
    # Arch element structure:
    # {
    #     "file_path": log_file,
    #     "type": log_type,
    #     "link": link to archive,
    #     "date": date of create
    # }
    LOGGER.info('Restoring monitoring stack from archive %s', arch['file_path'])

    monitor_stack_base_dir = tempfile.mkdtemp()
    LOGGER.info('Download file {} to directory {}'.format(
        arch['link'], monitor_stack_base_dir))
    downloaded_monitor_archive = S3Storage().download_file(
        arch['link'], dst_dir=monitor_stack_base_dir)
    # The downloaded archive bundles two inner archives: the prometheus
    # snapshot (data) and the monitoring stack itself.
    monitor_data_arch = extract_monitor_data_archive(
        downloaded_monitor_archive, monitor_stack_base_dir)
    monitor_stack_arch = extract_monitor_stack_archive(
        downloaded_monitor_archive, monitor_stack_base_dir)

    if not monitor_data_arch:
        LOGGER.error("No prometheus snapshot were found in arch %s", arch['file_path'])
        return False
    if not monitor_stack_arch:
        LOGGER.error("No monitor stack archive were found in arch %s", arch['file_path'])
        return False

    monitor_data_dir = create_monitoring_data_dir(monitor_stack_base_dir, monitor_data_arch)
    monitor_stack_dir = create_monitoring_stack_dir(monitor_stack_base_dir, monitor_stack_arch)
    if not monitor_stack_dir or not monitor_data_dir:
        LOGGER.error(
            'Creating monitor stack directories failed:\ndata_dir: %s; stack_dir: %s',
            monitor_data_dir, monitor_stack_dir)
        # BUGFIX: previously execution fell through here and crashed later on a
        # None directory; bail out like every other failure path.
        return False

    _, scylla_version = get_monitorstack_scylla_version(monitor_stack_dir)
    status = start_dockers(monitor_stack_dir, monitor_data_dir, scylla_version)
    if status:
        upload_sct_dashboards(monitor_stack_dir, scylla_version)
        upload_annotations(monitor_stack_dir)
        return status
    # Docker start failed: clean up the temp dir so the caller can retry with
    # the next archive.
    LOGGER.error('Error during dockers starting. Trying next archive')
    remove_files(monitor_stack_base_dir)
    return False
def restore_monitoring_stack(test_id, date_time=None):  # pylint: disable=too-many-return-statements
    """Restore and verify a full monitoring stack for *test_id*.

    Downloads the monitoring archive from S3, extracts the prometheus
    snapshot and the monitoring stack, starts the containers, restores the
    Grafana dashboards/annotations and finally verifies the stack is up.

    :param test_id: test id whose monitoring archive should be looked up
    :param date_time: optional archive timestamp to pick a specific archive
    :returns: True when the stack is running and verified, False otherwise
    """
    if not is_docker_available():
        return False
    arch = get_monitoring_stack_archive(test_id, date_time)
    if not arch:
        return False
    # Arch element structure:
    # {
    #     "file_path": log_file,
    #     "type": log_type,
    #     "link": link to archive,
    #     "date": date of create
    # }
    LOGGER.info('Restoring monitoring stack from archive %s', arch['file_path'])

    monitoring_stack_base_dir = tempfile.mkdtemp()
    # Lazy %-style args match the logging style used everywhere else here.
    LOGGER.info('Download file %s to directory %s', arch['link'], monitoring_stack_base_dir)
    downloaded_monitoring_archive = S3Storage().download_file(arch['link'],
                                                             dst_dir=monitoring_stack_base_dir)
    # The downloaded archive bundles two inner archives: the prometheus
    # snapshot (data) and the monitoring stack itself.
    monitoring_data_arch = extract_monitoring_data_archive(downloaded_monitoring_archive,
                                                          monitoring_stack_base_dir)
    monitoring_stack_arch = extract_monitoring_stack_archive(downloaded_monitoring_archive,
                                                             monitoring_stack_base_dir)

    if not monitoring_data_arch:
        LOGGER.error("No prometheus snapshot were found in arch %s", arch['file_path'])
        return False
    if not monitoring_stack_arch:
        LOGGER.error("No monitoring stack archive were found in arch %s", arch['file_path'])
        return False

    monitoring_data_dir = create_monitoring_data_dir(monitoring_stack_base_dir,
                                                     monitoring_data_arch)
    monitoring_stack_dir = create_monitoring_stack_dir(monitoring_stack_base_dir,
                                                       monitoring_stack_arch)
    if not monitoring_stack_dir or not monitoring_data_dir:
        LOGGER.error('Creating monitoring stack directories failed:\ndata_dir: %s; stack_dir: %s',
                     monitoring_data_dir, monitoring_stack_dir)
        # BUGFIX: previously execution fell through here and continued with a
        # None directory; bail out like every other failure path.
        return False

    _, scylla_version = get_monitoring_stack_scylla_version(monitoring_stack_dir)

    status = run_monitoring_stack_containers(monitoring_stack_dir, monitoring_data_dir,
                                             scylla_version)
    if not status:
        return False

    status = restore_grafana_dashboards_and_annotations(monitoring_stack_dir, scylla_version)
    if not status:
        return False

    status = verify_monitoring_stack(scylla_version)
    if not status:
        # Verification failed: drop the temp dir so a retry starts clean.
        remove_files(monitoring_stack_base_dir)
        return False

    LOGGER.info("Monitoring stack is running")
    return True
def upload_logs(archive_path, storing_path):
    """Upload the archive at *archive_path* to S3 under *storing_path*.

    :returns: the S3 link of the uploaded file
    """
    return S3Storage().upload_file(file_path=archive_path, dest_dir=storing_path)