def update_plots(self):
    """Refresh the Bokeh data sources for the MAST database monitor and
    save each figure's embeddable components to disk.

    Pushes ``self.jwst_table`` / ``self.caom_table`` into the
    corresponding figure data sources, then writes one ``.html`` (div)
    and one ``.js`` (script) component file per figure under
    ``<output_dir>/monitor_mast/``, setting permissions on each file.
    """
    # Determine plot location and names
    html_stub = 'database_monitor_{}_component.html'
    js_stub = 'database_monitor_{}_component.js'

    self.refs["fig_jwst"].data = self.jwst_table
    self.refs["fig_caom"].data = self.caom_table

    # Save the plots as components
    for name in ["jwst", "caom"]:
        script, div = self.embed("fig_" + name)
        html_name = html_stub.format(name)
        js_name = js_stub.format(name)

        # ``with`` closes the file on exit; the explicit f.close()
        # calls the original carried were redundant and were removed.
        div_outfile = os.path.join(self.output_dir, 'monitor_mast', html_name)
        with open(div_outfile, 'w') as f:
            f.write(div)
        set_permissions(div_outfile)

        script_outfile = os.path.join(self.output_dir, 'monitor_mast', js_name)
        with open(script_outfile, 'w') as f:
            f.write(script)
        set_permissions(script_outfile)

        logging.info('Saved Bokeh components files: {} and {}'.format(html_name, js_name))
def configure_logging(module, production_mode=True, path='./'):
    """Configure the log file with a standard logging format.

    Parameters
    ----------
    module : str
        The name of the module being logged.
    production_mode : bool
        Whether or not the output should be written to the production
        environment.
    path : str
        Where to write the log if user-supplied path; default to
        working dir.
    """
    # Pick the log file location; production logs ignore ``path``.
    log_file = (make_log_file(module) if production_mode
                else make_log_file(module, production_mode=False, path=path))

    # Record the chosen location and mode at module scope so other
    # helpers in this module can consult them later.
    global LOG_FILE_LOC, PRODUCTION_BOOL
    LOG_FILE_LOC = log_file
    PRODUCTION_BOOL = production_mode

    # Create the log file and set the permissions
    logging.basicConfig(filename=log_file,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S %p',
                        level=logging.INFO)
    set_permissions(log_file)
def test_file_group(test_file):
    """Create a file with the standard permissions ``('-rw-r--r--')``
    and default group.

    Modify the group and set the default permissions defined in
    ``permissions.py``. Assert that both group and permissions were
    set correctly.

    Parameters
    ----------
    test_file : str
        Path of file used for testing
    """
    # Record the file's current owner and group so the test is
    # independent of the user running it.
    default_owner = get_owner_string(test_file)
    default_group = get_group_string(test_file)

    # Attempt to retrieve a group name different from the default by
    # taking the first supplementary group of the current process.
    alternate_group = grp.getgrgid(os.getgroups()[0]).gr_name

    set_permissions(test_file, group=alternate_group, owner=default_owner)
    assert has_permissions(test_file, group=alternate_group, owner=default_owner)

    # Restore the default group and verify it took effect.
    set_permissions(test_file, owner=default_owner, group=default_group)
    assert has_permissions(test_file, owner=default_owner, group=default_group)
def save_image(self, fname, thumbnail=False):
    """Save the current ``matplotlib`` figure in the requested output
    format and set the appropriate permissions.

    The figure to save is the active pyplot figure; the original
    docstring documented an ``image`` parameter that does not exist
    in the signature, which has been corrected here.

    Parameters
    ----------
    fname : str
        Output filename
    thumbnail : bool
        True if saving a thumbnail image, False for the full
        preview image.
    """
    plt.savefig(fname, bbox_inches='tight', pad_inches=0)
    permissions.set_permissions(fname)

    # If the image is a thumbnail, rename to '.thumb'
    if thumbnail:
        # NOTE(review): str.replace swaps every '.jpg' substring; this
        # assumes '.jpg' only appears as the file extension — confirm
        # against the callers that build fname.
        thumb_fname = fname.replace('.jpg', '.thumb')
        os.rename(fname, thumb_fname)
        logging.info(f'Saved image to {thumb_fname}')
    else:
        logging.info(f'Saved image to {fname}')
def test_file_permissions(test_file):
    """Create a file with the standard permissions ``('-rw-r--r--')``.

    Set the default permissions defined in ``permissions.py``. Assert
    that these were set correctly.

    Parameters
    ----------
    test_file : str
        Path of file used for testing
    """
    # Read the owner/group off the file itself so the test runs the
    # same for any user on any system.
    current_owner = get_owner_string(test_file)
    current_group = get_group_string(test_file)

    set_permissions(test_file, owner=current_owner, group=current_group)
    assert has_permissions(test_file, owner=current_owner, group=current_group)
def test_directory_permissions(test_directory):
    """Create a directory with the standard permissions ``('-rw-r--r--')``.

    Set the default permissions defined in ``permissions.py``. Assert
    that these were set correctly.

    Parameters
    ----------
    test_directory : str
        Path of directory used for testing
    """
    # Query the directory's own owner and group; this keeps the test
    # independent of whichever user is running it.
    current_owner = get_owner_string(test_directory)
    current_group = get_group_string(test_directory)
    print('\nCurrent owner={} group={}'.format(current_owner, current_group))

    set_permissions(test_directory, owner=current_owner, group=current_group)
    assert has_permissions(test_directory, owner=current_owner, group=current_group)
def monitor_template_main():
    """The main function of the ``monitor_template`` module.

    Walks through worked examples of the common monitor tasks:
    logging, querying MAST, parsing a filename, locating a dataset in
    the filesystem, opening it with ``jwst.datamodels``, saving a file
    with permissions, and exporting a Bokeh plot as components.
    """
    # Example of logging
    my_variable = 'foo'
    logging.info('Some useful information: {}'.format(my_variable))

    # Example of querying for a dataset via MAST API
    service = "Mast.Jwst.Filtered.Niriss"
    params = {
        "columns": "filename",
        "filters": [{
            "paramName": "filter",
            "values": ['F430M']
        }]
    }
    response = Mast.service_request_async(service, params)
    result = response[0].json()['data']
    filename_of_interest = result[0]['filename']  # jw00304002001_02102_00001_nis_uncal.fits

    # Example of parsing a filename
    filename_dict = filename_parser(filename_of_interest)
    # Contents of filename_dict:
    #     {'program_id': '00304',
    #      'observation': '002',
    #      'visit': '001',
    #      'visit_group': '02',
    #      'parallel_seq_id': '1',
    #      'activity': '02',
    #      'exposure_id': '00001',
    #      'detector': 'nis',
    #      'suffix': 'uncal'}

    # Example of locating a dataset in the filesystem
    filesystem = get_config()['filesystem']
    dataset = os.path.join(filesystem,
                           'jw{}'.format(filename_dict['program_id']),
                           filename_of_interest)

    # Example of reading in dataset using jwst.datamodels
    im = datamodels.open(dataset)
    # Now have access to:
    #     im.data  # Data array
    #     im.err   # ERR array
    #     im.meta  # Metadata such as header keywords

    # Example of saving a file and setting permissions
    im.save('some_filename.fits')
    set_permissions('some_filename.fits')

    # Example of creating and exporting a Bokeh plot.
    # Renamed from ``plt`` to avoid shadowing the module's
    # matplotlib.pyplot alias used elsewhere in this file.
    plot = Donut(im.data, plot_width=600, plot_height=600)
    plot.sizing_mode = 'stretch_both'  # Necessary for responsive sizing on web app
    script, div = components(plot)

    plot_output_dir = get_config()['outputs']
    div_outfile = os.path.join(plot_output_dir, 'monitor_name',
                               filename_of_interest + "_component.html")
    script_outfile = os.path.join(plot_output_dir, 'monitor_name',
                                  filename_of_interest + "_component.js")

    # ``with`` already closes each file; the original's extra
    # f.close() inside the context manager was redundant.
    for outfile, component in zip([div_outfile, script_outfile], [div, script]):
        with open(outfile, 'w') as f:
            f.write(component)
        set_permissions(outfile)

    # Perform any other necessary code
    well_named_variable = "Function does something."
    result_of_second_function = second_function(well_named_variable)
def generate_preview_images():
    """The main function of the ``generate_preview_image`` module.

    Finds all FITS files in the filesystem, groups files belonging to
    the same exposure, and produces a preview JPG and thumbnail for
    each group (building a detector mosaic when a group has more than
    one file). Output directories are created with permissions set,
    and existing previews are skipped.
    """
    # Begin logging
    logging.info("Beginning the script run")

    filesystem = get_config()['filesystem']
    preview_image_filesystem = get_config()['preview_image_filesystem']
    thumbnail_filesystem = get_config()['thumbnail_filesystem']

    filenames = glob(os.path.join(filesystem, '*/*.fits'))
    grouped_filenames = group_filenames(filenames)
    logging.info(f"Found {len(filenames)} filenames")

    for file_list in grouped_filenames:
        filename = file_list[0]

        # Determine the save location: program-id directory when the
        # filename parses, otherwise the bare filename stem.
        try:
            identifier = 'jw{}'.format(filename_parser(filename)['program_id'])
        except ValueError:
            identifier = os.path.basename(filename).split('.fits')[0]

        preview_output_directory = os.path.join(preview_image_filesystem, identifier)
        thumbnail_output_directory = os.path.join(thumbnail_filesystem, identifier)

        # Check to see if the preview images already exist and skip if they do
        file_exists = check_existence(file_list, preview_output_directory)
        if file_exists:
            logging.info("JPG already exists for {}, skipping.".format(filename))
            continue

        # Create the output directories if necessary
        if not os.path.exists(preview_output_directory):
            os.makedirs(preview_output_directory)
            permissions.set_permissions(preview_output_directory)
            logging.info(f'Created directory {preview_output_directory}')
        if not os.path.exists(thumbnail_output_directory):
            os.makedirs(thumbnail_output_directory)
            permissions.set_permissions(thumbnail_output_directory)
            logging.info(f'Created directory {thumbnail_output_directory}')

        # If the exposure contains more than one file (because more
        # than one detector was used), then create a mosaic
        max_size = 8
        numfiles = len(file_list)
        # BUG FIX: initialize the mosaic products so a failed
        # create_mosaic() no longer leaves these names undefined and
        # triggers a NameError further down.
        mosaic_image = mosaic_dq = dummy_file = None
        if numfiles != 1:
            try:
                mosaic_image, mosaic_dq = create_mosaic(file_list)
                logging.info('Created mosaic for:')
                for item in file_list:
                    logging.info(f'\t{item}')
            except (ValueError, FileNotFoundError) as error:
                logging.error(error)
            # Build the placeholder filename unconditionally so it is
            # available whenever the mosaic succeeded.
            dummy_file = create_dummy_filename(file_list)
            if numfiles in [2, 4]:
                max_size = 16
            elif numfiles == 8:
                max_size = 32

        # Create the nominal preview image and thumbnail
        try:
            im = PreviewImage(filename, "SCI")
            im.clip_percent = 0.01
            im.scaling = 'log'
            im.cmap = 'viridis'
            im.output_format = 'jpg'
            im.preview_output_directory = preview_output_directory
            im.thumbnail_output_directory = thumbnail_output_directory

            # If a mosaic was successfully made from more than one file,
            # insert it and its associated DQ array into the instance of
            # PreviewImage. Also set the input filename to indicate that
            # we have mosaicked data.
            if numfiles != 1 and mosaic_image is not None:
                im.data = mosaic_image
                im.dq = mosaic_dq
                im.file = dummy_file

            im.make_image(max_img_size=max_size)
        except ValueError as error:
            logging.warning(error)

    # Complete logging:
    logging.info("Completed.")
def jwst_inventory(instruments=JWST_INSTRUMENTS,
                   dataproducts=['image', 'spectrum', 'cube'],
                   caom=False,
                   plot=False):
    """Gather a full inventory of all JWST data in each instrument
    service by instrument/dtype

    Parameters
    ----------
    instruments: sequence
        The list of instruments to count
    dataproducts: sequence
        The types of dataproducts to count
    caom: bool
        Query CAOM service
    plot: bool
        Return a pie chart of the data

    Returns
    -------
    astropy.table.table.Table
        The table of record counts for each instrument and mode
    """
    logging.info('Searching database...')

    # Iterate through instruments
    inventory, keywords = [], {}
    for instrument in instruments:
        ins = [instrument]
        for dp in dataproducts:
            count = instrument_inventory(instrument, dataproduct=dp, caom=caom)
            ins.append(count)

        # Get the total. BUG FIX: the original summed a hard-coded
        # ins[-3:], which is wrong whenever len(dataproducts) != 3;
        # summing everything after the instrument name is always right.
        ins.append(sum(ins[1:]))

        # Add it to the list
        inventory.append(ins)

        # Add the keywords to the dict
        keywords[instrument] = instrument_keywords(instrument, caom=caom)

    logging.info(
        'Completed database search for {} instruments and {} data products.'.
        format(instruments, dataproducts))

    # Make the table
    all_cols = ['instrument'] + dataproducts + ['total']
    table = pd.DataFrame(inventory, columns=all_cols)

    # Melt the table
    table = pd.melt(table,
                    id_vars=['instrument'],
                    value_vars=dataproducts,
                    value_name='files',
                    var_name='dataproduct')

    # Plot it
    if plot:
        # Determine plot location and names
        output_dir = get_config()['outputs']
        if caom:
            output_filename = 'database_monitor_caom'
        else:
            output_filename = 'database_monitor_jwst'

        # Make the plot
        plt = Donut(table,
                    label=['instrument', 'dataproduct'],
                    values='files',
                    text_font_size='12pt',
                    hover_text='files',
                    name="JWST Inventory",
                    plot_width=600,
                    plot_height=600)

        # Save the plot as full html
        html_filename = output_filename + '.html'
        outfile = os.path.join(output_dir, 'monitor_mast', html_filename)
        output_file(outfile)
        save(plt)
        set_permissions(outfile)
        logging.info('Saved Bokeh plots as HTML file: {}'.format(html_filename))

        # Save the plot as components; ``with`` closes the files, so
        # the original's explicit f.close() calls were dropped.
        plt.sizing_mode = 'stretch_both'
        script, div = components(plt)

        div_outfile = os.path.join(output_dir, 'monitor_mast',
                                   output_filename + "_component.html")
        with open(div_outfile, 'w') as f:
            f.write(div)
        set_permissions(div_outfile)

        script_outfile = os.path.join(output_dir, 'monitor_mast',
                                      output_filename + "_component.js")
        with open(script_outfile, 'w') as f:
            f.write(script)
        set_permissions(script_outfile)

        logging.info(
            'Saved Bokeh components files: {}_component.html and {}_component.js'
            .format(output_filename, output_filename))

    return table, keywords
def update_plots(self):
    """Update the ColumnDataSource objects for the filesystem monitor
    plots and export each figure's Bokeh components.

    Converts the accumulated statistics into the numeric/datetime
    arrays the figures expect (byte totals are converted to GB), then
    writes a ``<name>_component.html`` and ``<name>_component.js``
    pair per figure into ``self.outputs_dir``.
    """
    logging.info("Beginning filesystem statistics monitor plot updates")

    # Bytes-per-gigabyte divisor, hoisted so each conversion below is
    # written once.
    gb = 1024.**3

    # We'll ensure that all the statistics are in the correct formats
    dates = np.array(self.statistics['timestamp'], dtype='datetime64')

    self.refs['source_filecount'].data = {
        'dates': dates,
        'filecount': np.array(self.statistics['file_count'], dtype=float)
    }

    self.refs['source_stats'].data = {
        'dates': dates,
        'systemsize': np.array(self.statistics['total'], dtype=float) / gb,
        'freesize': np.array(self.statistics['available'], dtype=float) / gb,
        'usedsize': np.array(self.statistics['used'], dtype=float) / gb
    }

    # File counts by type. Note the 'fgs' plot column is fed from the
    # 'gui' result key (FGS detector names start with 'gui').
    self.refs['source_files'].data = {
        'dates': dates,
        'fits': np.array(self.results['fits_files'], dtype=int),
        'uncal': np.array(self.results['uncal'], dtype=int),
        'cal': np.array(self.results['cal'], dtype=int),
        'rate': np.array(self.results['rate'], dtype=int),
        'rateint': np.array(self.results['rateint'], dtype=int),
        'i2d': np.array(self.results['i2d'], dtype=int),
        'nrc': np.array(self.results['nrc'], dtype=int),
        'nrs': np.array(self.results['nrs'], dtype=int),
        'nis': np.array(self.results['nis'], dtype=int),
        'mir': np.array(self.results['mir'], dtype=int),
        'fgs': np.array(self.results['gui'], dtype=int)
    }

    # File sizes by type, converted to GB.
    self.refs['source_sizes'].data = {
        'dates': dates,
        'fits': np.array(self.sizes['fits_files'], dtype=float) / gb,
        'uncal': np.array(self.sizes['uncal'], dtype=float) / gb,
        'cal': np.array(self.sizes['cal'], dtype=float) / gb,
        'rate': np.array(self.sizes['rate'], dtype=float) / gb,
        'rateint': np.array(self.sizes['rateint'], dtype=float) / gb,
        'i2d': np.array(self.sizes['i2d'], dtype=float) / gb,
        'nrc': np.array(self.sizes['nrc'], dtype=float) / gb,
        'nrs': np.array(self.sizes['nrs'], dtype=float) / gb,
        'nis': np.array(self.sizes['nis'], dtype=float) / gb,
        'mir': np.array(self.sizes['mir'], dtype=float) / gb,
        'fgs': np.array(self.sizes['gui'], dtype=float) / gb
    }

    # Write scripts out to files. The ``with`` block closes each file;
    # the original's extra f.close() calls inside it were redundant.
    for name in ['filecount', 'system_stats', 'filecount_type', 'size_type']:
        script, div = self.embed('fig_' + name)

        div_outfile = os.path.join(self.outputs_dir, "{}_component.html".format(name))
        with open(div_outfile, 'w') as f:
            f.write(div)
        set_permissions(div_outfile)

        script_outfile = os.path.join(self.outputs_dir, "{}_component.js".format(name))
        with open(script_outfile, 'w') as f:
            f.write(script)
        set_permissions(script_outfile)

        logging.info(
            'Saved components files: {}_component.html and {}_component.js'
            .format(name, name))

    logging.info('Filesystem statistics plot updates complete.')
def monitor(self):
    """Monitoring script to inventory the JWST filesystem, save file
    statistics, and generate plots.

    Walks ``self.filesystem`` counting files and byte totals per FITS
    suffix and per instrument, captures ``df`` disk usage, appends
    everything to the running ``self.results`` / ``self.sizes`` /
    ``self.statistics`` histories and to three text files in
    ``self.outputs_dir``, then refreshes the plots.
    """
    # Begin logging
    logging.info('Beginning filesystem monitoring.')

    # re-initialize dictionaries for output; defaultdicts let the walk
    # below increment arbitrary suffix/instrument keys without setup
    results_dict = defaultdict(int)
    size_dict = defaultdict(float)

    # Walk through all directories recursively and count files
    logging.info('Searching filesystem...')
    for dirpath, dirs, files in os.walk(self.filesystem):
        results_dict['file_count'] += len(files)  # find number of all files
        for filename in files:
            file_path = os.path.join(dirpath, filename)
            if filename.endswith(".fits"):  # find total number of fits files
                results_dict['fits_files'] += 1
                size_dict['size_fits'] += os.path.getsize(file_path)
                # Count and size by pipeline suffix (uncal, cal, ...)
                suffix = filename_parser(filename)['suffix']
                results_dict[suffix] += 1
                size_dict[suffix] += os.path.getsize(file_path)
                detector = filename_parser(filename)['detector']
                instrument = detector[0:3]  # first three characters of detector specify instrument
                results_dict[instrument] += 1
                size_dict[instrument] += os.path.getsize(file_path)
    logging.info('{} files found in filesystem'.format(results_dict['fits_files']))

    # Get df style stats on file system
    out = subprocess.check_output('df {}'.format(self.filesystem), shell=True)
    outstring = out.decode("utf-8")  # put into string for parsing from byte format
    parsed = outstring.split(sep=None)

    # Select desired elements from parsed string.
    # NOTE(review): indices 8-11 assume the standard two-line `df`
    # layout (7 header tokens + mount line) — confirm on the target
    # platform; a wrapped device name would shift these.
    stats = {
        'total': int(parsed[8]),  # in blocks of 512 bytes
        'used': int(parsed[9]),
        'available': int(parsed[10]),
        'percent_used': parsed[11],
        'file_count': results_dict.pop('file_count'),
        'timestamp': datetime.datetime.now().isoformat(sep='T', timespec='auto')  # get date of stats
    }

    # store results & sizes in the appropriate dictionaries
    for key, val in results_dict.items():
        self.results[key].append(val)
    for key, val in size_dict.items():
        self.sizes[key].append(val)
    for key, val in stats.items():
        self.statistics[key].append(val)

    # set up output file and write stats
    statsfile = os.path.join(self.outputs_dir, 'statsfile.txt')
    with open(statsfile, "a+") as f:
        f.write("{timestamp} {file_count:15d} {total:15d} {available:15d} {used:15d} {percent_used}\n".format(**stats))
    set_permissions(statsfile)
    logging.info('Saved file statistics to: {}'.format(statsfile))

    # NOTE(review): str.format(**results_dict) raises KeyError for any
    # field (e.g. 'rateints') whose suffix never appeared in the walk —
    # defaultdict defaults do not apply to ** unpacking. Also 'rateints'
    # here vs. the 'rateint' key read by update_plots looks
    # inconsistent — TODO confirm which suffix filename_parser emits.
    output_stub = "{fits_files} {uncal} {cal} {rate} {rateints} {i2d} {nrc} {nrs} {nis} {mir} {gui}\n"

    # set up and read out stats on files by type
    filesbytype = os.path.join(self.outputs_dir, 'filesbytype.txt')
    with open(filesbytype, "a+") as f2:
        f2.write(output_stub.format(**results_dict))
    set_permissions(filesbytype, verbose=False)
    logging.info('Saved file statistics by type to {}'.format(filesbytype))

    # set up file size by type file
    sizebytype = os.path.join(self.outputs_dir, 'sizebytype.txt')
    with open(sizebytype, "a+") as f3:
        f3.write(output_stub.format(**size_dict))
    set_permissions(sizebytype, verbose=False)
    logging.info('Saved file sizes by type to {}'.format(sizebytype))

    logging.info('Filesystem statistics calculation complete.')

    # Update the plots based on new information
    self.update_plots()
def monitor_filesystem():
    """Tabulates the inventory of the JWST filesystem, saving
    statistics to files, and generates plots.

    Standalone variant of the class-based ``monitor``: walks the
    configured filesystem, counts/sizes files by FITS suffix and
    instrument, captures ``df`` usage, appends three text files under
    the monitor output directory, and hands them to
    ``plot_system_stats``.
    """
    # Begin logging
    logging.info('Beginning filesystem monitoring.')

    # Get path, directories and files in system and count files in all directories
    settings = get_config()
    filesystem = settings['filesystem']
    outputs_dir = os.path.join(settings['outputs'], 'monitor_filesystem')

    # set up dictionaries for output; defaultdicts allow incrementing
    # arbitrary suffix/instrument keys without pre-declaring them
    results_dict = defaultdict(int)
    size_dict = defaultdict(float)
    # Walk through all directories recursively and count files
    logging.info('Searching filesystem...')
    for dirpath, dirs, files in os.walk(filesystem):
        results_dict['file_count'] += len(files)  # find number of all files
        for filename in files:
            file_path = os.path.join(dirpath, filename)
            if filename.endswith(".fits"):  # find total number of fits files
                results_dict['fits_files'] += 1
                size_dict['size_fits'] += os.path.getsize(file_path)
                # Tally by pipeline suffix (uncal, cal, rate, ...)
                suffix = filename_parser(filename)['suffix']
                results_dict[suffix] += 1
                size_dict[suffix] += os.path.getsize(file_path)
                detector = filename_parser(filename)['detector']
                instrument = detector[0:3]  # first three characters of detector specify instrument
                results_dict[instrument] += 1
                size_dict[instrument] += os.path.getsize(file_path)
    logging.info('{} files found in filesystem'.format(results_dict['fits_files']))

    # Get df style stats on file system
    out = subprocess.check_output('df {}'.format(filesystem), shell=True)
    outstring = out.decode("utf-8")  # put into string for parsing from byte format
    parsed = outstring.split(sep=None)

    # Select desired elements from parsed string.
    # NOTE(review): indices 8-11 assume the standard `df` layout
    # (7 header tokens then the data row) — confirm on the deployment
    # platform.
    total = int(parsed[8])  # in blocks of 512 bytes
    used = int(parsed[9])
    available = int(parsed[10])
    percent_used = parsed[11]

    # Save stats for plotting over time
    now = datetime.datetime.now().isoformat(sep='T', timespec='auto')  # get date of stats

    # set up output file and write stats
    statsfile = os.path.join(outputs_dir, 'statsfile.txt')
    with open(statsfile, "a+") as f:
        f.write("{0} {1:15d} {2:15d} {3:15d} {4:15d} {5}\n".format(
            now, results_dict['file_count'], total, available, used,
            percent_used))
    set_permissions(statsfile)
    logging.info('Saved file statistics to: {}'.format(statsfile))

    # set up and read out stats on files by type
    filesbytype = os.path.join(outputs_dir, 'filesbytype.txt')
    with open(filesbytype, "a+") as f2:
        f2.write("{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}\n".format(
            results_dict['fits_files'], results_dict['uncal'],
            results_dict['cal'], results_dict['rate'],
            results_dict['rateints'], results_dict['i2d'],
            results_dict['nrc'], results_dict['nrs'], results_dict['nis'],
            results_dict['mir'], results_dict['gui']))
    set_permissions(filesbytype, verbose=False)
    logging.info('Saved file statistics by type to {}'.format(filesbytype))

    # set up file size by type file
    sizebytype = os.path.join(outputs_dir, 'sizebytype.txt')
    with open(sizebytype, "a+") as f3:
        f3.write("{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}\n".format(
            size_dict['size_fits'], size_dict['uncal'], size_dict['cal'],
            size_dict['rate'], size_dict['rateints'], size_dict['i2d'],
            size_dict['nrc'], size_dict['nrs'], size_dict['nis'],
            size_dict['mir'], size_dict['gui']))
    set_permissions(sizebytype, verbose=False)
    logging.info('Saved file sizes by type to {}'.format(sizebytype))

    logging.info('Filesystem statistics calculation complete.')

    # Create the plots
    plot_system_stats(statsfile, filesbytype, sizebytype)
def plot_system_stats(stats_file, filebytype, sizebytype):
    """Read in the file of saved stats over time and plot them.

    Produces four Bokeh figures (total file count, system sizes, file
    counts by type, file sizes by type), saves them together as one
    HTML grid, and exports each figure's embeddable components.

    Parameters
    ----------
    stats_file : str
        file containing information of stats over time
    filebytype : str
        file containing information of file counts by type over time
    sizebytype : str
        file containing information on file sizes by type over time
    """
    # get path for files
    settings = get_config()
    outputs_dir = os.path.join(settings['outputs'], 'monitor_filesystem')

    # read in file of statistics
    date, f_count, sysize, frsize, used, percent = np.loadtxt(
        stats_file, dtype=str, unpack=True)
    fits_files, uncalfiles, calfiles, ratefiles, rateintsfiles, i2dfiles, nrcfiles, nrsfiles, nisfiles, mirfiles, fgsfiles = np.loadtxt(
        filebytype, dtype=str, unpack=True)
    fits_sz, uncal_sz, cal_sz, rate_sz, rateints_sz, i2d_sz, nrc_sz, nrs_sz, nis_sz, mir_sz, fgs_sz = np.loadtxt(
        sizebytype, dtype=str, unpack=True)
    logging.info('Read in file statistics from {}, {}, {}'.format(
        stats_file, filebytype, sizebytype))

    # put in proper np array types and convert to GB sizes
    dates = np.array(date, dtype='datetime64')
    file_count = f_count.astype(float)
    systemsize = sysize.astype(float) / (1024.**3)
    freesize = frsize.astype(float) / (1024.**3)
    usedsize = used.astype(float) / (1024.**3)

    fits = fits_files.astype(int)
    uncal = uncalfiles.astype(int)
    cal = calfiles.astype(int)
    rate = ratefiles.astype(int)
    rateints = rateintsfiles.astype(int)
    i2d = i2dfiles.astype(int)
    nircam = nrcfiles.astype(int)
    nirspec = nrsfiles.astype(int)
    niriss = nisfiles.astype(int)
    miri = mirfiles.astype(int)
    fgs = fgsfiles.astype(int)

    fits_size = fits_sz.astype(float) / (1024.**3)
    uncal_size = uncal_sz.astype(float) / (1024.**3)
    cal_size = cal_sz.astype(float) / (1024.**3)
    rate_size = rate_sz.astype(float) / (1024.**3)
    rateints_size = rateints_sz.astype(float) / (1024.**3)
    i2d_size = i2d_sz.astype(float) / (1024.**3)
    nircam_size = nrc_sz.astype(float) / (1024.**3)
    nirspec_size = nrs_sz.astype(float) / (1024.**3)
    niriss_size = nis_sz.astype(float) / (1024.**3)
    miri_size = mir_sz.astype(float) / (1024.**3)
    fgs_size = fgs_sz.astype(float) / (1024.**3)

    # plot the data
    # Plot filecount vs. date
    p1 = figure(tools='pan,box_zoom,reset,wheel_zoom,save',
                x_axis_type='datetime',
                title="Total File Counts",
                x_axis_label='Date',
                y_axis_label='Count')
    p1.line(dates, file_count, line_width=2, line_color='blue')
    p1.circle(dates, file_count, color='blue')

    # Plot system stats vs. date
    p2 = figure(tools='pan,box_zoom,wheel_zoom,reset,save',
                x_axis_type='datetime',
                title='System stats',
                x_axis_label='Date',
                y_axis_label='GB')
    p2.line(dates, systemsize, legend='Total size', line_color='red')
    p2.circle(dates, systemsize, color='red')
    p2.line(dates, freesize, legend='Free bytes', line_color='blue')
    p2.circle(dates, freesize, color='blue')
    p2.line(dates, usedsize, legend='Used bytes', line_color='green')
    p2.circle(dates, usedsize, color='green')

    # Plot fits files by type vs. date
    p3 = figure(tools='pan,box_zoom,wheel_zoom,reset,save',
                x_axis_type='datetime',
                title="Total File Counts by Type",
                x_axis_label='Date',
                y_axis_label='Count')
    p3.line(dates, fits, legend='Total fits files', line_color='black')
    p3.circle(dates, fits, color='black')
    p3.line(dates, uncal, legend='uncalibrated fits files', line_color='red')
    p3.diamond(dates, uncal, color='red')
    p3.line(dates, cal, legend='calibrated fits files', line_color='blue')
    # BUG FIX: the original passed the raw string array ``date`` here,
    # which does not match the datetime x-axis; use ``dates``.
    p3.square(dates, cal, color='blue')
    p3.line(dates, rate, legend='rate fits files', line_color='green')
    p3.triangle(dates, rate, color='green')
    p3.line(dates, rateints, legend='rateints fits files', line_color='orange')
    p3.asterisk(dates, rateints, color='orange')
    p3.line(dates, i2d, legend='i2d fits files', line_color='purple')
    p3.x(dates, i2d, color='purple')
    p3.line(dates, nircam, legend='nircam fits files', line_color='midnightblue')
    p3.x(dates, nircam, color='midnightblue')
    p3.line(dates, nirspec, legend='nirspec fits files', line_color='springgreen')
    p3.x(dates, nirspec, color='springgreen')
    p3.line(dates, niriss, legend='niriss fits files', line_color='darkcyan')
    p3.x(dates, niriss, color='darkcyan')
    p3.line(dates, miri, legend='miri fits files', line_color='dodgerblue')
    p3.x(dates, miri, color='dodgerblue')
    p3.line(dates, fgs, legend='fgs fits files', line_color='darkred')
    p3.x(dates, fgs, color='darkred')

    # plot size of total fits files by type
    p4 = figure(tools='pan,box_zoom,wheel_zoom,reset,save',
                x_axis_type='datetime',
                title="Total File Sizes by Type",
                x_axis_label='Date',
                y_axis_label='GB')
    p4.line(dates, fits_size, legend='Total fits files', line_color='black')
    p4.circle(dates, fits_size, color='black')
    p4.line(dates, uncal_size, legend='uncalibrated fits files', line_color='red')
    p4.diamond(dates, uncal_size, color='red')
    p4.line(dates, cal_size, legend='calibrated fits files', line_color='blue')
    # BUG FIX: same ``date`` -> ``dates`` correction as in p3 above.
    p4.square(dates, cal_size, color='blue')
    p4.line(dates, rate_size, legend='rate fits files', line_color='green')
    p4.triangle(dates, rate_size, color='green')
    p4.line(dates, rateints_size, legend='rateints fits files', line_color='orange')
    p4.asterisk(dates, rateints_size, color='orange')
    p4.line(dates, i2d_size, legend='i2d fits files', line_color='purple')
    p4.x(dates, i2d_size, color='purple')
    p4.line(dates, nircam_size, legend='nircam fits files', line_color='midnightblue')
    p4.x(dates, nircam_size, color='midnightblue')
    p4.line(dates, nirspec_size, legend='nirspec fits files', line_color='springgreen')
    p4.x(dates, nirspec_size, color='springgreen')
    p4.line(dates, niriss_size, legend='niriss fits files', line_color='darkcyan')
    p4.x(dates, niriss_size, color='darkcyan')
    p4.line(dates, miri_size, legend='miri fits files', line_color='dodgerblue')
    p4.x(dates, miri_size, color='dodgerblue')
    p4.line(dates, fgs_size, legend='fgs fits files', line_color='darkred')
    p4.x(dates, fgs_size, color='darkred')

    # create a layout with a grid pattern to save all plots
    grid = gridplot([[p1, p2], [p3, p4]])
    outfile = os.path.join(outputs_dir, "filesystem_monitor.html")
    output_file(outfile)
    save(grid)
    set_permissions(outfile)
    logging.info('Saved plot of all statistics to {}'.format(outfile))

    # Save each plot's components; ``with`` closes the files, so the
    # original's explicit f.close() calls were dropped.
    plots = [p1, p2, p3, p4]
    plot_names = ['filecount', 'system_stats', 'filecount_type', 'size_type']
    for plot, name in zip(plots, plot_names):
        plot.sizing_mode = 'stretch_both'
        script, div = components(plot)

        div_outfile = os.path.join(outputs_dir, "{}_component.html".format(name))
        with open(div_outfile, 'w') as f:
            f.write(div)
        set_permissions(div_outfile)

        script_outfile = os.path.join(outputs_dir, "{}_component.js".format(name))
        with open(script_outfile, 'w') as f:
            f.write(script)
        set_permissions(script_outfile)

        logging.info(
            'Saved components files: {}_component.html and {}_component.js'.
            format(name, name))

    logging.info('Filesystem statistics plotting complete.')

    # Begin logging:
    logging.info("Completed.")