ax1.grid() plt.grid() plt.xlim([0, 1400000]) ax1.legend(loc='upper left') ax2.legend(loc='upper right') ax1.set_xlabel('Time Sequence / Seconds') ax1.set_ylabel('#Jobs / 500 Seconds') ax2.set_ylabel('Mean #Jobs') plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0)) plt.savefig(prefix + '_direct_vs_bb_throughput.eps', fmt='eps') log.sub() if __name__ == '__main__': # logging.basicConfig(level=logging.DEBUG) logging.basicConfig(level=logging.INFO) log = IndentedLoggerAdapter(logging.getLogger(__name__)) log.info('Direct IO vs Cerberus') file_prefix = '1000jobs' figure_no = 0 first_row1 = ['jid', 'submit', 'iput', 'run', 'oput', 'complete', 'wait', 'response'] first_row3 = ['jid', 'submit', 'wait_in', 'iput', 'wait_run', 'run', 'wait_out', 'oput', 'complete', 'wait', 'response'] font = {'size': 16} matplotlib.rc('font', **font) matplotlib.rc('lines', lw=3) cdfPlot(file_prefix) cdfPlot(file_prefix, 'wait')
bbox_inches='tight') # plt.show() def main(train_file, test_file): means, stdevs = learnModel(train_file) win, var = inferenceTest(test_file, means, stdevs) print win print var plotAvgError(win, var) if __name__ == '__main__': # lg.basicConfig(level=lg.DEBUG) lg.basicConfig(level=lg.INFO) log = IndentedLoggerAdapter(lg.getLogger(__name__)) title = ['sensors', 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5, 14.0, 14.5, 15.0, 15.5, 16.0, 16.5, 17.0, 17.5, 18.0, 18.5, 19.0, 19.5, 20.0, 20.5, 21.0, 21.5, 22.0, 22.5, 23.0, 23.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5, 14.0, 14.5, 15.0, 15.5, 16.0, 16.5, 17.0, 17.5, 18.0, 18.5, 19.0, 19.5, 20.0, 20.5, 21.0, 21.5, 22.0, 22.5, 23.0, 23.5, 0.0] budget_cnts = [20] budget_cnts = [0, 5, 10, 20, 25] log.info('Processing Temperature') log.add() topic = 'temperature' main('intelTemperatureTrain.csv', 'intelTemperatureTest.csv')
from pnlpipe_lib import *
import pnlpipe_lib.dag as dag
from pnlpipe_software import BRAINSTools, trainingDataT1AHCC, FreeSurfer
import hashlib
from plumbum import local, FG
from pnlscripts import TemporaryDirectory, dwiconvert_py, alignAndCenter_py, \
    atlas_py, eddy_py, bet_py, wmql_py
import pnlpipe_config
import logging
from python_log_indenter import IndentedLoggerAdapter

logger = logging.getLogger(__name__)
log = IndentedLoggerAdapter(logger, indent_char='.')

# Root output directory taken from the project configuration.
OUTDIR = local.path(pnlpipe_config.OUTDIR)


def find_caseid(root):
    """Return the value of the 'caseid' tag found under *root*."""
    return find_tag(root, 'caseid')


def _lookupInputKey(key, caseid):
    """Resolve *key* through pnlpipe_config.INPUT_KEYS for a given case id.

    Substitutes *caseid* for the configured placeholder in the path format
    and returns it as a plumbum local path.  Raises a descriptive Exception
    when the key (or the placeholder entry) is missing from INPUT_KEYS.
    """
    try:
        path_format = pnlpipe_config.INPUT_KEYS[key]
        placeholder = pnlpipe_config.INPUT_KEYS['caseid_placeholder']
        return local.path(path_format.replace(placeholder, caseid))
    except KeyError as e:
        msg = """Key '{}' not found in pnlpipe_config.py:INPUT_KEYS. It might be misspelled, or you might need to add it if it's missing. """.format(e.args[0])
        raise Exception(msg)
def main(train_file, test_file): """ train_data = m * (3 day) shape matrix """ train_data = readInData(train_file) """ test_data = m * (2 day) shape matrix """ test_data = readInData(test_file) h_win_err, h_var_err = hourStationary(train_data, test_data) d_win_err, d_var_err = dayStationary(train_data, test_data) return h_win_err, h_var_err, d_win_err, d_var_err if __name__ == '__main__': # lg.basicConfig(level=lg.DEBUG) lg.basicConfig(level=lg.INFO) log = IndentedLoggerAdapter(lg.getLogger(__name__)) np.set_printoptions(precision=3) title = ['sensors', 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5, 14.0, 14.5, 15.0, 15.5, 16.0, 16.5, 17.0, 17.5, 18.0, 18.5, 19.0, 19.5, 20.0, 20.5, 21.0, 21.5, 22.0, 22.5, 23.0, 23.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5, 14.0, 14.5, 15.0, 15.5, 16.0, 16.5, 17.0, 17.5, 18.0, 18.5, 19.0, 19.5, 20.0, 20.5, 21.0, 21.5, 22.0, 22.5, 23.0, 23.5, 0.0] budget_cnts = [0, 5, 10, 20, 25] log.info('Processing temperature') topic = 'temperature' p2_h_win, p2_h_var, p2_d_win, p2_d_var = \
plt.ylim([0, 101]) plt.grid() plt.xlabel('Time Duration / Seconds') plt.ylabel('Cumulative Distribution Function / %') plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0)) plt.legend(loc='lower right') plt.savefig(prefix + '_dp_vs_fifo_%s.eps' % column, fmt='eps', bbox_inches='tight') log.sub() if __name__ == '__main__': # logging.basicConfig(level=logging.DEBUG) logging.basicConfig(level=logging.INFO) log = IndentedLoggerAdapter(logging.getLogger(__name__)) log.info('Dynamic Programming vs FIFO') file_prefix = '1000jobs' first_row3 = [ 'jid', 'submit', 'wait_in', 'iput', 'wait_run', 'run', 'wait_out', 'oput', 'complete', 'wait', 'response' ] first_row1 = [ 'jid', 'submit', 'iput', 'run', 'oput', 'complete', 'wait', 'response' ] figure_no = 0 font = {'size': 16} matplotlib.rc('font', **font) matplotlib.rc('lines', lw=3) cdfPlot(file_prefix, 'response') cdfPlot(file_prefix, 'wait')
import os
import importlib.util
import logging

import Framework.Utility as Utility
import Framework.JlscaBruteForceCalculation as JlscaBruteForceCalculation
from python_log_indenter import IndentedLoggerAdapter

logger = IndentedLoggerAdapter(logging.getLogger())


def cw_to_trs(root_dir, cw_dir, cw_date_string, trs_file, num_traces_to_test):
    """Convert a ChipWhisperer capture to a Jlsca .trs file via a Julia script.

    NOTE(review): the command line is assembled from plain strings and handed
    to Utility.execute — fine for trusted local paths, but confirm none of
    these arguments can ever come from untrusted input.
    """
    pieces = [
        "julia",
        os.path.join(root_dir, 'Framework', 'cw_to_trs.jl'),
        "--cw_date_string {}".format(cw_date_string),
        "--cw_trace_location {}".format(cw_dir),
        "--num_traces {}".format(num_traces_to_test),
        trs_file,
    ]
    Utility.execute(" ".join(pieces))


def attack_inc_cpa(root_dir, trs_file, attack_params, jlsca_log_file):
    """Run the incremental CPA Julia script, teeing output to jlsca_log_file."""
    pieces = ["julia", os.path.join(root_dir, 'Framework', 'inc_cpa.jl')]
    for key, value in attack_params.items():
        pieces.append(str(key))
        pieces.append(str(value))
    pieces.append(trs_file)
    Utility.execute(" ".join(pieces), jlsca_log_file)


def jlsca_analysis(root_dir, ProjectDir, local_setup):
    # NOTE(review): this chunk is truncated here — the remainder of this
    # function's body lies outside the visible source.
    logger = IndentedLoggerAdapter(logging.getLogger())
    logger.add()
def jlsca_analysis(root_dir, ProjectDir, local_setup):
    """Drive the full Jlsca attack workflow described by a LocalSetup module.

    For every trace count in LocalSetup.TraceNumList: convert the CW capture
    to .trs (unless already converted), run the incremental CPA attack, then
    compute how many brute-force guesses the attack result still requires.
    """
    logger = IndentedLoggerAdapter(logging.getLogger())
    logger.add()

    # Load the user-supplied LocalSetup module from the current directory.
    spec = importlib.util.spec_from_file_location(
        local_setup, os.path.join(os.getcwd(), local_setup))
    LocalSetup = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(LocalSetup)

    logger.info("Using parameters from LocalSetup:")
    logger.add()
    for key, value in LocalSetup.AttackParameters.items():
        logger.info(key + " " + str(value))
    logger.sub()

    # Convert CW files to trs files (if needed) and attack each trace count.
    for num_traces_to_test in LocalSetup.TraceNumList:
        # Trace file produced by conversion and consumed by the attack.
        # NOTE(review): string concatenation implies num_samples and the
        # entries of TraceNumList are strings — confirm in LocalSetup.
        trs_file = os.path.join(
            LocalSetup.TraceDir,
            LocalSetup.num_samples + "_samples_" + num_traces_to_test +
            "_traces_data.trs")
        # Attack log, parsed later by the brute-force calculation.
        jlsca_log_file = os.path.join(
            LocalSetup.ProjectDir, '_log',
            'jlsca_' + num_traces_to_test + '_traces.log')

        logger.info("Jlsca attack using {:>6} traces:".format(num_traces_to_test))
        logger.add()
        if not os.path.isfile(trs_file):
            logger.info("Converting CW project to JlSca trs format")
            cw_to_trs(root_dir, LocalSetup.CWFilesDir, LocalSetup.date_string,
                      trs_file, num_traces_to_test)
            logger.info("CW files converted into .trs file")
        logger.info("Running attack Incremental Correlation Power Analysis using Julia")
        attack_inc_cpa(root_dir, trs_file, LocalSetup.AttackParameters,
                       jlsca_log_file)
        logger.info("Attack Incremental Correlation Power Analysis completed")
        logger.info("Running calculation on number of guesses needed based on brute force algorithm")
        num_of_guesses = JlscaBruteForceCalculation.calculate(
            jlsca_log_file, LocalSetup.CWKnownKey,
            "--show-plots" in LocalSetup.AttackParameters)
        logger.info("Number of guesses calculation completed")
        logger.info("Number of guesses needed using {:>6} traces: {}".format(
            num_traces_to_test, num_of_guesses))
        logger.sub()
    logger.sub()
ax2.set_ylabel('Mean #Jobs') ax1.set_ylim([0, 16]) ax2.set_ylim([0, 2.4]) ax2.set_yticks(np.arange(0, 2.5, 0.3)) ax1.grid() plt.grid() plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0)) plt.savefig(prefix + '_3p_vs_1p_throughput.eps', fmt='eps') log.sub() if __name__ == '__main__': figure_no = 0 # logging.basicConfig(level=logging.DEBUG) logging.basicConfig(level=logging.INFO) log = IndentedLoggerAdapter(logging.getLogger(__name__)) log.info('3P vs 1P') file_prefix = '1000jobs' first_row3 = [ 'jid', 'submit', 'wait_in', 'iput', 'wait_run', 'run', 'wait_out', 'oput', 'complete', 'wait', 'response' ] first_row1 = [ 'jid', 'submit', 'iput', 'run', 'oput', 'complete', 'wait', 'response' ] font = {'size': 16} matplotlib.rc('font', **font) matplotlib.rc('lines', lw=3) timePlot(file_prefix, 'response') timePlot(file_prefix, 'wait') timePlot(file_prefix, 'wait_in')
return win_errs, var_errs def main(train_file, test_file): """train_data = m * (3 day) shape matrix""" train_data = readInData(train_file) """test_data = m * (2 day) shape matrix""" test_data = readInData(test_file) h_win_err, h_var_err = hourStationary(train_data, test_data) return h_win_err, h_var_err if __name__ == '__main__': # lg.basicConfig(level=lg.DEBUG) lg.basicConfig(level=lg.INFO) log = IndentedLoggerAdapter(lg.getLogger(__name__)) np.set_printoptions(precision=4) title = ['sensors', 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5, 14.0, 14.5, 15.0, 15.5, 16.0, 16.5, 17.0, 17.5, 18.0, 18.5, 19.0, 19.5, 20.0, 20.5, 21.0, 21.5, 22.0, 22.5, 23.0, 23.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5, 14.0, 14.5, 15.0, 15.5, 16.0, 16.5, 17.0, 17.5, 18.0, 18.5, 19.0, 19.5, 20.0, 20.5, 21.0, 21.5, 22.0, 22.5, 23.0, 23.5, 0.0] budget_cnts = [0, 5, 10, 20, 25] alpha = 0.08 topic = 'temperature_back_infer' log.info('Processing %s' % topic)
figure_no += 1 labels = ['FCFS Cerberus', 'MaxT Cerberus', 'MaxP Cerberus'] i = 0 for data in [data1, data2, data3]: time = data[column] sorted_time = np.sort(time) log.info('%s\'s %s = %.2f' % (labels[i], column, sorted_time[-1])) i += 1 log.sub() if __name__ == '__main__': # logging.basicConfig(level=logging.DEBUG) logging.basicConfig(level=logging.INFO) log = IndentedLoggerAdapter(logging.getLogger(__name__)) log.info('Dynamic Programming vs FIFO') file_prefix = '1000jobs' first_row3 = ['jid', 'submit', 'wait_in', 'iput', 'wait_run', 'run', 'wait_out', 'oput', 'complete', 'wait', 'response'] first_row1 = ['jid', 'submit', 'iput', 'run', 'oput', 'complete', 'wait', 'response'] figure_no = 0 font = {'size': 16} cdfPlot(file_prefix, 'response') cdfPlot(file_prefix, 'wait') cdfPlot(file_prefix, 'wait_in') cdfPlot(file_prefix, 'wait_run')
ax2.set_ylabel('Mean #Jobs') ax1.set_ylim([0, 16]) ax2.set_ylim([0, 2.4]) ax2.set_yticks(np.arange(0, 2.5, 0.3)) ax1.grid() plt.grid() plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0)) plt.savefig(prefix + '_3p_vs_1p_throughput.eps', fmt='eps') log.sub() if __name__ == '__main__': figure_no = 0 # logging.basicConfig(level=logging.DEBUG) logging.basicConfig(level=logging.INFO) log = IndentedLoggerAdapter(logging.getLogger(__name__)) log.info('3P vs 1P') file_prefix = '1000jobs' first_row3 = ['jid', 'submit', 'wait_in', 'iput', 'wait_run', 'run', 'wait_out', 'oput', 'complete', 'wait', 'response'] first_row1 = ['jid', 'submit', 'iput', 'run', 'oput', 'complete', 'wait', 'response'] font = {'size': 16} matplotlib.rc('font', **font) matplotlib.rc('lines', lw=3) timePlot(file_prefix, 'response') timePlot(file_prefix, 'wait') timePlot(file_prefix, 'wait_in')
def main(train_file, test_file):
    """Return hour-stationary window/variance errors for train vs. test data."""
    # train_data = m * (3 day) shape matrix
    train_data = readInData(train_file)
    # test_data = m * (2 day) shape matrix
    test_data = readInData(test_file)
    h_win_err, h_var_err = hourStationary(train_data, test_data)
    return h_win_err, h_var_err


if __name__ == '__main__':
    # lg.basicConfig(level=lg.DEBUG)
    lg.basicConfig(level=lg.INFO)
    log = IndentedLoggerAdapter(lg.getLogger(__name__))
    np.set_printoptions(precision=4)
    # 'sensors' header followed by 96 half-hour timestamps covering two days,
    # wrapping to 0.0 at each midnight (value-identical to the original
    # literal list; 0.5-multiples are exact in binary floating point).
    title = ['sensors'] + [(i % 48) * 0.5 for i in range(1, 97)]
    # budget_cnts = [5]
    budget_cnts = [0, 5, 10, 20, 25]
    alpha = 0.08
    topic = 'temperature'
# described here:
# https://www.tensorflow.org/alpha/guide/effective_tf2#api_cleanup
#
# The logger obtained by logging.get_logger() has a single element: an
# absl.logging.ABSLHandler object.  We manually replace this handler with a
# standard StreamHandler.
# See:
# https://docs.python.org/3/howto/logging-cookbook.html#logging-to-multiple-destinations
# NOTE(review): _logging_format and _LOGGING_LEVEL are defined outside this
# chunk — confirm they exist before this point in the full file.
_std_formatter = logging.Formatter(_logging_format)
_std_stream_handler = logging.StreamHandler()
_std_stream_handler.setFormatter(_std_formatter)

# Replace the root logger's handlers wholesale so absl's handler is dropped.
_std_logger = logging.getLogger()
_std_logger.handlers = [_std_stream_handler]
_std_logger.setLevel(_LOGGING_LEVEL)

std_logger = IndentedLoggerAdapter(_std_logger)
_logger_level_name = logging.getLevelName(std_logger.getEffectiveLevel())

# Turn off the TensorFlow logging.
# 0 = all messages are logged (default behavior)
# 1 = INFO messages are not printed
# 2 = INFO and WARNING messages are not printed
# 3 = INFO, WARNING, and ERROR messages are not printed
# See: https://stackoverflow.com/a/42121886
TensorFlowLoggingLevels = {
    logging.INFO: '0',
    logging.WARNING: '1',
    logging.ERROR: '2',
    'silent': '3',
}
import logging
import logging.config
from pathlib import Path

from python_log_indenter import IndentedLoggerAdapter

# All scrape output is logged under ./logs; create it on first run.
logging_dir = Path('logs')
logging_dir.mkdir(parents=True, exist_ok=True)

logging.basicConfig(
    filename=logging_dir / 'scrape_maps.log',
    format='%(levelname)7s - %(asctime)s: %(message)s'
)

log = IndentedLoggerAdapter(logging.getLogger(__name__))