def test_rdtsc(coop=True, repetitions=config.REPETITIONS):
    '''Measure the execution time of our timers.

    This needs a custom version of firefox where performance.rdtsc is
    implemented. Reference the executable in the config file.
    Writes the output in a different file than the other experiments.

    Parameters:
        coop(bool): True to activate coop/coep, False otherwise.
        repetitions(int): The number of measurements. Default is set in config file.
    '''
    print('Testing RDTSC')
    if not config.RDTSC_EXEC['firefox']:
        # Custom build path not configured: skip instead of crashing.
        print('You have not defined the path to custom rdtsc Firefox.')
        print('Please set it up properly in the config file.')
        print('Skipping...')
        return
    results = {}
    fp = webdriver.FirefoxProfile()
    binary = FirefoxBinary(config.RDTSC_EXEC['firefox'])
    with (webdriver.Firefox(firefox_binary=binary,
                            firefox_profile=fp)) as driver:
        if coop:
            driver.get(config.URLS['rdtsc'] + "?coop=True")
        else:
            driver.get(config.URLS['rdtsc'])
        # BUG FIX: the measurements were never collected -- `repetitions` was
        # unused and only {'name', 'coop'} got written to the results file.
        # Fetch the timer measurements exactly like the Chrome variant does.
        results = driver.execute_script("return getTimerMeasurements(" +
                                        str(repetitions) + ")")
        driver.close()
    results['name'] = 'rdtsc'
    results['coop'] = coop
    utility.write_results(results, 'rdtsc', 'firefox')
    print('Done')
    print()
def test_rdtsc(coop=True, repetitions=config.REPETITIONS):
    '''Measure the execution time of our timers.

    This needs a custom version of Chromium where performance.rdtsc is
    implemented. Reference the executable in the config file.
    Writes the output in a different file than the other experiments.

    Parameters:
        coop(bool): True to activate coop/coep, False otherwise.
        repetitions(int): The number of measurements. Default is set in config file.
    '''
    print('Testing RDTSC')
    if not config.RDTSC_EXEC['chrome']:
        # Bail out early when the custom build is not configured.
        print('You have not defined the path to custom rdtsc Chrome.')
        print('Please set it up properly in the config file.')
        print('Also set the adequate binding to the chromedriver.')
        print('Skipping...')
        return
    results = {}
    chrome_options = Options()
    chrome_options.binary_location = config.RDTSC_EXEC['chrome']
    driver_address = get_driver('rdtsc')
    with webdriver.Chrome(driver_address, options=chrome_options) as driver:
        # Append the coop query parameter only when COOP/COEP is requested.
        target = config.URLS['rdtsc'] + "?coop=True" if coop else config.URLS['rdtsc']
        driver.get(target)
        results = driver.execute_script("return getTimerMeasurements(" +
                                        str(repetitions) + ")")
        driver.close()
    results['name'] = 'rdtsc'
    results['coop'] = coop
    utility.write_results(results, 'rdtsc', 'chrome')
    print('Done')
    print()
def test_interpolation_distribution(version, coop=True, repetitions=config.REPETITIONS):
    '''Test repeatedly the number of maximum incrementations in a clock edge.

    This gives indication of both resolution (average number of
    incrementations) and jitter (variance of the number of ticks). It also
    allows to plot the histogram of the distribution of the number of
    incrementations. Writes the result in the folder div.

    Parameters:
        version(int): Tested version
        coop(bool): True to activate coop/coep, False otherwise.
        repetitions(int): The number of measurements. Default is set in config file.
    '''
    print('Testing the number of ticks in a clock period on Chrome ' +
          str(version) + ", " + str(repetitions) + ' repetitions.')
    if coop:
        print('COOP/COEP is on.')
    else:
        print('COOP/COEP is off.')
    options = Options()
    options.binary_location = config.BROWSER_DIR['chrome'] + "chrome-" + str(
        version) + "/opt/google/chrome/google-chrome"
    driver_address = get_driver(version)
    # Sentinel value: stays {'Failed'} unless the in-page script succeeds.
    timings = {'Failed'}
    try:
        with (webdriver.Chrome(driver_address, options=options)) as driver:
            if coop:
                driver.get(config.URLS['distribution'] + "?coop=True")
            else:
                driver.get(config.URLS['distribution'])
            try:
                timings = driver.execute_script("return test_clock_edges(" +
                                                str(repetitions) + ")")
            # BUG FIX: was a bare `except:` -- it swallowed SystemExit /
            # KeyboardInterrupt and hid the actual error. Narrowed and logged.
            except Exception as e:
                print('Failed executing script.')
                print(e)
            driver.close()
    except Exception as e:
        print('Something went wrong when starting the browser.')
        print(e)
    if timings != {'Failed'}:
        stats = utility.get_stats(timings)
        # NOTE: 'Tick distibution' typo kept on purpose -- the Firefox variant
        # writes the same label and downstream analysis may key on it.
        stats['name'] = 'Tick distibution'
        stats['version'] = version
        stats['coop'] = coop
        utility.write_results(stats, version, 'chrome')
    else:
        print('Something went wrong.')
    print('Done')
    print()
def test_sab(version, coop=True):
    '''Perform the availablity checks for SABs for a specific version.

    Then writes in a file results: availablity and is COOP/COEP enabled.

    Parameters:
        version(int): Tested version
        coop(bool): True to activate coop/coep, False otherwise.
    '''
    print('Testing the availability of SABS in a clock period on Chrome ' +
          str(version))
    if coop:
        print('COOP/COEP is on.')
    else:
        print('COOP/COEP is off.')
    options = Options()
    options.binary_location = config.BROWSER_DIR['chrome'] + "chrome-" + str(
        version) + "/opt/google/chrome/google-chrome"
    driver_address = get_driver(version)
    with (webdriver.Chrome(driver_address, options=options)) as driver:
        if coop:
            driver.get(config.URLS['hit_miss'] + "?coop=True")
        else:
            driver.get(config.URLS['hit_miss'])
        try:
            # BUG FIX: the original compared the SharedArrayBuffer constructor
            # itself to the string 'undefined' (always false), so the 'False'
            # branch was unreachable and absence was only detected through the
            # ReferenceError fallback below. `typeof` is the correct probe and
            # never throws, even when the name is undeclared.
            sab_available = (driver.execute_script(
                """if (typeof SharedArrayBuffer == 'undefined'){return 'False'} else{return 'True'}"""
            ))
        except Exception:
            # Kept as a safety net for driver/script failures.
            sab_available = False
        driver.close()
    stats = {
        'name': 'SAB availablity',
        'sab_available': sab_available,
        'coop': coop,
        'version': version,
    }
    utility.write_results(stats, version, 'chrome')
    print('SABs available: ' + str(sab_available))
    print('Done')
    print()
def main(argv):
    '''Train a classifier on the train split and write test-set predictions to CSV.

    Parameters:
        argv: Command-line arguments (unused).
    '''
    # prepare training data
    training = load_instances("../../data/train")
    X_train = generate_features(training)
    y_train = load_labels(training)

    # build a model
    model = train_model(X_train, y_train)

    # prepare test data
    testing = load_instances("../../data/test")
    timestamps = load_timestamps(testing)
    X_test = generate_features(testing)

    # predict
    predictions = test_model(X_test, model)
    classlabels = convert_to_classlabels(predictions)

    # save results
    write_results(timestamps, classlabels, "./fingersense-test-labels.csv")
def main(argv):
    '''End-to-end pipeline: fit on train data, predict on test data, dump labels.

    Parameters:
        argv: Command-line arguments (unused).
    '''
    # prepare training data
    train_set = load_instances("../../data/train")
    features_tr = generate_features(train_set)
    labels_tr = load_labels(train_set)

    # build a model
    clf = train_model(features_tr, labels_tr)

    # prepare test data
    test_set = load_instances("../../data/test")
    features_ts = generate_features(test_set)
    ts_stamps = load_timestamps(test_set)

    # predict
    raw_preds = test_model(features_ts, clf)
    labels_out = convert_to_classlabels(raw_preds)

    # save results
    write_results(ts_stamps, labels_out, "./fingersense-test-labels.csv")
def test_interpolation_distribution(version, coop=True, repetitions=config.REPETITIONS):
    '''Test repeatedly the number of maximum incrementations in a clock edge.

    This gives indication of both resolution (average number of
    incrementations) and jitter (variance of the number of ticks). It also
    allows to plot the histogram of the distribution of the number of
    incrementations. Writes the result in the folder div.

    Parameters:
        version(int): Tested version
        coop(bool): True to activate coop/coep, False otherwise.
        repetitions(int): The number of measurements. Default is set in config file.
    '''
    print('Testing the number of ticks in a clock period on Firefox ' +
          str(version) + ", " + str(repetitions) + ' repetitions.')
    if coop:
        print('COOP/COEP is on.')
    else:
        print('COOP/COEP is off.')
    fp = webdriver.FirefoxProfile()
    binary = FirefoxBinary(config.BROWSER_DIR['firefox'] + "firefox-" +
                           str(version) + "/firefox")
    # ROBUSTNESS FIX: the Chrome variant guards browser startup and script
    # execution; this one crashed the whole campaign on one flaky version.
    # Same sentinel pattern as the Chrome variant.
    timings = {'Failed'}
    try:
        with (webdriver.Firefox(firefox_binary=binary,
                                firefox_profile=fp)) as driver:
            if coop:
                driver.get(config.URLS['distribution'] + "?coop=True")
            else:
                driver.get(config.URLS['distribution'])
            try:
                timings = driver.execute_script("return test_clock_edges(" +
                                                str(repetitions) + ")")
            except Exception as e:
                print('Failed executing script.')
                print(e)
            driver.close()
    except Exception as e:
        print('Something went wrong when starting the browser.')
        print(e)
    if timings != {'Failed'}:
        stats = utility.get_stats(timings)
        # NOTE: 'Tick distibution' typo kept on purpose -- the Chrome variant
        # writes the same label and downstream analysis may key on it.
        stats['name'] = 'Tick distibution'
        stats['version'] = version
        stats['coop'] = coop
        utility.write_results(stats, version, 'firefox')
    else:
        print('Something went wrong.')
    print('Done')
    print()
def main():
    '''Run the grid search on the CUP task, plot and evaluate the best model,
    then persist errors/accuracy and the plots.'''
    best_model = grid_search(create_model, (X_train, Y_train), (X_val, Y_val),
                             300, 10, param_grid=PARAM_GRID,
                             monitor_value='val_mee', ts=(X_test, Y_test),
                             max_evals=100, n_threads=5,
                             path_results='out/cup/grid_search_results.csv',
                             verbose=True)

    # Visualize training behaviour of the selected model.
    best_model.plot_loss(val=False, test=True, show=True)
    best_model.plot_metric(val=False, test=True, show=True)

    # Evaluate on both splits and collect the numbers side by side.
    tr_err, tr_acc = best_model.evaluate(X_train, Y_train)
    ts_err, ts_acc = best_model.evaluate(X_test, Y_test)
    res = {
        'Error': [tr_err, ts_err],
        'Accuracy': [tr_acc, ts_acc],
    }
    print(res)

    path_err = 'out/cup/grid_search_err'
    path_acc = 'out/cup/grid_search_acc'
    path_result_bestmodel = 'out/cup/grid_search_best_results.csv'
    save = (path_err, path_acc, path_result_bestmodel)
    write_results(res, best_model, save_plot_loss=path_err,
                  save_plot_metric=path_acc,
                  save_result=path_result_bestmodel, validation=False,
                  test=True, show=True)
''' # Loading the test data test_instances = load_instances("data/test") X_test = generate_features(test_instances) # fiting the svm model y_test = svm_model(ker="rbf",softmargin=10,training_set=X_train.T, training_labels= y_train.T, testing_set = X_test.T) #y_test = logisticRegression_model(X_train.T,y_train.T,X_test.T) # prepare test data timestamps = load_timestamps(test_instances) # predict #y_test = test_model(X_test, model) classlabels = convert_to_classlabels(y_test) # save results #print(accuracy(y_test_labels,y_test)) write_results(timestamps, classlabels, "./fingersense-test-labels.csv") # In[ ]:
def test_hits_misses(version, clock_method, coop=True, repetitions=config.REPETITIONS):
    '''Compute the access time of cache hits and misses for a specific clock.

    This method can take a while to run, especially with a lot of repetitions
    or a huge cache. Also note that running computations on the side will
    create a lot of noise, as the cache is shared by all processes.
    Timing measured by this measurement can be used to determine error rate
    as well as the impact of repetitions

    Parameters:
        version(int): Tested version
        clock_method(string): Which clock to use. Can be 'SharedArrayBuffer' or 'performance.now'.
        coop(bool): True to activate coop/coep, False otherwise.
        repetitions(int): The number of measurements. Default is set in config file.
    '''
    print('Testing hits/misses on Chrome ' + str(version) + " with " +
          clock_method + ", " + str(repetitions) + ' repetitions.')
    if coop:
        print('COOP/COEP is on.')
    else:
        print('COOP/COEP is off.')
    options = Options()
    options.binary_location = config.BROWSER_DIR['chrome'] + "chrome-" + str(
        version) + "/opt/google/chrome/google-chrome"
    driver_address = get_driver(version)
    # DEAD-CODE FIX: removed the unused `timings = {'Failed'}` sentinel that
    # was copy-pasted from the distribution test; this function signals
    # failure through the `results == {}` guard below instead.
    results = {}
    with (webdriver.Chrome(driver_address, options=options)) as driver:
        if coop:
            driver.get(config.URLS['hit_miss'] + "?coop=True")
        else:
            driver.get(config.URLS['hit_miss'])
        try:
            # Huge timeout: the in-page measurement can legitimately run for a
            # very long time with many repetitions.
            driver.set_script_timeout(10000000000)
            results = driver.execute_script(
                "return getHitMiss('" + clock_method + "'," +
                str(repetitions) +
                ")")  #This might fail if SABs are unavailable so we set a try
        except Exception as e:
            # This exception occurs when we try to access to SABs without
            # coop/coep on later versions.
            print(e)
            print('SABs are not available here, skipping')
            return
        driver.close()
    if results == {}:
        print('Something went wrong, skipping')
        return
    stat_hits = utility.get_stats(results['hits'])
    stat_hits['name'] = 'hit/miss'
    stat_hits['clock_method'] = clock_method
    stat_hits['hit/miss'] = 'hits'
    stat_hits['version'] = version
    stat_hits['coop'] = coop
    utility.write_results(stat_hits, version, 'chrome')
    stat_misses = utility.get_stats(results['misses'])
    stat_misses['name'] = 'hit/miss'
    stat_misses['clock_method'] = clock_method
    stat_misses['hit/miss'] = 'misses'
    stat_misses['version'] = version
    stat_misses['coop'] = coop
    utility.write_results(stat_misses, version, 'chrome')
    print('Done')
    print()
def test_sab(version, coop=True):
    '''Perform the availablity checks for SABs by testing different sets of flags for a specific version.

    As long as no set works, we keep adding some. If no flag work, we abandon.
    Then writes in a file results: availablity, needed flags, is COOP/COEP enabled.

    Parameters:
        version(int): Tested version
        coop(bool): True to activate coop/coep, False otherwise.
    '''
    print('Testing the availability of SABS in a clock period on Firefox ' +
          str(version))
    if coop:
        print('COOP/COEP is on.')
    else:
        print('COOP/COEP is off.')
    sab_available = False  # Flag for while loop, set to true when sabs are availables
    # Tracks which escalation steps have already been attempted, so each
    # branch below fires at most once.
    flag_dict = {
        'javascript.options.shared_memory': False,
        'dom.postMessage.sharedArrayBuffer.withCOOP_COEP': False,
        'browser.tabs.remote.useCrossOriginEmbedderPolicy': False,
        'browser.tabs.remote.useCrossOriginOpenerPolicy': False,
    }
    flags = []  # Store the needed flags
    while not sab_available:  #As long as sab are unavailable, we keep adding. Might need to add potential new flags here.
        sab_available = test_sab_flag(version, flags, coop)
        if ((not sab_available) and
            (not flag_dict['javascript.options.shared_memory'])
            ):  # Flags for versions 58 and later, before COOP/COEP
            print(
                "Test of SharedArrayBuffer failed, trying with javascript.options.shared_memory"
            )
            flags.append(('javascript.options.shared_memory', True))
            flag_dict['javascript.options.shared_memory'] = True
        elif (
            (not sab_available) and
            (not flag_dict['dom.postMessage.sharedArrayBuffer.withCOOP_COEP'])
        ):  #flag required after the implementation of coop/coep, but not their official release
            print(
                "Test of SharedArrayBuffer failed, trying with dom.postMessage.sharedArrayBuffer.withCOOP_COEP"
            )
            # These three prefs are toggled together: the COOP/COEP-era SAB
            # pref only has effect alongside the two cross-origin policy prefs.
            flags.append(
                ('dom.postMessage.sharedArrayBuffer.withCOOP_COEP', True))
            flags.append(
                ('browser.tabs.remote.useCrossOriginEmbedderPolicy', True))
            flags.append(
                ('browser.tabs.remote.useCrossOriginOpenerPolicy', True))
            flag_dict['dom.postMessage.sharedArrayBuffer.withCOOP_COEP'] = True
            flag_dict[
                'browser.tabs.remote.useCrossOriginEmbedderPolicy'] = True
            flag_dict['browser.tabs.remote.useCrossOriginOpenerPolicy'] = True
        else:
            # Every known flag set has been tried without success: give up.
            break
    stats = {
        'name': 'SAB availablity',
        'sab_available': sab_available,
        'coop': coop,
        'version': version,
        'flags': flags
    }
    utility.write_results(stats, version, 'firefox')
    print("SharedArrayBuffer: " + str(sab_available) +
          " (Can require flags.)")
    print()
def test_hits_misses(version, clock_method, coop=True, repetitions=config.REPETITIONS):
    '''Compute the access time of cache hits and misses for a specific clock.

    This method can take a while to run, especially with a lot of repetitions
    or a huge cache. Also note that running computations on the side will
    create a lot of noise, as the cache is shared by all processes.
    Timing measured by this measurement can be used to determine error rate
    as well as the impact of repetitions

    Parameters:
        version(int): Tested version
        clock_method(string): Which clock to use. Can be 'SharedArrayBuffer' or 'performance.now'.
        coop(bool): True to activate coop/coep, False otherwise.
        repetitions(int): The number of measurements. Default is set in config file.
    '''
    print('Testing hits/misses on Firefox ' + str(version) + " with " +
          clock_method + ", " + str(repetitions) + ' repetitions.')
    if coop:
        print('COOP/COEP is on.')
    else:
        print('COOP/COEP is off.')
    fp = webdriver.FirefoxProfile()
    binary = FirefoxBinary(config.BROWSER_DIR['firefox'] + "firefox-" +
                           str(version) + "/firefox")
    with (webdriver.Firefox(firefox_binary=binary,
                            firefox_profile=fp)) as driver:
        if coop:
            driver.get(config.URLS['hit_miss'] + "?coop=True")
        else:
            driver.get(config.URLS['hit_miss'])
        try:
            # Huge timeout: the in-page measurement can legitimately run for a
            # very long time with many repetitions.
            driver.set_script_timeout(10000000000)
            results = driver.execute_script(
                "return getHitMiss('" + clock_method + "'," +
                str(repetitions) +
                ")")  #This might fail if SABs are unavailable so we set a try
        # NOTE: handler order matters -- JavascriptException is a subclass of
        # WebDriverException, so the more specific case must come first.
        except JavascriptException as e:
            # This exception occurs when we try to access to SABs without coop/coep on later versions.
            print('SABs are not available without coop here, skipping')
            return
        except WebDriverException:
            # This exception occurs when SABs are disabled/not implemented, so mainly old versions.
            print('SABs are not available here, skipping')
            return
        driver.close()
    stat_hits = utility.get_stats(results['hits'])
    stat_hits['name'] = 'hit/miss'
    stat_hits['clock_method'] = clock_method
    stat_hits['hit/miss'] = 'hits'
    stat_hits['version'] = version
    stat_hits['coop'] = coop
    utility.write_results(stat_hits, version, 'firefox')
    stat_misses = utility.get_stats(results['misses'])
    stat_misses['name'] = 'hit/miss'
    stat_misses['clock_method'] = clock_method
    stat_misses['hit/miss'] = 'misses'
    stat_misses['version'] = version
    stat_misses['coop'] = coop
    utility.write_results(stat_misses, version, 'firefox')
    print('Done')
    print()