        # convert to the appropriate objects
        files = list(map(watchFactory, files))
        return files

    def run(self):
        if not self.watching:
            print("[watch] No files to watch.")
            sys.exit(0)
        global POLLING_INTERVAL
        while True:
            time.sleep(POLLING_INTERVAL)
            for f in self.watching:
                if f.hasChanged():
                    verbose.changeReport(f)
                    Popen(self.cmd)


if __name__ == "__main__":
    w = Watcher(getArguments(POLLING_INTERVAL))
    try:
        w.run()
    except KeyboardInterrupt:
        print("\n")
        sys.exit(0)
    except:
        raise
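# Illustrative sketch only: watchFactory and the watched-file objects used by
# Watcher.run() are defined elsewhere in the project and are not shown here.
# The names WatchedFile and the mtime-based check below are assumptions showing
# one plausible way hasChanged() could work, not the project's implementation.
import os


class WatchedFile(object):
    """Hypothetical watched-file wrapper exposing the hasChanged() check."""

    def __init__(self, path):
        self.path = path
        self.mtime = os.path.getmtime(path)

    def hasChanged(self):
        # Report a change whenever the file's modification time moves.
        current = os.path.getmtime(self.path)
        if current != self.mtime:
            self.mtime = current
            return True
        return False


def watchFactory(path):
    # Wrap a plain path string in a WatchedFile instance.
    return WatchedFile(path)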
# SPDX-License-Identifier: MIT
from __future__ import print_function
import numpy as np
import timeit
from numpy.random import rand
from sklearn import linear_model
from args import getArguments, coreString
import sklearn
import bench
import argparse

argParser = argparse.ArgumentParser(prog="ridge.py",
                                    description="sklearn ridge regression benchmark",
                                    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args = getArguments(argParser)

REP = args.iteration if args.iteration != '?' else 10
core_number, daal_version = bench.prepare_benchmark(args)


def st_time(func):
    def st_func(*args, **keyArgs):
        times = []
        for n in range(REP):
            t1 = timeit.default_timer()
            r = func(*args, **keyArgs)
            t2 = timeit.default_timer()
            times.append(t2 - t1)
        print(min(times))
        return r
    return st_func
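# Usage sketch (not part of the benchmark script above): st_time wraps the
# timed step so that only the best of REP runs is reported. The test_fit
# function and the synthetic data below are assumptions for illustration.
@st_time
def test_fit(X, y):
    # Fit a plain sklearn ridge regression on the given data.
    regr = linear_model.Ridge()
    regr.fit(X, y)


X = rand(1000, 50)
y = rand(1000)
test_fit(X, y)  # prints the fastest of REP timed runs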
                          key=operator.itemgetter(1), reverse=True)
    # print("\nSeeds: {}\n".format(sorted_seeds))

    # walk the ranked seeds and keep the best ones not already guaranteed
    opt_seed = []
    i = 0
    while len(opt_seed) < size:
        seed = int(sorted_seeds[i][0])
        if seed not in guaranteed:
            opt_seed.append(seed)
        i += 1

    # select top k users and compute inf_score
    score = inf_score_est_mp(graph, opt_seed)

    file_path = "data/{}/sim/opt_seed_{}.csv".format(dataset, model)
    with open(file_path, 'w') as f:
        for seed in opt_seed:
            f.write(str(seed))
            f.write("\n")
    print("Optimal seed set saved to {}".format(file_path))

    msg = "Best seed set score: {}"
    print(msg.format(score))


if __name__ == "__main__":
    args = args.getArguments("SIM")
    # run optimal seed selection for the given graph
    for model in args.models:
        for reach in args.reach:
            run(args.dataset, model, args.simulations)
                    choices=['daal', 'full'], default='daal',
                    help='SVD solver to use')
parser.add_argument('--n-components', type=int, default=None,
                    help='Number of components to find')
parser.add_argument('--whiten', action='store_true', default=False,
                    help='Perform whitening')
parser.add_argument('--write-results', action='store_true', default=False,
                    help='Write results to disk for verification')
args = getArguments(parser)

REP = args.iteration if args.iteration != '?' else 10
core_number, daal_version = prepare_benchmark(args)


def st_time(func):
    def st_func(*args, **keyArgs):
        times = []
        for n in range(REP):
            t1 = timeit.default_timer()
            r = func(*args, **keyArgs)
            t2 = timeit.default_timer()
            times.append(t2 - t1)
        print(min(times))
        return r
    return st_func
import args
from research_data import import_graph_data


def run(dataset):
    # step 1: load the graph
    graph = import_graph_data(dataset)[0]

    # step 2: collect the node ids and sort them
    sorted_keys = sorted(graph.keys())

    # step 3: build an equivalency dict mapping each original id to its index
    equivalency = {}
    for i, key in enumerate(sorted_keys):
        equivalency[key] = i

    # step 4: save the re-indexed graph back to file
    file_name = "data/{}/indexed_{}_wc.inf".format(dataset, dataset)
    with open(file_name, 'w') as f:
        for key in graph.keys():
            for neighbor in graph[key].keys():
                line = str(equivalency[key]) + ' '
                line += str(equivalency[neighbor]) + ' '
                line += str(graph[key][neighbor]) + "\n"
                f.write(line)


if __name__ == "__main__":
    args = args.getArguments("idToIndex")
    run(args.dataset)
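# Tiny illustrative example with assumed data (not from the project's datasets):
# the remapping replaces arbitrary node ids with dense 0-based indices while
# keeping neighbours and edge weights. to_indexed is a standalone helper written
# only for this sketch; the script above does the same thing on disk.
def to_indexed(graph):
    equivalency = {node: i for i, node in enumerate(sorted(graph.keys()))}
    return {
        equivalency[key]: {equivalency[n]: w for n, w in graph[key].items()}
        for key in graph
    }


# {42: {7: 0.5}, 7: {42: 0.25}} becomes {1: {0: 0.5}, 0: {1: 0.25}}
print(to_indexed({42: {7: 0.5}, 7: {42: 0.25}}))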