def main():
    """Compare archived CBT test results against a baseline archive.

    Exits 1 when any test is rejected or when an expected result is
    missing from either archive; exits 0 only when every evaluated test
    passed and nothing was missing.
    """
    setup_loggers()
    parser = argparse.ArgumentParser(
        description='query and compare CBT test results')
    parser.add_argument(
        '-a', '--archive',
        required=True,
        help='Directory where the results to be compared are archived.')
    parser.add_argument(
        '-b', '--baseline',
        required=True,
        help='Directory where the baseline results are archived.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='be chatty')
    ctx = parser.parse_args(sys.argv[1:])
    # settings.initialize() expects ctx.config_file and ctx.conf
    ctx.config_file = os.path.join(ctx.archive, 'results', 'cbt_config.yaml')
    ctx.conf = None
    settings.initialize(ctx)
    results = []
    missing = False  # set when either archive lacks an expected result
    for iteration in range(settings.cluster.get('iterations', 0)):
        cluster = Ceph(settings.cluster)
        # Pair each archived result with the corresponding baseline result.
        benchmarks = list(
            zip(benchmarkfactory.get_all(ctx.archive, cluster, iteration),
                benchmarkfactory.get_all(ctx.baseline, cluster, iteration)))
        for current, baseline in benchmarks:
            if not current.exists(True):
                logger.error("tested: %s result does not exist in %s",
                             current, ctx.archive)
                missing = True
                break
            if not baseline.exists(True):
                logger.error("baseline: %s result does not exist in %s",
                             baseline, ctx.baseline)
                missing = True
                break
            results.extend(current.evaluate(baseline))
    accepted = sum(result.accepted for result in results)
    if ctx.verbose:
        for result in results:
            if result.accepted:
                logger.info(result)
            else:
                logger.warning(result)
    rejected = len(results) - accepted
    if rejected > 0:
        logger.warning("%d tests failed out of %d", rejected, len(results))
        sys.exit(1)
    elif missing:
        # FIX: previously a missing result only broke out of the loop and
        # the run could still report "All 0 tests passed." with exit 0.
        logger.error("expected results were missing; only %d tests evaluated",
                     len(results))
        sys.exit(1)
    else:
        logger.info("All %d tests passed.", len(results))
def main(argv):
    """Run every configured CBT benchmark for each test iteration.

    Returns 0 when all benchmarks and cleanups succeed, 1 if any test or
    cleanup step raised.
    """
    setup_loggers()
    ctx = parse_args(argv)
    settings.initialize(ctx)
    iteration = 0
    logger.debug("Settings.cluster:\n    %s",
                 pprint.pformat(settings.cluster).replace("\n", "\n    "))
    # Benchmark classes that were initialized once and must be cleaned up
    # at the end (insertion-ordered so cleanup mirrors initialization).
    global_init = collections.OrderedDict()
    # FIXME: Create ClusterFactory and parametrically match benchmarks and clusters.
    cluster = Ceph(settings.cluster)
    # E_OK
    return_code = 0
    try:
        for iteration in range(settings.cluster.get("iterations", 0)):
            archive_dir = settings.cluster.get('archive_dir')
            benchmarks = benchmarkfactory.get_all(archive_dir, cluster, iteration)
            for b in benchmarks:
                if b.exists():
                    continue
                # Tell the benchmark to initialize unless it's in the skip list.
                if b.getclass() not in global_init:
                    b.initialize()
                    # Skip future initializations unless rebuild requested.
                    if not settings.cluster.get('rebuild_every_test', False):
                        global_init[b.getclass()] = b
                # always try to initialize endpoints.
                b.initialize_endpoints()
                try:
                    b.run()
                finally:
                    if b.getclass() not in global_init:
                        b.cleanup()
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; interrupts now propagate (cleanup still runs
        # via the `finally` below).
        return_code = 1  # FAIL
        logger.exception("During tests")
    finally:
        for k, b in list(global_init.items()):
            try:
                b.cleanup()
            except Exception:
                # FIX: narrowed from a bare `except:` for the same reason.
                logger.exception("During %s cleanup", k)
                return_code = 1  # FAIL
    return return_code
def run(self):
    """Show the web app builder dialog.

    If the current QGIS project was already published as a web app (an
    ``.appdef`` file sits next to the project file), ask the user whether
    to reload that configuration and pre-populate the dialog with it.
    """
    appdef = None
    project_path = QgsProject.instance().fileName()
    appdef_path = project_path + ".appdef" if project_path else None
    if appdef_path and os.path.exists(appdef_path):
        answer = QMessageBox.question(
            self.iface.mainWindow(),
            "Web app builder",
            "This project has been already published as a web app.\n"
            "Do you want to reload app configuration?",
            QMessageBox.Yes | QMessageBox.No,
            QMessageBox.Yes)
        if answer == QMessageBox.Yes:
            appdef = loadAppdef(appdef_path)
    initialize()
    dlg = MainDialog(appdef)
    dlg.exec_()
def main(argv):
    """Run all configured benchmarks for each iteration.

    Returns 0 on success, 1 if any benchmark run or cleanup failed.
    """
    setup_loggers()
    ctx = parse_args(argv)
    settings.initialize(ctx)
    iteration = 0
    logger.debug("Settings.cluster:\n    %s",
                 pprint.pformat(settings.cluster).replace("\n", "\n    "))
    # Benchmark classes initialized once and cleaned up at the very end.
    global_init = collections.OrderedDict()
    # FIXME: Create ClusterFactory and parametrically match benchmarks and clusters.
    cluster = Ceph(settings.cluster)
    # E_OK
    return_code = 0
    try:
        for iteration in range(settings.cluster.get("iterations", 0)):
            benchmarks = benchmarkfactory.get_all(cluster, iteration)
            for b in benchmarks:
                if b.exists():
                    continue
                # Tell the benchmark to initialize unless it's in the skip list.
                if b.getclass() not in global_init:
                    b.initialize()
                    # Skip future initializations unless rebuild requested.
                    if not settings.cluster.get("rebuild_every_test", False):
                        global_init[b.getclass()] = b
                try:
                    b.run()
                finally:
                    if b.getclass() not in global_init:
                        b.cleanup()
    except Exception:
        # FIX: narrowed from a bare `except:` which also trapped
        # SystemExit/KeyboardInterrupt; cleanup still runs via `finally`.
        return_code = 1  # FAIL
        logger.exception("During tests")
    finally:
        for k, b in global_init.items():
            try:
                b.cleanup()
            except Exception:
                # FIX: narrowed from a bare `except:` for the same reason.
                logger.exception("During %s cleanup", k)
                return_code = 1  # FAIL
    return return_code
def main(argv):
    """Print pool placement counts while sweeping the OSD count from 5 to 19.

    FIX: `xrange` and print statements were Python-2-only syntax; `range`
    and `print(...)` produce identical output under both Python 2 and 3.
    """
    setup_loggers()
    ctx = parse_args(argv)
    settings.initialize(ctx)
    logger.debug("Settings.general:\n    %s",
                 pprint.pformat(settings.general).replace("\n", "\n    "))
    pool = Pool()
    pool.print_counts()
    for i in range(5, 20):
        print("")
        print("setting osds to: %s" % i)
        pool.set_osds(i)
        pool.print_counts()
def run(self):
    """Launch the main web-app-builder dialog, optionally seeded with the
    app definition previously saved alongside the current project file."""
    appdef = None
    proj = QgsProject.instance().fileName()
    candidate = (proj + ".appdef") if proj else ""
    if candidate and os.path.exists(candidate):
        # Offer to reuse the configuration from the earlier publication.
        choice = QMessageBox.question(
            self.iface.mainWindow(),
            "Web app builder",
            "This project has been already published as a web app.\n"
            "Do you want to reload app configuration?",
            QMessageBox.Yes | QMessageBox.No,
            QMessageBox.Yes)
        if choice == QMessageBox.Yes:
            appdef = loadAppdef(candidate)
    initialize()
    dlg = MainDialog(appdef)
    dlg.exec_()
def main(argv):
    """Print pool placement counts for a single OSD count (9).

    FIX: `xrange` and print statements were Python-2-only syntax; `range`
    and `print(...)` produce identical output under both Python 2 and 3.
    """
    setup_loggers()
    ctx = parse_args(argv)
    settings.initialize(ctx)
    logger.debug("Settings.general:\n    %s",
                 pprint.pformat(settings.general).replace("\n", "\n    "))
    pool = Pool()
    pool.print_counts()
    # Single-element sweep kept as a loop to mirror the sibling script.
    for i in range(9, 10):
        print("")
        print("setting osds to: %s" % i)
        pool.set_osds(i)
        pool.print_counts()
def __init__(self, parent):
    """Build the fractal viewer's main window.

    Lays out a scrollable canvas with mouse bindings, a coordinate status
    label, and the File/Render/Settings/Help menu bar, then blocks in
    ``mainloop()`` until the window is closed.
    """
    super().__init__()
    # Rendering / interaction state, populated once the user renders.
    self.last_render_function = None
    self.img_id = None
    self.mouse_down_position = None
    self.complex_plane = None
    self.parent = parent
    # self.root.title('{} - {}'.format(TITLE, VERSION))
    settings.initialize()
    coloring.initialize()
    # Scrollable drawing canvas with mouse bindings for interaction.
    self.canvas = tk.Canvas(self.parent)
    self.canvas.bind('<Motion>', self.mouse_move)
    self.canvas.bind('<ButtonPress-1>', self.mouse_down)
    self.canvas.bind('<ButtonRelease-1>', self.mouse_up)
    self.canvas.grid(row=0, column=0, sticky=N + E + S + W)
    self.scroll_x = tk.Scrollbar(self.parent, orient=tk.HORIZONTAL)
    self.scroll_y = tk.Scrollbar(self.parent, orient=tk.VERTICAL)
    self.scroll_x.config(command=self.canvas.xview)
    self.scroll_y.config(command=self.canvas.yview)
    self.scroll_x.grid(row=1, column=0, sticky=E + W)
    self.scroll_y.grid(row=0, column=1, sticky=N + S)
    self.canvas.config(xscrollcommand=self.scroll_x.set)
    self.canvas.config(yscrollcommand=self.scroll_y.set)
    # Status label showing the cursor's coordinates.
    self.coords_var = tk.StringVar()
    self.coords_var.set('###')
    self.coords_lbl = tk.Label(self.parent, textvariable=self.coords_var)
    self.coords_lbl.grid(row=2, column=0, columnspan=2, sticky=S + E)
    tk.Grid.rowconfigure(self.parent, 0, weight=1)
    tk.Grid.columnconfigure(self.parent, 0, weight=1)
    # NOTE(review): some menus are created via `tke.Menu` and others via
    # `tk.Menu` -- presumably `tke` is a themed/extended wrapper; confirm
    # the mix is intentional.
    self.menubar = tke.Menu(self.parent)
    self.parent.config(menu=self.menubar)
    self.filemenu = tk.Menu(self.menubar)
    # 'Save' is a placeholder that just prints 'sorry'.
    self.filemenu.add_command(label='Save', command=lambda: print('sorry'))
    # FIX: label typo 'Eport' -> 'Export' (it triggers export_image).
    self.filemenu.add_command(label='Export', command=self.export_image)
    # self.filemenu.add_separator()
    self.filemenu.add_command(label='Close', command=self.parent.quit)
    self.menubar.add_cascade(label='File', menu=self.filemenu)
    self.rendermenu = tke.Menu(self.menubar)
    self.rendermenu.add_command(label='Mandelbrot',
                                command=self.render_mandelbrot)
    self.rendermenu.add_command(label='Julia', command=self.render_julia)
    self.menubar.add_cascade(label='Render', menu=self.rendermenu)
    self.settingsmenu = tke.Menu(self.menubar)
    self.settingsmenu.add_command(label='Canvas',
                                  command=self.canvas_settings)
    self.settingsmenu.add_command(label='Mandelbrot',
                                  command=self.mandelbrot_settings)
    self.settingsmenu.add_command(label='Julia', command=self.julia_settings)
    self.settingsmenu.add_command(label='Coloring',
                                  command=self.coloring_settings)
    self.menubar.add_cascade(label='Settings', menu=self.settingsmenu)
    self.helpmenu = tke.Menu(self.menubar)
    self.helpmenu.add_command(label='About', command=self.about_dialog)
    self.menubar.add_cascade(label='Help', menu=self.helpmenu)
    self.parent.minsize(*WINDOW_SIZE_MIN)
    # Blocks here until the window is closed.
    self.parent.mainloop()
    # NOTE(review): fragment -- the enclosing argument-parser function's
    # `def` line is outside this view.
    parser.add_argument(
        '--conf',
        required = False,
        help = 'The ceph.conf file to use.',
        )
    parser.add_argument(
        'config_file',
        help = 'YAML config file.',
        )
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    # Parse CLI arguments and load the cluster configuration.
    ctx = parse_args()
    settings.initialize(ctx)
    iteration = 0
    # Python-2 print statement: this script predates Python 3.
    print settings.cluster
    # Benchmark classes that have already been initialized this run.
    global_init = {}
    # NOTE(review): `iteration` is never incremented in the visible code,
    # so this loop cannot terminate when iterations > 0 -- confirm whether
    # the increment exists outside this view.
    while (iteration < settings.cluster.get("iterations", 0)):
        benchmarks = benchmarkfactory.getAll(iteration)
        for b in benchmarks:
            # Skip benchmarks whose results already exist.
            if b.exists(): continue
            if not b.getclass() in global_init:
                b.initialize()
                # Reuse the initialized cluster unless a per-test rebuild
                # was requested.
                if not settings.cluster.get('rebuild_every_test', False):
                    global_init[b.getclass()] = True
            b.run()
def train(
        gpu: Param("GPU to run on", str) = None,
        woof: Param("Use imagewoof (otherwise imagenette)", int) = 0,
        lr: Param("Learning rate", float) = 1e-3,
        size: Param("Size (px: 128,192,224)", int) = 128,
        alpha: Param("Alpha", float) = 0.99,
        mom: Param("Momentum", float) = 0.9,
        eps: Param("epsilon", float) = 1e-6,
        epochs: Param("Number of epochs", int) = 5,
        bs: Param("Batch size", int) = 256,
        mixup: Param("Mixup", float) = 0.,
        # NOTE(review): more optimizer names are accepted below than the
        # help text lists.
        opt: Param("Optimizer (adam,rms,sgd)", str) = 'adam',
        arch: Param("Architecture (xresnet34, xresnet50)", str) = 'xresnet50',
        sa: Param("Self-attention", int) = 0,
        sym: Param("Symmetry for self-attention", int) = 0,
        dump: Param("Print model; don't train", int) = 0,
        lrfinder: Param("Run learning rate finder; don't train", int) = 0,
        log: Param("Log file name", str) = 'log',
        sched_type: Param("LR schedule type", str) = 'one_cycle',
        # NOTE(review): help text "Mixup" looks copy-pasted; this value is
        # used below as the flat-and-anneal start fraction -- confirm.
        ann_start: Param("Mixup", float) = -1.0,
        name_num: Param("Name_Number", int) = 1,
        run_num: Param("Run_Number", int) = 1
        ):
    """Distributed training of Imagenette.

    Builds a fastai Learner for the chosen xresnet architecture and
    optimizer, selects an activation function by index (``name_num``)
    from the project's ``functions`` mapping, and either trains, dumps
    the model, or runs the LR finder.  Returns the last recorded metric.
    """
    bs_one_gpu = bs  # remember the per-GPU batch size for LR scaling
    gpu = setup_distrib(gpu)
    # No specific GPU requested: scale batch over all visible devices.
    if gpu is None: bs *= torch.cuda.device_count()
    # Map the optimizer name to a partially-applied constructor.
    if opt=='adam' : opt_func = partial(optim.Adam, betas=(mom,alpha), eps=eps)
    elif opt=='radam' : opt_func = partial(RAdam, betas=(mom,alpha), eps=eps)
    elif opt=='novograd' : opt_func = partial(Novograd, betas=(mom,alpha), eps=eps)
    elif opt=='rms' : opt_func = partial(optim.RMSprop, alpha=alpha, eps=eps)
    elif opt=='sgd' : opt_func = partial(optim.SGD, momentum=mom)
    elif opt=='rangervar' : opt_func = partial(RangerVar, betas=(mom,alpha), eps=eps)
    elif opt=='ranger' : opt_func = partial(Ranger, betas=(mom,alpha), eps=eps)
    elif opt=='ralamb' : opt_func = partial(Ralamb, betas=(mom,alpha), eps=eps)
    elif opt=='over9000' : opt_func = partial(Over9000, k=12, betas=(mom,alpha), eps=eps)
    elif opt=='lookahead' : opt_func = partial(LookaheadAdam, betas=(mom,alpha), eps=eps)
    elif opt=='Adams': opt_func=partial(Adams)
    elif opt=='rangernovo': opt_func=partial(RangerNovo)
    elif opt=='rangerlars': opt_func=partial(RangerLars)
    data = get_data(size, woof, bs)
    # Scale the learning rate by the effective batch-size ratio.
    bs_rat = bs/bs_one_gpu  #originally bs/256
    if gpu is not None: bs_rat *= max(num_distrib(), 1)
    if not gpu: print(f'lr: {lr}; eff_lr: {lr*bs_rat}; size: {size}; alpha: {alpha}; mom: {mom}; eps: {eps}')
    lr *= bs_rat
    # Look up the architecture constructor by name in module globals.
    m = globals()[arch]
    # Select the activation function for this run by positional index.
    settings.initialize()
    func_list = [*functions]
    name = func_list[name_num]
    settings.activ = functions[name]
    print(" ---------------- Activation Function ---------------- ")
    print(settings.activ )
    print(" ---------------- Activation Function ---------------- ")
    log_cb = partial(CSVLogger, filename=log)
    # Checkpoint files are named after the activation and run number.
    result_path = settings.activ.replace("()","") + "_" + str(run_num) + "_best_model"
    learn = (Learner(data, m(c_out=10, sa=sa, sym=sym), wd=1e-2,
                     opt_func=opt_func,
                     metrics=[accuracy, top_k_accuracy],
                     bn_wd=False, true_wd=True,
                     loss_func = LabelSmoothingCrossEntropy(),
                     callback_fns=[log_cb, PeakMemMetric,
                                   partial(SaveModelCallback,
                                           every='epoch',
                                           monitor='accuracy',
                                           name = result_path)])
             )
    print(learn.path)
    # Convert the annealing start fraction into an iteration count.
    n = len(learn.data.train_dl)
    ann_start2 = int(n*epochs*ann_start)
    print(ann_start2, " annealing start")
    if dump: print(learn.model); exit()
    if mixup: learn = learn.mixup(alpha=mixup)
    # Mixed precision with dynamic loss scaling.
    learn = learn.to_fp16(dynamic=True)
    if gpu is None: learn.to_parallel()
    elif num_distrib() > 1: learn.to_distributed(gpu)  # Requires `-m fastai.launch`
    if lrfinder:
        # run learning rate finder
        # NOTE(review): local assignment -- has no effect outside this
        # function; presumably meant to toggle fastai notebook plotting.
        IN_NOTEBOOK = 1
        learn.lr_find(wd=1e-2)
        learn.recorder.plot()
    else:
        if sched_type == 'one_cycle':
            learn.fit_one_cycle(epochs, lr, div_factor=10, pct_start=0.3)
        elif sched_type == 'flat_and_anneal':
            fit_with_annealing(learn, epochs, lr, ann_start)
    return learn.recorder.metrics[-1][0]
def testGetName(self):
    """The 'name' preference from the test settings file is 'steve'."""
    import settings
    settings.initialize('testsettings')
    actual = settings.get_preference('name')
    self.assertEqual(actual, 'steve')
def __init__(self, parent):
    """Build the fractal viewer's main window.

    Lays out a scrollable canvas with mouse bindings, a coordinate status
    label, and the File/Render/Settings/Help menu bar, then blocks in
    ``mainloop()`` until the window is closed.
    """
    super().__init__()
    # Rendering / interaction state, populated once the user renders.
    self.last_render_function = None
    self.img_id = None
    self.mouse_down_position = None
    self.complex_plane = None
    self.parent = parent
    # self.root.title('{} - {}'.format(TITLE, VERSION))
    settings.initialize()
    coloring.initialize()
    # Scrollable drawing canvas with mouse bindings for interaction.
    self.canvas = tk.Canvas(self.parent)
    self.canvas.bind('<Motion>', self.mouse_move)
    self.canvas.bind('<ButtonPress-1>', self.mouse_down)
    self.canvas.bind('<ButtonRelease-1>', self.mouse_up)
    self.canvas.grid(row=0, column=0, sticky=N+E+S+W)
    self.scroll_x = tk.Scrollbar(self.parent, orient=tk.HORIZONTAL)
    self.scroll_y = tk.Scrollbar(self.parent, orient=tk.VERTICAL)
    self.scroll_x.config(command=self.canvas.xview)
    self.scroll_y.config(command=self.canvas.yview)
    self.scroll_x.grid(row=1, column=0, sticky=E+W)
    self.scroll_y.grid(row=0, column=1, sticky=N+S)
    self.canvas.config(xscrollcommand=self.scroll_x.set)
    self.canvas.config(yscrollcommand=self.scroll_y.set)
    # Status label showing the cursor's coordinates.
    self.coords_var = tk.StringVar()
    self.coords_var.set('###')
    self.coords_lbl = tk.Label(self.parent, textvariable=self.coords_var)
    self.coords_lbl.grid(row=2, column=0, columnspan=2, sticky=S+E)
    tk.Grid.rowconfigure(self.parent, 0, weight=1)
    tk.Grid.columnconfigure(self.parent, 0, weight=1)
    # NOTE(review): some menus are created via `tke.Menu` and others via
    # `tk.Menu` -- presumably `tke` is a themed/extended wrapper; confirm
    # the mix is intentional.
    self.menubar = tke.Menu(self.parent)
    self.parent.config(menu=self.menubar)
    self.filemenu = tk.Menu(self.menubar)
    # 'Save' is a placeholder that just prints 'sorry'.
    self.filemenu.add_command(label='Save', command=lambda:print('sorry'))
    # NOTE(review): 'Eport' looks like a typo for 'Export' (it triggers
    # export_image) -- confirm and fix the label.
    self.filemenu.add_command(label='Eport', command=self.export_image)
    # self.filemenu.add_separator()
    self.filemenu.add_command(label='Close', command=self.parent.quit)
    self.menubar.add_cascade(label='File', menu=self.filemenu)
    self.rendermenu = tke.Menu(self.menubar)
    self.rendermenu.add_command(label='Mandelbrot',
                                command=self.render_mandelbrot)
    self.rendermenu.add_command(label='Julia', command=self.render_julia)
    self.menubar.add_cascade(label='Render', menu=self.rendermenu)
    self.settingsmenu = tke.Menu(self.menubar)
    self.settingsmenu.add_command(label='Canvas',
                                  command=self.canvas_settings)
    self.settingsmenu.add_command(label='Mandelbrot',
                                  command=self.mandelbrot_settings)
    self.settingsmenu.add_command(label='Julia', command=self.julia_settings)
    self.settingsmenu.add_command(label='Coloring',
                                  command=self.coloring_settings)
    self.menubar.add_cascade(label='Settings', menu=self.settingsmenu)
    self.helpmenu = tke.Menu(self.menubar)
    self.helpmenu.add_command(label='About', command=self.about_dialog)
    self.menubar.add_cascade(label='Help', menu=self.helpmenu)
    self.parent.minsize(*WINDOW_SIZE_MIN)
    # Blocks here until the window is closed.
    self.parent.mainloop()
def testLotsOfSpaces(self):
    """Handles in a heavily space-padded preference are parsed cleanly."""
    import settings
    settings.initialize('testsettings')
    expected = ['stevenjarvis', 'allieehenry']
    self.assertEqual(settings.get_preference('tweet_lots_space'), expected)
def testIgnoreComments(self):
    """Commented-out lines in the settings file add no preferences."""
    import settings
    settings.initialize('testsettings')
    pref_count = len(settings.prefs)
    self.assertEqual(pref_count, 5)
def main():
    """Compare archived CBT results against a baseline archive.

    Optionally writes a markdown summary (heading plus a per-test table)
    to ``--output``, and exits 1 when any test result is rejected.
    """
    setup_loggers()
    parser = argparse.ArgumentParser(
        description='query and compare CBT test results')
    parser.add_argument(
        '-a', '--archive',
        required=True,
        help='Directory where the results to be compared are archived.')
    parser.add_argument(
        '-b', '--baseline',
        required=True,
        help='Directory where the baseline results are archived.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='be chatty')
    parser.add_argument('--output',
                        help='write result in markdown to specified file',
                        type=argparse.FileType('w'))
    ctx = parser.parse_args(sys.argv[1:])
    # settings.initialize() expects ctx.config_file and ctx.conf
    ctx.config_file = os.path.join(ctx.archive, 'results', 'cbt_config.yaml')
    ctx.conf = None
    settings.initialize(ctx)
    results = []
    for iteration in range(settings.cluster.get('iterations', 0)):
        cluster = Ceph(settings.cluster)
        # Pair each archived result with the corresponding baseline result.
        benchmarks = list(
            zip(benchmarkfactory.get_all(ctx.archive, cluster, iteration),
                benchmarkfactory.get_all(ctx.baseline, cluster, iteration)))
        for current, baseline in benchmarks:
            # NOTE(review): a missing result only breaks out of this
            # iteration's loop; with nothing rejected the run still exits 0
            # ("All ... passed") -- confirm that is intended.
            if not current.exists(True):
                logger.error("tested: %s result does not exist in %s",
                             current, ctx.archive)
                break
            if not baseline.exists(True):
                logger.error("baseline: %s result does not exist in %s",
                             baseline, ctx.baseline)
                break
            results.extend(current.evaluate(baseline))
    nr_accepted = sum(result.accepted for result in results)
    if ctx.verbose:
        for result in results:
            if result.accepted:
                logger.info(result)
            else:
                logger.warning(result)
    nr_tests = len(results)
    nr_rejected = nr_tests - nr_accepted
    if ctx.output:
        # Markdown report: a status heading followed by a per-test table.
        heading = None
        if nr_rejected:
            heading = Heading3(f'{nr_rejected} out of {nr_tests} failed')
        else:
            heading = Heading3(f'all {nr_tests} tests passed')
        ctx.output.write(str(heading))
        table = Table()
        table.add_headers('run', 'metric', 'baseline', 'result', 'accepted')
        for r in results:
            # NOTE(review): accepted rows render as a blank cell while
            # rejected rows render ':x:' -- presumably a checkmark emoji
            # was intended for the accepted case; confirm.
            table.add_cells(r.run, r.alias, r.baseline, r.result,
                            ' ' if r.accepted else ':x:')
        ctx.output.write(str(table))
    if nr_rejected > 0:
        logger.warning("%d tests failed out of %d", nr_rejected, len(results))
        sys.exit(1)
    else:
        logger.info("All %d tests passed.", len(results))
        # NOTE(review): fragment -- tail of a method whose `def` line is
        # outside this view; applies the selected algorithm and then
        # either renders (GUI mode) or saves to file.
        self.applyAlgoritm(self.algorithm, forceReset)
        if (self.fShowGUI):
            # render skeleton(s) at time 0
            self.selectTime(0)
        else:
            # write to file
            self.saveJSON()

    #--------------------------------------------------------------------------
    #
    def run(self):
        """Process the input file; in GUI mode, show the plots and block
        until the user closes them."""
        self.openAndProcessInputfile()
        if (self.fShowGUI):
            # show all the plots and wait for user input
            plt.show()

# -----------------------------------------------------------------------------
# applet's main entry point
#
if __name__ == '__main__':
    # initialize global variables
    settings.initialize()
    # create main application object, then run it
    settings.application = application()
    settings.application.run()
    exit()
import numpy as np
import matplotlib.pyplot as plt
import settings
import players
import update_decide
import PGG
import Measure
import time

# Build the interaction network (adjacency matrix) and player population.
[adj_mat, player_arr] = settings.initialize(1000)

# Number of simulation rounds.
# FIX: resolved an unmerged git conflict that left <<<<<<</=======/>>>>>>>
# markers in the file (a SyntaxError): HEAD said 5000, the incoming branch
# 7bb1087 said 20000.  Keeping HEAD's value; raise to 20000 to reproduce
# the longer run.
n = 5000

# Time series collected for plotting (presumably populated further down,
# outside this view -- confirm).
coop_frac_arr = []  # cooperation fraction per round
sat_arr = []        # satisfaction per round
avgdeg_arr = []     # average network degree per round
ttime = []
tti = time.time()

for i in range(n):
    # One public-goods-game round, then strategy/link updates and metrics.
    PGG.pgg(adj_mat, player_arr)
    update_decide.update_and_decide(adj_mat, player_arr)
    [coop, sat, avgdeg] = (Measure.measure(adj_mat, player_arr))