def __init__(self, verbose=False, visual_debug=False):
    self.verbose = verbose
    self.visual_debug = visual_debug
    # Note: launch_ipdb_on_exception() is a context manager; called bare like
    # this it is never entered, so it has no effect.
    ipdb.launch_ipdb_on_exception()

    # Control Stuff
    self.researched_warpgate = False  # Remove me later

    # Managers and controllers
    self.worker_controller = WorkerController(bot=self, verbose=self.verbose)
    self.army_controller = ArmyController(bot=self, verbose=self.verbose)
    self.scouting_controller = ScoutingController(bot=self, verbose=self.verbose)
    self.upgrades_controller = UpgradesController(bot=self, verbose=self.verbose)
    self.robotics_facility_controller = RoboticsFacilitiyController(
        bot=self,
        verbose=self.verbose,
    )
    self.gateway_controller = GatewayController(
        bot=self, verbose=self.verbose, auto_morph_to_warpgate=True)
    self.building_controller = BuildingController(bot=self, verbose=self.verbose)
    self.event_manager = EventManager()
    self.build_order_controller = BuildOrderController(
        verbose=self.verbose, bot=self)
    self.coordinator = Coordinator(bot=self, verbose=self.verbose,
                                   build_order='three_gate_blink_all_in')
    self.order_queue = []

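# ipdb's launch_ipdb_on_exception is designed to be used as a context manager,
# not called bare as in the __init__ above. A minimal sketch of the documented
# usage (the failing body is illustrative only):
from ipdb import launch_ipdb_on_exception

with launch_ipdb_on_exception():
    1 / 0  # any exception raised in this block opens an ipdb post-mortem shell
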
def callback():
    import ipdb
    with ipdb.launch_ipdb_on_exception():
        code = request.args.get('code')
        client = create_client()
        r = client.request_access_token(code)
        access_token, expires, uid = r.access_token, r.expires, r.uid
        client.set_access_token(access_token, expires)
        u = client.users.show.get(uid=uid)
        user = User.query.filter_by(uid=uid).first()
        if not user:
            user = User(username=u.screen_name,
                        uid=u.id,
                        status_count=u.statuses_count,
                        friends_count=u.friends_count,
                        email='*****@*****.**',
                        followers_count=u.followers_count,
                        auth_token=access_token,
                        expired_time=expires,
                        image_url=u.avatar_large or u.profile_image_url)
            db.session.add(user)
            db.session.commit()
        token = user.generate_confirmation_token()
        user.confirm(token)
        login_user(user)
        return redirect(url_for('main.index'))

def normal_data_gen(self, X_train, y_train=None, test=False):
    """normal data generator for training"""
    while True:
        if not test:
            base_id = np.random.permutation(range(len(X_train)))
        else:
            base_id = list(range(len(X_train)))
        for start in range(0, len(X_train), self.batch_size):
            end = min(start + self.batch_size, len(X_train))
            idx = base_id[start:end]
            batch_df = X_train.iloc[idx]
            x_batch = batch_df[self.train_cols]
            if self.proc_per_gen:
                with launch_ipdb_on_exception():
                    x_batch = self.proc_per_gen(x_batch)
            if y_train is not None:
                y_batch = y_train.iloc[idx]
                if self.out_size:
                    y_batch = self.dup_y(y_batch)
                if self.onehot_size:
                    y_batch = np.identity(self.onehot_size)[y_batch.values]
                yield x_batch, y_batch
            else:
                yield x_batch

def build_cfg(instructions: List[Instruction], loader: Loader) -> CFG:
    """
    Check that calls match returns and that syscalls and non-jumps do not
    change the control flow.
    """
    stack = []  # type: List[int]
    builder = CfgBuilder(instructions)
    for (i, instruction) in enumerate(instructions):
        if i > 0:
            previous = instructions[i - 1]
            if (previous.iclass in BLOCK_TERMINATORS
                    or previous.ip + previous.size != instruction.ip):
                builder.add_edge(previous, instruction)
            if previous.iclass == InstructionClass.ptic_return:
                if len(stack) != 0:
                    return_ip = stack.pop()
                    if return_ip != instruction.ip:
                        previous_loc = loader.find_location(instructions[i - 1].ip)
                        instruction_loc = loader.find_location(instruction.ip)
                        return_loc = loader.find_location(return_ip)
                        l.warning(
                            "unexpected call return {} from {} found: expected {}"
                            .format(instruction_loc, previous_loc, return_loc))
                        stack = []
        if instruction.iclass == InstructionClass.ptic_call:
            return_ip = instruction.ip + instruction.size
            stack.append(return_ip)
    from ipdb import launch_ipdb_on_exception
    with launch_ipdb_on_exception():
        return builder.cfg()

def balanced_data_gen(self, X_train, y_train, sample_size=None):
    """balanced data generator for training"""
    pos_idx = list(X_train[y_train == 1].index.values)
    neg_idx = list(X_train[y_train == 0].index.values)
    neg_size = int(sample_size * self.pos_neg_ratio)
    print(len(pos_idx), len(neg_idx), neg_size)
    while True:
        pos_sample = random.sample(pos_idx, sample_size)
        neg_sample = random.sample(neg_idx, neg_size)
        sample_idx = np.concatenate([pos_sample, neg_sample])
        base_df = X_train.loc[sample_idx].reset_index(drop=True)
        base_y = y_train.loc[sample_idx].reset_index(drop=True)
        base_id = np.random.permutation(len(base_df))
        for start in range(0, len(base_df), self.batch_size):
            end = min(start + self.batch_size, len(base_df))
            batch_df_id = base_id[start:end]
            batch_df = base_df.iloc[batch_df_id]
            x_batch = batch_df[self.train_cols]
            if self.proc_per_gen:
                with launch_ipdb_on_exception():
                    x_batch = self.proc_per_gen(x_batch)
            y_batch = base_y.iloc[batch_df_id]
            if self.out_size:
                y_batch = self.dup_y(y_batch)
            if self.onehot_size:
                y_batch = np.identity(self.onehot_size)[y_batch.values]
            yield x_batch, y_batch

def dump_embedd_layer(res_dir, conf):
    embedd_conf = conf[0]
    emb_dict = dict()
    model = embedded_mlp(conf)
    for j in tqdm(range(5)):
        fold_dict = dict()
        model.load_weights(Path(res_dir) / f'fold{j}' / 'weights.hdf5')
        for i, (c, _, m) in tqdm(enumerate(embedd_conf)):
            train = feather.read_dataframe(
                f'features/NN/train_nejumi_v2/{c}.ftr')
            train_orig = feather.read_dataframe(f'features/X_train_nejumi.ftr',
                                                columns=[c])
            train = pd.concat([
                train.rename({c: 'enc'}, axis=1),
                train_orig.rename({c: 'orig'}, axis=1)
            ], axis=1)
            train = train.drop_duplicates()
            import ipdb
            with ipdb.launch_ipdb_on_exception():
                inputs = model.get_layer(f'{c}').input
                outputs = model.layers[i + 76].output
                emb_out = Model(input=inputs, output=outputs)
                emb = np.concatenate(emb_out.predict(train.enc.values), axis=0)
            conv_dict = dict()
            # `k` avoids shadowing the enumerate index `i` used above
            for k, v in enumerate(train.orig.values):
                conv_dict[v] = emb[k]
            fold_dict[c] = conv_dict
        # fold_idx = np.load(res_dir / f'fold{j}tes_ind.npy')
        emb_dict[f'fold_{j}'] = fold_dict
    return emb_dict

def main():
    parser = optparse.OptionParser(usage='Usage: %prog [options] directory_or_epub_file')
    parser.add_option('--debug', action='store_true', dest='debug',
                      default=False, help='Debug mode')
    parser.add_option('--no_images', action='store_false', dest='images',
                      default=True, help='Disable images.')
    parser.add_option('--sword', action='store_true', dest='sword', default=False,
                      help='Generate sword module. osis2mod anx xml2gbs from '
                           'libsword-tools are needed.')
    parser.add_option('--osis', action='store_true', dest='osis', default=False,
                      help='Write OSIS files.')
    parser.add_option('--no_nonadj', action='store_true', dest='no_nonadj',
                      default=False,
                      help='Do not create empty comments (with only links) '
                           'for non-adjacent verse ranges')
    parser.add_option('--bible_work_id', dest='bible_work_id', default='None',
                      help='Bible work_id (verses are linked there). '
                           '"None" -> no work_id specification')
    options, args = parser.parse_args()
    if len(args) == 1:
        input_file = args[0]
        # 'or True' forces the debug path, making the else branch unreachable
        if options.debug or True:
            from ipdb import launch_ipdb_on_exception
            with launch_ipdb_on_exception():
                Convert(options, input_file).process_epub()
        else:
            Convert(options, input_file).process_epub(input_file)
    else:
        parser.print_help()

def __iter__(self):
    co = self._gen_func()
    with suppress(StopIteration):
        val = None
        while True:
            #?
            value = co.send(val)
            if isinstance(value, AnyCall):
                for i, p in enumerate(value.calls):
                    p.on()
                val = (yield)
                for i, call in enumerate(value.calls):
                    if call.original is not val.func:
                        continue
                    enter = (val.ret is NotImplemented)
                    if enter == (call.type == 'enter'):
                        val.which = i
                        break
                for p in value.calls:
                    p.off()
                continue
            p = value
            # container, attr = co.send(val)
            import ipdb
            with ipdb.launch_ipdb_on_exception():
                p.on()
                val = (yield)
                p.off()

def transform(args, argv):
    """
    Usage: {0} transform <sourceconfig> FILE ...
           {0} transform <sourceconfig> --directory=DIR

    Options:
        -d, --directory=DIR  Transform all JSON files in DIR

    Transform all given JSON files. Results will be printed to stdout.
    """
    config = SourceConfig.objects.get(label=args['<sourceconfig>'])
    transformer = config.get_transformer()
    if args['FILE']:
        files = args['FILE']
    else:
        files = [os.path.join(args['--directory'], x)
                 for x in os.listdir(args['--directory'])
                 if not x.startswith('.')]
    for name in files:
        with open(name) as fobj:
            data = fobj.read()
        from ipdb import launch_ipdb_on_exception
        with launch_ipdb_on_exception():
            print('Parsed raw data "{}" into'.format(name))
            pprint(transformer.transform(data))
            print('\n')

def handle_packet_in(self, concrete_pkt):
    pyretic_pkt = self.concrete2pyretic(concrete_pkt)
    if self.debug_packet_in:
        debugger.set_trace()
    if USE_IPDB:
        with debugger.launch_ipdb_on_exception():
            if self.mode == 'interpreted':
                output = self.policy.eval(pyretic_pkt)
            else:
                (output, traversed) = self.policy.track_eval(pyretic_pkt)
                self.reactive0(pyretic_pkt, output, traversed)
    else:
        try:
            if self.mode == 'interpreted':
                output = self.policy.eval(pyretic_pkt)
            else:
                (output, traversed) = self.policy.track_eval(pyretic_pkt)
                self.reactive0(pyretic_pkt, output, traversed)
        except:
            type, value, tb = sys.exc_info()
            traceback.print_exc()
            debugger.post_mortem(tb)
    if self.show_traces:
        print "<<<<<<<<< RECV <<<<<<<<<<<<<<<<<<<<<<<<<<"
        print util.repr_plus([pyretic_pkt], sep="\n\n")
        print
        print ">>>>>>>>> SEND >>>>>>>>>>>>>>>>>>>>>>>>>>"
        print util.repr_plus(output, sep="\n\n")
        print
    map(self.send_packet, output)

def makeplot(self):
    base_names = self.base_names.split(',')
    datasets = []
    for base_name in base_names:
        inputs = self.input()[base_name]
        v1 = inputs[self.v1].open(decode_times=False)
        v2 = inputs[self.v2].open(decode_times=False)
        ds = xr.merge([v1, v2])
        ds = ds.sel(zt=slice(0.0, self.z_max))
        ds = ds.isel(zt=slice(None, None, self.z_step))
        ds.attrs['name'] = self.get_suptitle(base_name)
        datasets.append(ds)
    plot_fn = length_scales.cumulant.sections.plot
    import ipdb
    with ipdb.launch_ipdb_on_exception():
        ax = plot_fn(
            datasets=datasets,
            var_names=[self.v1, self.v2],
        )
    return ax

def handle_packet_in(self, concrete_pkt):
    pyretic_pkt = self.concrete2pyretic(concrete_pkt)
    if self.debug_packet_in:
        debugger.set_trace()
    if USE_IPDB:
        with debugger.launch_ipdb_on_exception():
            if self.mode == 'interpreted':
                output = self.policy.eval(pyretic_pkt)
            else:
                (output, eval_trace) = self.policy.track_eval(pyretic_pkt, dry=False)
                self.reactive0(pyretic_pkt, output, eval_trace)
    else:
        try:
            if self.mode == 'interpreted':
                output = self.policy.eval(pyretic_pkt)
            else:
                (output, eval_trace) = self.policy.track_eval(pyretic_pkt, dry=False)
                self.reactive0(pyretic_pkt, output, eval_trace)
        except:
            type, value, tb = sys.exc_info()
            traceback.print_exc()
            debugger.post_mortem(tb)
    # 'or True' forces tracing on regardless of the show_traces flag
    if self.show_traces or True:
        self.trace("<<<<<<<<< RECV <<<<<<<<<<<<<<<<<<<<<<<<<<", timeStamped=True)
        self.trace(util.repr_plus([pyretic_pkt], sep="\n\n"))
        self.trace("")
        self.trace(">>>>>>>>> SEND >>>>>>>>>>>>>>>>>>>>>>>>>>", timeStamped=True)
        self.trace(util.repr_plus(output, sep="\n\n"))
        self.trace("")
    map(self.send_packet, output)

def handle_packet_in(self, concrete_pkt):
    pyretic_pkt = self.concrete2pyretic(concrete_pkt)
    if self.debug_packet_in:
        debugger.set_trace()
    if USE_IPDB:
        with debugger.launch_ipdb_on_exception():
            if self.mode == 'interpreted':
                output = self.policy.eval(pyretic_pkt)
            else:
                (output, eval_trace) = self.policy.track_eval(pyretic_pkt, dry=False)
                self.reactive0(pyretic_pkt, output, eval_trace)
    else:
        try:
            if self.mode == 'interpreted':
                output = self.policy.eval(pyretic_pkt)
            else:
                (output, eval_trace) = self.policy.track_eval(pyretic_pkt, dry=False)
                self.reactive0(pyretic_pkt, output, eval_trace)
        except:
            type, value, tb = sys.exc_info()
            traceback.print_exc()
            debugger.post_mortem(tb)
    if self.show_traces:
        print "<<<<<<<<< RECV <<<<<<<<<<<<<<<<<<<<<<<<<<"
        print util.repr_plus([pyretic_pkt], sep="\n\n")
        print
        print ">>>>>>>>> SEND >>>>>>>>>>>>>>>>>>>>>>>>>>"
        print util.repr_plus(output, sep="\n\n")
        print
    map(self.send_packet, output)

def handle(self, *args, **options):
    parsers_to_run = filter(options.__getitem__, parsers.keys())
    if not parsers_to_run:
        parsers_to_run = parsers.values()
    else:
        parsers_to_run = [parsers[x] for x in parsers_to_run]
    tasks = []
    for parser in parsers_to_run:
        if options['ipdb']:
            with launch_ipdb_on_exception():
                if options["sync"]:
                    parser.scrape(sync=options["sync"], cache=options["cache"])
                else:
                    tasks.append(parser.scrape(sync=options["sync"],
                                               cache=options["cache"]))
        else:
            if options["sync"]:
                parser.scrape(sync=options["sync"], cache=options["cache"])
            else:
                tasks.append(parser.scrape(sync=options["sync"],
                                           cache=options["cache"]))
    if not options["sync"]:
        map(lambda x: x.get(), tasks)

def test_aegan(aegan, prefix):
    import ipdb
    with ipdb.launch_ipdb_on_exception():
        aegan.load(prefix=prefix)

        from GAN.utils.vis import vis_grid
        vis_grid(inverse_transform(aegan.generator.random_generate(128)),
                 (2, 20), 'random_generate.png')

        paths = map(lambda x: x.strip(),
                    open('protocol/cuhk01-all.txt').readlines())
        from load import load_image
        sample = transform(
            np.array([load_image(path, (64, 128)) for path in paths[:128]])
        )
        vis_grid(inverse_transform(sample), (2, 20), 'sample.png')
        vis_grid(inverse_transform(aegan.autoencoder.autoencoder.predict(sample)),
                 (2, 20), 'reconstruct.png')

        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt

        codes = aegan.autoencoder.encoder.predict(sample)
        # codes = aegan.generator.sample(128)
        # codes = aegan.autoencoder.encoder.predict(aegan.generator.random_generate(128))
        for ind, code in enumerate(codes):
            n, bins, patches = plt.hist(code, 50, normed=1,
                                        facecolor='green', alpha=0.75)
            plt.savefig('test/{}.pdf'.format(ind))
            plt.clf()

        ipdb.set_trace()

def click_run(path, config, scenario, load_demand, solve_demand, load_supply,
              solve_supply, load_error, export_results, pickle_shapes,
              save_models, log_name, api_run, debug, clear_results):
    if debug:
        import ipdb
        with ipdb.launch_ipdb_on_exception():
            run(path, config, scenario, load_demand, solve_demand, load_supply,
                solve_supply, load_error, export_results, pickle_shapes,
                save_models, log_name, api_run, clear_results)
    else:
        run(path, config, scenario, load_demand, solve_demand, load_supply,
            solve_supply, load_error, export_results, pickle_shapes,
            save_models, log_name, api_run, clear_results)

def make_plot(self, da_emb):
    import ipdb
    with ipdb.launch_ipdb_on_exception():
        return make_rgb_annotation_map_image(
            da_emb=da_emb,
            rgb_components=self.rgb_components,
        )

def feature_aegan(aegan, modelname, protoname):
    with ipdb.launch_ipdb_on_exception():
        aegan.load(prefix=modelname)
        x = transform(load_all(protoname, (npxw, npxh)))
        code = aegan.autoencoder.encoder.predict(x)
        ipdb.set_trace()

def main(argv: List[str] = sys.argv) -> Any:
    args = parse_arguments(argv)
    if args.debug:
        from ipdb import launch_ipdb_on_exception
        with launch_ipdb_on_exception():
            return args.func(args)
    return args.func(args)

def run(self):
    try:
        import ipdb
        with ipdb.launch_ipdb_on_exception():
            _main(self.args)
        return 0
    except ImportError:
        _main(self.args)

def main():
    parser = argparse.ArgumentParser(description="Bibliography database manipulation")
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--logging-level", "-L",
                        help="Logging level: CRITICAL, ERROR, WARNING, INFO, DEBUG",
                        metavar="LEVEL", type=str, default="WARNING")
    parser.add_argument("--data-dir", help="Path to articles directory",
                        type=str, default=None)
    subparsers = parser.add_subparsers(title='Commands', dest='_commandName')
    for cmdType in Registry.commands:
        cmdType().args(subparsers)
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    if args.debug:
        msg.setup(level="DEBUG")
        try:
            from ipdb import launch_ipdb_on_exception
        except ModuleNotFoundError:
            from contextlib import contextmanager

            def noop():
                yield

            launch_ipdb_on_exception = contextmanager(noop)
    else:
        msg.setup(level=args.logging_level)
    ddir = Database.getDataDir(dataDir=args.data_dir)
    if ddir:
        RequestCache(os.path.join(ddir, ".cache.pkl"))
    try:
        if hasattr(args, "func"):
            if args.debug:
                with launch_ipdb_on_exception():
                    args.func(args)
            else:
                args.func(args)
        else:
            parser.print_usage()
    except UserException as e:
        msg.error("Error: %s", e)
        sys.exit(1)
    except AbortException:
        msg.error("Aborted")
        sys.exit(1)
    except (WorkExistsException, RepositoryException) as e:
        msg.error(str(e))
        sys.exit(1)
    except:
        t, v, _ = sys.exc_info()
        msg.critical("Unhandled exception: %s(%s)", t.__name__, v)
        sys.exit(1)

def run_transformer(config, id, datum):
    transformer = config.get_transformer()
    with launch_ipdb_on_exception():
        graph = transformer.transform(datum)
        if args.get('--regulate'):
            Regulator(source_config=config).regulate(graph)
        print('Parsed raw data "{}" into'.format(id))
        pprint(graph.to_jsonld(in_edges=False))
        print('\n')

def result(self):
    """
    IMPLEMENT ME
    """
    with launch_ipdb_on_exception():
        # print self.init, self.others
        ret = self.min_ops(0, self.init)
        # print self.reason[0, self.init]
        return ret

def result(self):
    with launch_ipdb_on_exception():
        if any('.' in row for row in self.grid):
            status = "Game has not completed"
        else:
            status = "Draw"
        for seq in self.rows + self.columns + self.diagonals:
            if self.winner(seq):
                return self.winner(seq) + " won"
        return status

def transform(args, argv):
    """
    Usage: {0} transform <sourceconfig> FILE ...
           {0} transform <sourceconfig> --directory=DIR
           {0} transform --ids <raw_data_ids>...

    Options:
        -d, --directory=DIR  Transform all JSON files in DIR
        -i, --ids            Provide RawDatum IDs to transform

    Transform all given JSON files. Results will be printed to stdout.
    """
    from ipdb import launch_ipdb_on_exception

    ids = args['<raw_data_ids>']
    if ids:
        qs = RawDatum.objects.filter(id__in=ids)
        for raw in qs.iterator():
            transformer = raw.suid.source_config.get_transformer()
            with launch_ipdb_on_exception():
                print('Parsed raw data "{}" into'.format(raw.id))
                pprint(transformer.transform(raw.datum))
                print('\n')
        return

    config = SourceConfig.objects.get(label=args['<sourceconfig>'])
    transformer = config.get_transformer()
    if args['FILE']:
        files = args['FILE']
    else:
        files = [
            os.path.join(args['--directory'], x)
            for x in os.listdir(args['--directory'])
            if not x.startswith('.')
        ]
    for name in files:
        with open(name) as fobj:
            data = fobj.read()
        with launch_ipdb_on_exception():
            print('Parsed raw data "{}" into'.format(name))
            pprint(transformer.transform(data).to_jsonld(in_edges=False))
            print('\n')

def main():
    try:
        filename = sys.argv[1]
    except IndexError:
        print(USAGE)
        return
    from can import BLFReader
    with ipdb.launch_ipdb_on_exception():
        reader = BLFReader(filename)
        for frame in reader:
            print(frame)

def handle(self, *args, **options):
    parsers_to_run = filter(lambda x: options[x], parsers.keys())
    if not parsers_to_run:
        parsers_to_run = parsers.values()
    else:
        parsers_to_run = [parsers[x] for x in parsers_to_run]
    if deputies in parsers_to_run:
        if options['ipdb']:
            with launch_ipdb_on_exception():
                deputies.deputies_list()
        else:
            deputies.deputies_list()
    for parser in parsers_to_run:
        if options['ipdb']:
            with launch_ipdb_on_exception():
                parser.scrape()
        else:
            parser.scrape()

def handle(self, *args, **options):
    # TODO Handle contributors, they're not a direct 1-to-1 they'll need some love
    # it's either this or catch the exception and put them in the blacklistguid table
    register_nonexistent_models_with_modm()

    models = get_ordered_models()
    # guids first, pls
    models.insert(0, models.pop(models.index(Guid)))

    if not options['nodelogs'] and not options['nodelogsguids']:
        merge_duplicate_users()
        # merged users get blank usernames, running it twice fixes it.
        merge_duplicate_users()

    for django_model in models:
        if not options['nodelogs'] and not options['nodelogsguids'] \
                and django_model is NodeLog:
            continue
        elif (options['nodelogs'] or options['nodelogsguids']) \
                and django_model is not NodeLog:
            continue
        if issubclass(django_model, AbstractBaseContributor) \
                or django_model is ApiOAuth2Scope \
                or not hasattr(django_model, 'modm_model_path'):
            continue

        module_path, model_name = django_model.modm_model_path.rsplit('.', 1)
        modm_module = importlib.import_module(module_path)
        modm_model = getattr(modm_module, model_name)
        modm_queryset = modm_model.find(django_model.modm_query)

        with ipdb.launch_ipdb_on_exception():
            if hasattr(django_model, 'primary_identifier_name') and \
                    not issubclass(django_model, GuidMixin) and \
                    django_model is not NotificationSubscription:
                if not options['nodelogs']:
                    make_guids(django_model,
                               page_size=django_model.migration_page_size)
            if not options['nodelogsguids']:
                save_bare_models(modm_queryset, django_model,
                                 page_size=django_model.migration_page_size)

        modm_model._cache.clear()
        modm_model._object_cache.clear()
        print('Took out {} trashes'.format(gc.collect()))

    # Handle system tags, they're on nodes, they need a special migration
    if not options['nodelogs'] and not options['nodelogsguids']:
        save_bare_system_tags()

def result(self):
    """
    Use multi-commodity network flow to calculate a partial order
    """
    with launch_ipdb_on_exception():
        order = None
        if self.sufficient_keys(self.init_keys, opened=set([])):
            order = self.solve_plan()
        if order is None:
            return "IMPOSSIBLE"
        return ' '.join(str(o + 1) for o in order)

def result(self):
    """
    IMPLEMENT ME
    """
    with launch_ipdb_on_exception():
        n = 0
        for pal in palindromes(int(math.sqrt(self.a) + .5),
                               int(math.sqrt(self.b))):
            if self.a <= pal**2 <= self.b and is_palindrome(pal**2):
                n += 1
        return n

def interative_test_ipdb_embed():
    """
    CommandLine:
        xdoctest -m dev/interactive_embed_tests.py interative_test_ipdb_embed

    Example:
        >>> interative_test_ipdb_embed()
    """
    import ipdb
    with ipdb.launch_ipdb_on_exception():
        raise Exception

def main():
    args = parseArgs()
    if args.pdb:
        try:
            import ipdb
            with ipdb.launch_ipdb_on_exception():
                _main(args)
            return 0
        except ImportError:
            return _main(args)
    else:
        return _main(args)

def main():
    args = parseArgs()
    if args.pdb:
        try:
            import ipdb
            with ipdb.launch_ipdb_on_exception():
                _main(args)
            return 0
        except ImportError:
            _main(args)
    else:
        _main(args)

def feature(aegan, filename):
    import ipdb
    with ipdb.launch_ipdb_on_exception():
        aegan.load(prefix='./samples/reid_aegan/aegan/50')
        paths = map(lambda x: x.strip(),
                    open('protocol/cuhk01-all.txt').readlines())
        x = transform(np.array([load_image(path, (64, 128)) for path in paths]))
        code = aegan.autoencoder.encoder.predict(x)
        ipdb.set_trace()

def debug_func(self, *args, **kwargs):
    if not self.on_error:
        return runcall(self.func, *args, **kwargs)
    try:
        if has_ipdb:
            with launch_ipdb_on_exception():
                return self.func(*args, **kwargs)
        else:
            return self.func(*args, **kwargs)
    except Exception as e:
        traceback.print_exc(file=sys.stderr)
        Pdb(stdin=sys.__stdin__, stdout=sys.__stdout__).set_trace()

def test_add_item(self):
    global errors
    from ipdb import launch_ipdb_on_exception
    with launch_ipdb_on_exception():
        driver = self.driver
        driver.get(self.base_url + "/")
        print("Attempting logon")
        # login start
        driver.find_element_by_id("header_sign_in").click()
        driver.find_element_by_id("logonId").click()
        driver.find_element_by_id("logonId").clear()
        driver.find_element_by_id("logonId").send_keys(self.username)
        driver.find_element_by_id("logonPassword_id").clear()
        driver.find_element_by_id("logonPassword_id").send_keys(self.password)
        driver.find_element_by_id("deliveryZipCode").clear()
        driver.find_element_by_id("deliveryZipCode").send_keys("92093")
        print("Submit")
        driver.find_element_by_css_selector(
            "#LogonFormBD input[type=submit]").click()
        # login done
        num_items = len(self.order)
        # ordering start
        for i, (itemNo, qty, name) in enumerate(self.order):
            line = " ({:3}/{:<3}) {:2} of #{:<8} - {}".format(
                i + 1, num_items, qty, itemNo, name)
            print(line)
            started = time.time()
            driver.get(self.base_url + "/OrderByItemsDisplayViewBD")
            # driver.find_element_by_id("headerOrderByItem").click()
            itemNumber = driver.find_element_by_css_selector(
                "input[name=itemNumber]")
            itemNumber.clear()
            itemNumber.send_keys(itemNo)
            itemQuantity = driver.find_element_by_css_selector(
                "input[name=itemQuantity]")
            itemQuantity.clear()
            itemQuantity.send_keys(qty)
            driver.find_element_by_id("obiAddToCartFtr").click()
            if self.is_element_present(By.CSS_SELECTOR, 'div.text-error'):
                sys.stderr.write("#### Error adding {} {}\n".format(qty, itemNo))
                reason = self.driver.find_elements(
                    by=By.CSS_SELECTOR, value='div.text-error')[0].text
                errors.append((line, reason))
            print(" Took {:4}".format(time.time() - started))

def run(debug, ipdb, testfile, testname, skel, sources, symlinks):
    if debug:
        logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger('nixtest')
    log.debug("Called with: %r", locals())

    if not testname:
        testname = os.path.basename(testfile)

    # XXX: hack to work around too long shebang line when run via nix-build
    if os.getcwd().startswith('/tmp/'):
        tempdir = os.getcwd()
    else:
        tempdir = tempfile.mkdtemp(prefix="nix-test-run.")
    workdir = os.path.join(tempdir, 'workdir')

    # Setup working directory for test
    if skel:
        shutil.copytree(skel, workdir)
    else:
        os.mkdir(workdir)
    os.chmod(workdir, 0755)

    # Change to working directory
    with local.cwd(workdir):
        for name, source in map(lambda x: x.split(':'), sources):
            shutil.copytree(source, name)
            make_umasked_writable(name)

        # Create symlinks, among others for "profile"
        for name, target in map(lambda x: x.split(':'), symlinks):
            os.symlink(target, name)

        # Setup profile as test environment using environment
        # variables.
        #
        # `from plumbum.cmd import X` will produce commands local to
        # that environment.
        #
        # probably we should rescue some variables
        local.env.clear()
        with local.env(**envvars("profile")):
            log = log.getChild(testname)
            testglobs = maketestglobs(local=local, log=log)
            if ipdb:
                from ipdb import launch_ipdb_on_exception
                with launch_ipdb_on_exception():
                    log.info("ipdb running tests in: %s" % workdir)
                    execfile(testfile, testglobs)
            else:
                log.info("Running tests in: %s" % workdir)
                execfile(testfile, testglobs)

def run(self):
    ds = self.input().open()
    cumulants = self._parse_cumulant_arg()
    cumulants_s = ["C({},{})".format(c[0], c[1]) for c in cumulants]
    plot_fn = length_scales.cumulant.vertical_profile.plot.plot
    import ipdb
    with ipdb.launch_ipdb_on_exception():
        plot_fn(data=ds, cumulants=cumulants_s, plot_type=self.plot_type)
    plt.savefig(self.output().path, bbox_inches='tight')

def run(self):
    if options.pdb:
        import ipdb
        with ipdb.launch_ipdb_on_exception():
            self._run()
    elif options.doProfiling:
        cProfile.runctx("self._run()",
                        globals=globals(),
                        locals=locals(),
                        filename=os.path.join(options.temporaryDirectory,
                                              "profile-%s.out" % (self.name)))
    else:
        self._run()

def _raven_state_cb(self, msg):
    if not self.is_recording:
        return
    if self.robot_frame is None:
        self.robot_frame = msg.header.frame_id
    with launch_ipdb_on_exception():
        for arm in self.arms:
            self.robot_msgs = self.robot_msgs + 1
            arm_msg = [arm_ for arm_ in msg.arms if arm_.name == arm][0]
            pose = tfx.pose(arm_msg.tool.pose, header=msg.header)
            self.robot_poses[arm].append((pose.stamp.seconds, pose.array))
            joints = tuple(j.position for j in arm_msg.joints)
            self.robot_joints[arm].append((pose.stamp.seconds, joints))

def run(self, test_classes):
    self.start_application()
    self.error_count = 0
    try:
        with app.test_request_context():
            if self.fail_method == 'ipdb':
                from ipdb import launch_ipdb_on_exception
                with launch_ipdb_on_exception():
                    self._run(test_classes)
            else:
                self._run(test_classes)
    except Exception:
        traceback.print_exc()
        raise SystemExit(2)
    finally:
        self.cleanup_browsers()
        self.stop_application()

def start(self):
    self._parseArgs()
    self._setupLogging()
    self.validateArgs()
    if self.args.debug:
        try:
            import ipdb
        except ImportError:
            print "--debug requires module 'ipdb'"
            return -1
        with ipdb.launch_ipdb_on_exception():
            self.run()
    elif self.args.profile:
        l = locals()
        cProfile.runctx("_rv=self.run()", globals(), l, "profile.out")
        pstats.Stats("profile.out").sort_stats("time").print_stats(20)
        return l["_rv"]
    else:
        return self.run()

def _handle_PacketIn(self, event):
    packet = event.parsed
    if packet.type == ethernet.LLDP_TYPE:
        self.handle_lldp(packet, event)
        return
    elif packet.type == 0x86dd:  # IGNORE IPV6
        return
    if self.show_traces:
        self.packetno += 1
        print "-------- POX/OF RECV %d ---------------" % self.packetno
        print event.connection
        print event.ofp
        print "port\t%s" % event.port
        print "data\t%s" % packetlib.ethernet(event.data)
        print "dpid\t%s" % event.dpid
        print
    recv_packet = self.packet_from_pox(event.dpid, event.ofp.in_port, event.data)
    if self.debug_packet_in == "1":
        ipdb.set_trace()
    with ipdb.launch_ipdb_on_exception():
        output = self.policy.eval(self.network, recv_packet)
    if self.debug_packet_in == "drop" and not output:
        ipdb.set_trace()
        output = self.policy.eval(self.network, recv_packet)  # So we can step through it
    if self.show_traces:
        print "<<<<<<<<< RECV <<<<<<<<<<<<<<<<<<<<<<<<<<"
        print util.repr_plus([recv_packet], sep="\n\n")
        print
        print ">>>>>>>>> SEND >>>>>>>>>>>>>>>>>>>>>>>>>>"
        print util.repr_plus(output.elements(), sep="\n\n")
        print
    for pkt in output.elements():
        self.send_packet(pkt)

def handle(self, *args, **options):
    # Setup the log level for root logger
    loglevel = self.verbosity_loglevel.get(options["verbosity"])
    logging.getLogger().setLevel(loglevel)

    # from debugsqlshell
    if options["debugsql"]:
        from datetime import datetime
        from django.db.backends import util
        from debug_toolbar.utils import ms_from_timedelta, sqlparse

        class PrintQueryWrapper(util.CursorDebugWrapper):
            def execute(self, sql, params=()):
                starttime = datetime.now()
                try:
                    return self.cursor.execute(sql, params)
                finally:
                    try:
                        raw_sql = self.db.ops.last_executed_query(
                            self.cursor, sql, params)
                        execution_time = datetime.now() - starttime
                        print sqlparse.format(raw_sql, reindent=True),
                        print " [%.2fms]" % (ms_from_timedelta(execution_time),)
                        print
                    except UnicodeEncodeError:
                        print "UnicodeEncodeError"

        util.CursorDebugWrapper = PrintQueryWrapper

    # When debugging, launch ipdb on exception
    if DEBUG_MIGRATIONS:
        # Exception launches ipdb https://github.com/Psycojoker/ipdb#use
        from ipdb import launch_ipdb_on_exception
        with launch_ipdb_on_exception():
            self._run_migrations(options["debugsql"], *args)
    else:
        # Just run the migrations
        self._run_migrations(options["debugsql"], *args)

def spider(self):
    import ipdb
    url = None
    with ipdb.launch_ipdb_on_exception():
        try:
            url = furl(redis.spop(FETCH_SET))
            while url:
                url = self.preprocess_url(url)
                if not url or not str(url) or redis.sismember(SEEN_SET, str(url)):
                    # Already have this request stored (or don't want it)
                    log.debug('skipping: %s', url)
                    url = furl(redis.spop(FETCH_SET))
                    continue
                response = self.request(url)
                self.url_write(url, response)
                redis.sadd(SEEN_SET, str(url))
                try:
                    urls = set([str(x) for x in self.extract_links(response)
                                if str(x)])
                    urls = [x for x in urls if not redis.sismember(SEEN_SET, x)]
                    if len(urls):
                        redis.sadd(FETCH_SET, *urls)
                        with open(self.filename_for(url, ext='urls'), 'w') as fh:
                            fh.write("\n".join(urls) + "\n")
                except Exception as e:
                    log.error("Failed to parse URLs from %s: %s" % (url, e))
                url = furl(redis.spop(FETCH_SET))
            url = None
        finally:
            if url:
                redis.sadd(FETCH_SET, str(url))
                self.url_delete(url)

#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Run the app
from ipdb import launch_ipdb_on_exception
from packagesample import start

with launch_ipdb_on_exception():
    start.main()

def main(argv):
    progname = 'bedup' if is_in_path('bedup') else 'python3 -m bedup'
    io_enc = codecs.lookup(locale.getpreferredencoding()).name
    if io_enc == 'ascii':
        print(
            'bedup will abort because Python was configured to use ASCII '
            'for console I/O.\nSee https://git.io/vnzk6 which '
            'explains how to use a UTF-8 locale.', file=sys.stderr)
        return 1

    parser = argparse.ArgumentParser(prog=progname)
    parser.add_argument(
        '--debug', action='store_true', help=argparse.SUPPRESS)
    commands = parser.add_subparsers(dest='command', metavar='command')

    sp_scan_vol = commands.add_parser(
        'scan', help='Scan', description="""
Scans volumes to keep track of potentially duplicated files.""")
    sp_scan_vol.set_defaults(action=vol_cmd)
    scan_flags(sp_scan_vol)

    # In Python 3.2+ we can add aliases here.
    # Hidden aliases doesn't seem supported though.
    sp_dedup_vol = commands.add_parser(
        'dedup', help='Scan and deduplicate', description="""
Runs scan, then deduplicates identical files.""")
    sp_dedup_vol.set_defaults(action=vol_cmd)
    scan_flags(sp_dedup_vol)
    sp_dedup_vol.add_argument(
        '--defrag', action='store_true',
        help='Defragment files that are going to be deduplicated')

    # An alias so as not to break btrfs-time-machine.
    # help='' is unset, which should make it (mostly) invisible.
    sp_dedup_vol_compat = commands.add_parser(
        'dedup-vol', description="""
A deprecated alias for the 'dedup' command.""")
    sp_dedup_vol_compat.set_defaults(action=vol_cmd)
    scan_flags(sp_dedup_vol_compat)

    sp_reset_vol = commands.add_parser(
        'reset', help='Reset tracking metadata', description="""
Reset tracking data for the listed volumes. Mostly useful for testing.""")
    sp_reset_vol.set_defaults(action=vol_cmd)
    vol_flags(sp_reset_vol)

    sp_show_vols = commands.add_parser(
        'show', help='Show metadata overview', description="""
Shows filesystems and volumes with their tracking status.""")
    sp_show_vols.set_defaults(action=cmd_show_vols)
    sp_show_vols.add_argument('fsuuid_or_device', nargs='?')
    sp_show_vols.add_argument(
        '--show-deleted', dest='show_deleted', action='store_true',
        help='Show volumes that have been deleted')
    sql_flags(sp_show_vols)

    sp_find_new = commands.add_parser(
        'find-new', help='List changed files', description="""
lists changes to volume since generation

This is a reimplementation of btrfs find-new,
modified to include directories as well.""")
    sp_find_new.set_defaults(action=cmd_find_new)
    sp_find_new.add_argument(
        '-0|--zero-terminated', dest='zero_terminated', action='store_true',
        help='Use a NUL character as the line separator')
    sp_find_new.add_argument(
        '--terse', dest='terse', action='store_true', help='Print names only')
    sp_find_new.add_argument('volume', help='Volume to search')
    sp_find_new.add_argument(
        'generation', type=int, nargs='?', default=0,
        help='Only show items modified at generation or a newer transaction')

    sp_forget_fs = commands.add_parser(
        'forget-fs', help='Wipe all metadata', description="""
Wipe all metadata for the listed filesystems.
Useful if the filesystems don't exist anymore.""")
    sp_forget_fs.set_defaults(action=cmd_forget_fs)
    sp_forget_fs.add_argument('uuid', nargs='+', help='Btrfs filesystem uuids')
    sql_flags(sp_forget_fs)

    sp_dedup_files = commands.add_parser(
        'dedup-files', help='Deduplicate listed', description="""
Freezes listed files, checks them for being identical,
and projects the extents of the first file onto the other files.

The effects are visible with filefrag -v (apt:e2fsprogs),
which displays the extent map of files.
""".strip())
    sp_dedup_files.set_defaults(action=cmd_dedup_files)
    sp_dedup_files.add_argument('source', metavar='SRC', help='Source file')
    sp_dedup_files.add_argument(
        'dests', metavar='DEST', nargs='+', help='Dest files')
    # Don't forget to also set new options in the dedup-vol test in vol_cmd
    sp_dedup_files.add_argument(
        '--defrag', action='store_true',
        help='Defragment the source file first')

    sp_generation = commands.add_parser(
        'generation', help='Display volume generation', description="""
Display the btrfs generation of VOLUME.""")
    sp_generation.set_defaults(action=cmd_generation)
    sp_generation.add_argument('volume', help='Btrfs volume')
    sp_generation.add_argument(
        '--flush', action='store_true', dest='flush',
        help='Flush outstanding data using syncfs before lookup')

    sp_size_lookup = commands.add_parser(
        'size-lookup', help='Look up inodes by size', description="""
List tracked inodes with a given size.""")
    sp_size_lookup.set_defaults(action=cmd_size_lookup)
    sp_size_lookup.add_argument('size', type=int)
    sp_size_lookup.add_argument(
        '-0|--zero-terminated', dest='zero_terminated', action='store_true',
        help='Use a NUL character as the line separator')
    sql_flags(sp_size_lookup)

    sp_shell = commands.add_parser(
        'shell', description="""
Run an interactive shell (useful for prototyping).""")
    sp_shell.set_defaults(action=cmd_shell)
    sql_flags(sp_shell)

    sp_fake_updates = commands.add_parser(
        'fake-updates', description="""
Fake inode updates from the latest dedup events (useful for benchmarking).""")
    sp_fake_updates.set_defaults(action=cmd_fake_updates)
    sp_fake_updates.add_argument('max_events', type=int)
    sql_flags(sp_fake_updates)

    # Give help when no subcommand is given
    if not argv[1:]:
        parser.print_help()
        return

    args = parser.parse_args(argv[1:])

    if args.debug:
        try:
            from ipdb import launch_ipdb_on_exception
        except ImportError:
            sys.stderr.write(
                'Please install bedup[interactive] for this feature\n')
            return 1
        with launch_ipdb_on_exception():
            # Handle all warnings as errors.
            # Overrides the default filter that ignores deprecations
            # and prints the rest.
            warnings.simplefilter('error')
            warnings.filterwarnings('ignore', module='IPython\..*')
            warnings.filterwarnings('ignore', module='alembic\..*')
            return args.action(args)
    else:
        try:
            return args.action(args)
        except IOError as err:
            if err.errno == errno.EPERM:
                sys.stderr.write("You need to run this command as root.\n")
                return 1
            raise

def main(debug=False):
    configure_logging()

    args = None  # to have args variable in except block when parse_args fails
    try:
        # Note: We parse the args here once to check whether to show bears or
        # not.
        args = default_arg_parser().parse_args()
        if args.debug:
            req_ipdb = PipRequirement('ipdb')
            if not req_ipdb.is_installed():
                logging.error('--debug flag requires ipdb. '
                              'You can install it with:\n%s',
                              ' '.join(req_ipdb.install_command()))
                sys.exit(13)

        if debug or args.debug:
            args.log_level = 'DEBUG'

        # Defer imports so if e.g. --help is called they won't be run
        from coalib.coala_modes import (
            mode_format, mode_json, mode_non_interactive, mode_normal)
        from coalib.output.ConsoleInteraction import (
            show_bears, show_language_bears_capabilities)

        console_printer = ConsolePrinter(print_colored=not args.no_color)
        configure_logging(not args.no_color)

        if args.show_bears:
            from coalib.settings.ConfigurationGathering import get_all_bears
            kwargs = {}
            if args.bears:
                kwargs['bear_globs'] = args.bears
            filtered_bears = get_all_bears(**kwargs)
            if args.filter_by_language:
                logging.warning(
                    "'--filter-by-language ...' is deprecated. "
                    "Use '--filter-by language ...' instead.")
                if args.filter_by is None:
                    args.filter_by = []
                args.filter_by.append(['language'] + args.filter_by_language)
            if args.filter_by:
                # Each iteration of the following loop applies
                # filters one by one provided as arguments
                try:
                    args.filter_by = filter_vector_to_dict(args.filter_by)
                    filtered_bears = apply_filters(
                        args.filter_by, filtered_bears)
                except (InvalidFilterException, NotImplementedError) as ex:
                    # If filter is not available or is unusable
                    console_printer.print(ex)
                    return 2
            local_bears, global_bears = filtered_bears
            show_bears(local_bears, global_bears,
                       args.show_description or args.show_details,
                       args.show_details, console_printer, args)
            return 0
        elif args.show_capabilities:
            from coalib.collecting.Collectors import (
                filter_capabilities_by_languages)
            local_bears, _ = apply_filter('language', args.show_capabilities)
            capabilities = filter_capabilities_by_languages(
                local_bears, args.show_capabilities)
            show_language_bears_capabilities(capabilities, console_printer)
            return 0

        if args.json:
            return mode_json(args, debug=debug)
    except BaseException as exception:  # pylint: disable=broad-except
        if not isinstance(exception, SystemExit):
            if args and args.debug:
                import ipdb
                with ipdb.launch_ipdb_on_exception():
                    raise
            if debug:
                raise
        return get_exitcode(exception)

    if args.format:
        return mode_format(args, debug=debug)
    if args.non_interactive:
        return mode_non_interactive(console_printer, args, debug=debug)
    return mode_normal(console_printer, None, args, debug=debug)

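# In the main() above, `with ipdb.launch_ipdb_on_exception(): raise` sits
# inside an except block: re-raising the just-caught exception inside the
# context manager hands it to ipdb, which opens a post-mortem shell. A
# self-contained sketch of the same idiom (the failing dict lookup is
# illustrative only):
import ipdb

try:
    {}['missing']  # some operation that fails
except BaseException:
    with ipdb.launch_ipdb_on_exception():
        raise  # re-raise so the context manager triggers the debugger
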
def main(self):
    # This looks scary but it's not. Python uses reference
    # counting and has a secondary, optional garbage collector for
    # collecting garbage cycles. Unfortunately when a cyclic GC
    # happens when a thread is calling cPickle.dumps, the
    # interpreter crashes sometimes. See Bug 19704. Since we
    # don't leak garbage cycles, disabling the cyclic GC is
    # essentially harmless.
    gc.disable()

    random.seed(42)

    if options.pdb or options.pdbAtStartup:
        print("Process ID: %d" % os.getpid(), file=sys.stderr)
        try:
            import ipdb
        except ImportError:
            die("Debugging options require 'ipdb' package installed.")
        if not options.threaded:
            die("Debugging only works with -T (threaded) mode")

    if options.pdbAtStartup:
        ipdb.set_trace()

    logging.info("ConsensusCore version: %s" %
                 (consensusCoreVersion() or "ConsensusCore unavailable"))
    logging.info("ConsensusCore2 version: %s" %
                 (consensusCore2Version() or "ConsensusCore2 unavailable"))
    logging.info("Starting.")

    atexit.register(self._cleanup)
    if options.doProfiling:
        self._makeTemporaryDirectory()

    with AlignmentSet(options.inputFilename) as peekFile:
        if options.algorithm == "arrow" and peekFile.isCmpH5:
            die("Arrow does not support CmpH5 files")
        if not peekFile.isCmpH5 and not peekFile.hasPbi:
            die("Genomic Consensus only works with cmp.h5 files and BAM "
                "files with accompanying .pbi files")
        logging.info("Peeking at file %s" % options.inputFilename)
        logging.info("Input data: numAlnHits=%d" % len(peekFile))
        resolveOptions(peekFile)
        self._loadReference(peekFile)
        self._checkFileCompatibility(peekFile)
        self._algorithm = self._algorithmByName(options.algorithm, peekFile)
        self._configureAlgorithm(options, peekFile)
        options.disableHdf5ChunkCache = True
        #options.disableHdf5ChunkCache = self._shouldDisableChunkCache(peekFile)
        #if options.disableHdf5ChunkCache:
        #    logging.info("Will disable HDF5 chunk cache (large number of datasets)")

    self._launchSlaves()
    self._readAlignmentInput()

    monitoringThread = threading.Thread(target=monitorSlaves, args=(self,))
    monitoringThread.start()

    try:
        if options.doProfiling:
            cProfile.runctx("self._mainLoop()",
                            globals=globals(),
                            locals=locals(),
                            filename=os.path.join(options.temporaryDirectory,
                                                  "profile-main.out"))
        elif options.pdb:
            with ipdb.launch_ipdb_on_exception():
                self._mainLoop()
        else:
            self._mainLoop()
    except BaseException as exc:
        msg = 'options={}'.format(pprint.pformat(vars(options)))
        logging.exception(msg)
        self.abortWork(repr(exc))

    monitoringThread.join()

    if self._aborting:
        logging.error("Aborting")
        return -1
    else:
        logging.info("Finished.")

    if options.doProfiling:
        self._printProfiles()

    # close h5 file.
    self._inAlnFile.close()
    return 0

def run(self, test_list=None, shared_data=None):
    global _current_test_case

    if not self.configured:
        raise Exception("TestRunner was not configured before run() was called")

    if test_list:
        for test_name in test_list:
            ok = False
            for test_case in self.test_cases:
                if test_case.__class__.__name__ == test_name:
                    ok = True
                    break
            if not ok:
                sys.stderr.write("The test case '%s' does not exist\n" % test_name)
                self._shutdown()
                return 1
        sys.stderr.write("Only testing the following cases: %s\n"
                         % ', '.join(test_list))

    # Big try block to make sure we shut down the server if anything goes
    # wrong.
    failed = 0
    browser = None
    try:
        # Do some stuff
        browser = Remote(
            self.selenium_server_url.encode('ascii'),
            desired_capabilities={
                'browserName': self.selenium_browser,
            },
        )
        browser.set_window_size(1024, 650)

        if shared_data is None:
            shared_data = dict()

        for test_case in self.test_cases:
            if test_list:
                # Only run the named tests, plus those on the "critical path"
                skip = True
                if test_case.critical_path:
                    skip = False
                elif test_list:
                    for test_name in test_list:
                        if test_name == test_case.__class__.__name__:
                            skip = False
                if skip:
                    continue
            test_case.browser = browser
            test_case.selenium_browser = self.selenium_browser
            test_case.test_runner = self
            if self.selenium_browser == 'firefox':
                test_case.slowdown = .5
            test_case.shared = shared_data
            test_case.banner()
            _current_test_case = test_case
            if self.wait_after_exception:
                from ipdb import launch_ipdb_on_exception
                with launch_ipdb_on_exception():
                    test_case.run()
            else:
                test_case.run()
            test_case.done()
            _current_test_case = None
            failed += test_case.failed
    except KeyboardInterrupt:
        print "interrupt"
        failed += 1
    except:
        failed += 1
        traceback.print_exc()

    if browser:
        browser.quit()
    self._shutdown()
    return failed

def handle(self, *args, **options):
    for exercice in Exercice.objects.all():
        from ipdb import launch_ipdb_on_exception
        with launch_ipdb_on_exception():
            exercice.check_answers({})

    for exercice in Exercice.objects.filter(testable_online=True):
        with launch_ipdb_on_exception():
            for number, (question, answers) in enumerate(
                    exercice.get_questions().items()):
                if answers["type"] == "text":
                    for answer in answers["answers"]:
                        assert exercice.check_answers({
                            str(number): answer
                        })["answers"][number]["correct"] == True
                    assert exercice.check_answers({
                        str(number): "this is not a valid answer"
                    })["answers"][number]["correct"] == False
                elif answers["type"] == "checkbox":
                    number_of_checkboxs = len(answers["answers"])
                    true_false = (True, False)
                    # generate the matrix of all possibilities
                    bin_matrix = [(a, b, c, d, e, f, g)
                                  for a in true_false
                                  for b in true_false
                                  for c in true_false
                                  for d in true_false
                                  for e in true_false
                                  for f in true_false
                                  for g in true_false]
                    # reduce the number of possibilities to the number of checkboxes
                    bin_matrix = [x[:number_of_checkboxs]
                                  for x in bin_matrix[:number_of_checkboxs]]
                    correct_answer = [str(n) for n, j
                                      in enumerate(answers["answers"].values())
                                      if j]
                    for line in bin_matrix:
                        assert exercice.check_answers({
                            str(number): [str(n) for n, j in enumerate(line) if j]
                        })["answers"][number]["correct"] == (line == correct_answer)
                    # obviously broken answer
                    exercice.check_answers({
                        str(number): map(str, range(1500))
                    })
                elif answers["type"] == "radio":
                    for radio_number, i in enumerate(answers["answers"].values()):
                        assert exercice.check_answers({
                            str(number): radio_number
                        })["answers"][number]["correct"] == i
                    assert exercice.check_answers({
                        str(number): "9999999"
                    })["answers"][number]["correct"] == False
                elif answers["type"] == "graph":
                    pass
                elif answers["type"] == "math-simple":
                    pass
                elif answers["type"] == "math-advanced":
                    pass
                else:
                    raise Exception("Exercice %s answers type is wrong: %s"
                                    % (exercice, answers.get("type")))

    from ipdb import set_trace; set_trace()

class WritableFieldPatch(PatchSuite):
    class Meta:
        parent = rest_fields.WritableField

    # TODO tuple? if many places to patch
    from_native = patch()

    @patch(wrapper_type=wrappers.Hook, pass_event=True,
           event_class=FieldFromNativeEvent)
    def field_from_native(self, data, files, field_name, into,
                          return_value, event):
        event.published_context = ('log_prefix',)
        event.__dict__.update({
            'log_prefix': '-> ',
            'field_value': into.get(field_name),
            'field_name': field_name,
        })


if __name__ == '__main__':
    def do():
        from rest_framework.test import APIClient
        cl = APIClient()
        cl.login(username='******', password='******')
        resp = cl.post('/snippets/', {'title': 'titlu', 'code': 'codu'})
        print(resp.data)

    import ipdb
    with ipdb.launch_ipdb_on_exception():
        with (SerializerPatch() + WritableFieldPatch()):
            do()

def inner(*args, **kwargs):
    import ipdb
    with ipdb.launch_ipdb_on_exception():
        return func(*args, **kwargs)

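# inner() above reads like the body of a debugging decorator; a minimal sketch
# of an assumed enclosing decorator (the name debug_on_exception is
# hypothetical, not taken from the original source):
import functools

def debug_on_exception(func):
    @functools.wraps(func)
    def inner(*args, **kwargs):
        import ipdb
        with ipdb.launch_ipdb_on_exception():
            return func(*args, **kwargs)
    return inner
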
def run_coala(console_printer=None,
              log_printer=None,
              print_results=do_nothing,
              acquire_settings=fail_acquire_settings,
              print_section_beginning=do_nothing,
              nothing_done=do_nothing,
              autoapply=True,
              force_show_patch=False,
              arg_parser=None,
              arg_list=None,
              args=None,
              debug=False,
              cache=None):
    """
    This is a main method that should be usable for almost all purposes and
    reduces executing coala to one function call.

    :param console_printer:         Object to print messages on the console.
    :param log_printer:             A LogPrinter object to use for logging.
    :param print_results:           A callback that takes a LogPrinter, a
                                    section, a list of results to be printed,
                                    the file dict and the mutable file diff
                                    dict.
    :param acquire_settings:        The method to use for requesting settings.
                                    It will get a parameter which is a
                                    dictionary with the settings name as key
                                    and a list containing a description in [0]
                                    and the names of the bears who need this
                                    setting in all following indexes.
    :param print_section_beginning: A callback that will be called with a
                                    section name string whenever analysis of a
                                    new section is started.
    :param nothing_done:            A callback that will be called with only a
                                    log printer that shall indicate that
                                    nothing was done.
    :param autoapply:               Set this to false to not autoapply any
                                    actions. If you set this to `False`,
                                    `force_show_patch` will be ignored.
    :param force_show_patch:        If set to True, a patch will be always
                                    shown. (Using ApplyPatchAction.)
    :param arg_parser:              Instance of ArgParser that is used to
                                    parse non-setting arguments.
    :param arg_list:                The CLI argument list.
    :param args:                    Alternative pre-parsed CLI arguments.
    :param debug:                   Run in debug mode, bypassing
                                    multiprocessing, and not catching any
                                    exceptions.
    :param cache:                   Instance of a FileCache instance.
    :return:                        A dictionary containing a list of results
                                    for all analyzed sections as key.
    """
    all_actions_possible = provide_all_actions()
    apply_single = None
    if getattr(args, 'single_action', None) is not None:
        while True:
            for i, action in enumerate(all_actions_possible, 1):
                console_printer.print(format_lines('{}'.format(action),
                                                   symbol='['))

            line = format_lines(STR_ENTER_LETTER, symbol='[')
            choice = input(line)

            if choice.isalpha():
                choice = choice.upper()
                choice = '(' + choice + ')'
                if choice == '(N)':
                    apply_single = 'Do (N)othing'
                    break
                for i, action in enumerate(all_actions_possible, 1):
                    if choice in action:
                        apply_single = action
                        break
                if apply_single:
                    break

            console_printer.print(format_lines(
                'Please enter a valid letter.', symbol='['))

        args.apply_patch = False

    exitcode = 0
    sections = {}
    results = {}
    file_dicts = {}
    try:
        yielded_results = yielded_unfixed_results = False
        did_nothing = True
        sections, local_bears, global_bears, targets = gather_configuration(
            acquire_settings,
            arg_parser=arg_parser,
            arg_list=arg_list,
            args=args)

        logging.debug('Platform {} -- Python {}, coalib {}'
                      .format(platform.system(), platform.python_version(),
                              VERSION))

        settings_hash = get_settings_hash(sections, targets)
        flush_cache = bool(sections['cli'].get('flush_cache', False) or
                           settings_changed(None, settings_hash))

        if cache is None and not sections['cli'].get('disable_caching', False):
            cache = FileDictFileCache(None, os.getcwd(), flush_cache)

        if targets:
            sections = OrderedDict(
                (section_name, sections[section_name])
                for section_name in targets)

        # Collect all the filters and try to filter sections
        filters = collect_filters(args, arg_list, arg_parser)
        if len(filters) > 0:
            all_sections = list(sections.values())
            try:
                filtered = apply_filters(filters, sections=all_sections)
                sections = OrderedDict(
                    (sect.name.lower(), sect) for sect in filtered)
            except (InvalidFilterException, NotImplementedError) as ex:
                console_printer.print(ex)

        for section_name, section in sections.items():
            if not section.is_enabled(targets):
                continue

            if not autoapply:
                section['default_actions'] = ''
            elif force_show_patch:
                section['default_actions'] = '*: ShowPatchAction'
                section['show_result_on_top'] = 'yeah'

            print_section_beginning(section)
            section_result = execute_section(
                section=section,
                global_bear_list=global_bears[section_name],
                local_bear_list=local_bears[section_name],
                print_results=print_results,
                cache=cache,
                log_printer=None,
                console_printer=console_printer,
                debug=debug or args and args.debug,
                apply_single=(apply_single
                              if apply_single is not None
                              else False))
            yielded, yielded_unfixed, results[section_name] = (
                simplify_section_result(section_result))

            yielded_results = yielded_results or yielded
            yielded_unfixed_results = (
                yielded_unfixed_results or yielded_unfixed)
            did_nothing = False

            file_dicts[section_name] = section_result[3]

        update_settings_db(None, settings_hash)
        if cache:
            cache.write()

        if CounterHandler.get_num_calls_for_level('ERROR') > 0:
            exitcode = 1
        elif did_nothing:
            nothing_done(None)
            exitcode = 2
        elif yielded_unfixed_results:
            exitcode = 1
        elif yielded_results:
            exitcode = 5
    except BaseException as exception:  # pylint: disable=broad-except
        if not isinstance(exception, SystemExit):
            if args and args.debug or (
                    sections and sections.get('cli', {}).get('debug', False)):
                import ipdb
                with ipdb.launch_ipdb_on_exception():
                    raise
            if debug:
                raise
        exitcode = exitcode or get_exitcode(exception)

    return results, exitcode, file_dicts

def main(self):
    # This looks scary but it's not. Python uses reference
    # counting and has a secondary, optional garbage collector for
    # collecting garbage cycles. Unfortunately when a cyclic GC
    # happens when a thread is calling cPickle.dumps, the
    # interpreter crashes sometimes. See Bug 19704. Since we
    # don't leak garbage cycles, disabling the cyclic GC is
    # essentially harmless.
    gc.disable()

    parseOptions()
    self._algorithm = self._algorithmByName(options.algorithm)
    self._setupLogging()
    random.seed(42)

    logging.info("h5py version: %s" % h5py.version.version)
    logging.info("hdf5 version: %s" % h5py.version.hdf5_version)
    logging.info("ConsensusCore version: %s" %
                 (consensusCoreVersion() or "ConsensusCore unavailable"))
    logging.info("Starting.")

    atexit.register(self._cleanup)
    if options.doProfiling:
        self._makeTemporaryDirectory()

    with AlignmentSet(options.inputFilename) as peekFile:
        if not peekFile.isCmpH5 and not peekFile.hasPbi:
            logging.warn("'fancyChunking' not yet available for BAM "
                         "files without accompanying .pbi files, "
                         "disabling")
            options.fancyChunking = False
        logging.info("Peeking at file %s" % options.inputFilename)
        logging.info("Input data: numAlnHits=%d" % len(peekFile))
        resolveOptions(peekFile)
        self._loadReference(peekFile)
        self._checkFileCompatibility(peekFile)
        self._configureAlgorithm(options, peekFile)
        options.disableHdf5ChunkCache = True
        #options.disableHdf5ChunkCache = self._shouldDisableChunkCache(peekFile)
        #if options.disableHdf5ChunkCache:
        #    logging.info("Will disable HDF5 chunk cache (large number of datasets)")
        #logging.debug("After peek, # hdf5 objects open: %d" % h5py.h5f.get_obj_count())

    if options.dumpEvidence:
        self._setupEvidenceDumpDirectory(options.evidenceDirectory)

    self._launchSlaves()
    self._readCmpH5Input()

    monitoringThread = threading.Thread(target=monitorSlaves, args=(self,))
    monitoringThread.start()

    try:
        if options.doProfiling:
            cProfile.runctx("self._mainLoop()",
                            globals=globals(),
                            locals=locals(),
                            filename=os.path.join(options.temporaryDirectory,
                                                  "profile-main.out"))
        elif options.doDebugging:
            if not options.threaded:
                die("Debugging only works with -T (threaded) mode")
            logging.info("PID: %d", os.getpid())
            import ipdb
            with ipdb.launch_ipdb_on_exception():
                self._mainLoop()
        else:
            self._mainLoop()
    except:
        why = traceback.format_exc()
        self.abortWork(why)

    monitoringThread.join()

    if self._aborting:
        logging.error("Aborting")
        return -1
    else:
        logging.info("Finished.")

    if options.doProfiling:
        self._printProfiles()

    # close h5 file.
    self._inCmpH5.close()
    return 0

def help_on_exceptions(logger, func, *args, **kw):
    """Present exceptions in the form of a request to file an issue

    Calls func (typically a "main" function), and returns the return code.
    If an exception occurs, then a) if an error is logged to `logger`, we
    just return with 127, or b) otherwise, dump the stack trace and then
    return 127.

    If the 'DEBUG' environment variable is set then the exception is
    raised anyway.
    """
    debug = os.environ.get('DEBUG', '')

    if '--ipdb' in sys.argv:
        from ipdb import launch_ipdb_on_exception
        with launch_ipdb_on_exception():
            return func(*args, **kw)

    try:
        return func(*args, **kw)
    except KeyboardInterrupt:
        if debug:
            raise
        else:
            logger.info('Interrupted')
            return 127
    except SystemExit:
        raise
    except ValidationError as e:
        if debug:
            raise
        else:
            logger.error(str(e))
            return 127
    except IOError as e:
        if debug:
            raise
        else:
            logger.error(str(e))
            return 127
    except RemoteFetchError as e:
        if debug:
            raise
        else:
            logger.error("You may wish to check your Internet connection "
                         "or the remote server")
            return 127
    except:
        if debug:
            raise
        else:
            if not logger.error_occurred:
                logger.error("Uncaught exception:")
                for line in traceback.format_exc().splitlines():
                    logger.info(line)
                text = """\
                This exception has not been translated to a human-friendly
                error message, please file an issue at
                https://github.com/hashdist/hashdist/issues
                pasting this stack trace.
                """
                text = textwrap.fill(textwrap.dedent(text), width=78)
                logger.info('')
                for line in text.splitlines():
                    logger.info(line)
            return 127

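# A hedged usage sketch for help_on_exceptions(): wiring it around a CLI entry
# point so the process exit code reflects the wrapped function. The `logger`
# and `main` names here are placeholders for illustration, not taken from the
# original source.
if __name__ == '__main__':
    import sys
    sys.exit(help_on_exceptions(logger, main))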