def unlock(test, logger, hash_key):
    """Unlocks TestCases for a given Test.

    PARAMETERS:
    test     -- Test; the test to unlock.
    logger   -- OutputLogger.
    hash_key -- string; hash key used to construct the UnlockConsole.

    DESCRIPTION:
    This function incrementally unlocks all TestCases in a specified
    Test. Students must answer in the order that TestCases are
    written. Once a TestCase is unlocked, it will remain unlocked.

    RETURN:
    int, bool; the number of cases that are newly unlocked for this
    Test after going through an unlocking session, and whether the
    student wanted to exit the unlocker or not.
    """
    console = UnlockConsole(logger, hash_key)
    cases = 0
    cases_unlocked = 0
    for suite in test['suites']:
        for case in suite:
            cases += 1
            if not isinstance(case, UnlockTestCase) \
                    or not case['locked']:
                continue
            utils.underline('Case {}'.format(cases), line='-')
            if console.run(case):   # Abort unlocking.
                return cases_unlocked, True
            cases_unlocked += 1
    print("You are done unlocking tests for this question!")
    return cases_unlocked, False
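# Hedged, self-contained sketch of the incremental-unlock loop in unlock():
# the dict-based "test" and the in-place flag flip below are stand-ins for the
# real Test/UnlockConsole objects used above, not the actual types.
def _demo_unlock_loop(test):
    cases_unlocked = 0
    for suite in test['suites']:
        for case in suite:
            if not case['locked']:
                continue            # already-unlocked cases stay unlocked
            case['locked'] = False  # stands in for console.run(case) succeeding
            cases_unlocked += 1
    return cases_unlocked, False    # mirrors the (newly unlocked, aborted) return

# _demo_unlock_loop({'suites': [[{'locked': True}, {'locked': False}]]}) -> (1, False)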
def on_interact(self):
    """Responsible for unlocking each test."""
    if not self.args.unlock:
        return
    utils.print_title('Unlocking tests for {}'.format(
        self.assignment['name']))
    print('At each "{}",'.format(UnlockConsole.PROMPT)
          + ' type in what you would expect the output to be.')
    print('Type {} to quit'.format(UnlockConsole.EXIT_INPUTS[0]))

    for test in self._filter_tests():
        if test.num_cases == 0:
            print('No tests to unlock for {}.'.format(test.name))
        else:
            utils.underline('Unlocking tests for {}'.format(test.name))
            print()
            # TODO(albert): the unlock function returns the number
            # of unlocked test cases. This can be a useful metric
            # for analytics in the future.
            cases_unlocked, end_session = unlock(test, self.logger,
                                                 self.assignment['hash_key'])
            if end_session:
                break
            print()
def _run_suite(suite, logger, cases_tested, verbose, interactive):
    """Runs tests for a single suite.

    PARAMETERS:
    suite        -- list; each element is a TestCase
    logger       -- OutputLogger.
    cases_tested -- Counter; an object that keeps track of the number
                    of cases that have been tested so far.
    verbose      -- bool; True if verbose mode is toggled on
    interactive  -- bool; True if interactive mode is toggled on

    RETURNS:
    (passed, errored), where
    passed  -- int; number of TestCases that passed
    errored -- bool; True if a TestCase resulted in error.
    """
    passed = 0
    for case in suite:
        if not isinstance(case, GradedTestCase):
            # TODO(albert): should non-GradedTestCases be counted as
            # passing?
            continue
        elif not case.should_grade():
            logger.on()
            return passed, True  # students must unlock first
        cases_tested.increment()
        utils.underline('Case {}'.format(cases_tested), line='-')
        error = case.on_grade(logger, verbose, interactive)
        if error:
            return passed, True
        passed += 1
    logger.on()
    return passed, False
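# Hedged sketch of the early-return contract in _run_suite(), with hypothetical
# stub cases in place of the real GradedTestCase/Counter/OutputLogger types.
class _StubCase:
    def __init__(self, unlocked=True, errors=False):
        self.unlocked, self.errors = unlocked, errors

    def should_grade(self):
        return self.unlocked

    def on_grade(self):
        return self.errors

def _demo_run_suite(suite):
    passed = 0
    for case in suite:
        if not case.should_grade():
            return passed, True   # locked case: stop, students must unlock first
        if case.on_grade():
            return passed, True   # grading error: stop this suite early
        passed += 1
    return passed, False

# _demo_run_suite([_StubCase(), _StubCase(errors=True)]) -> (1, True)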
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "address",
        nargs="?",
        metavar="addr",
        type=str,
        help="Address to put into config",
    )
    args = parser.parse_args()

    abspath = os.path.abspath(__file__)
    dname = os.path.dirname(abspath)
    os.chdir(dname)

    # interactive mode
    if not args.address:
        print(
            "This tool will edit {} with your address; "
            "if you don't have one, one will be generated for you".format(FILE)
        )
        print(colorify("--------------", "green"))
        address = input(
            underline(
                "Please paste your QKC address "
                "(make sure you have the private key for it)"
            )
            + ": "
        )
        if not address:
            print(
                "your input is empty, so we will generate one for you; "
                + underline("be sure to keep the private key in a safe place")
            )
            address = gen_address()
            touch_file(address)
            return
    else:
        address = args.address

    if address.startswith("0x"):
        address = address[2:]
    if len(address) == 40:
        print(
            "your input is an ETH address, but that's OK; "
            "we actually just need the 20-byte address"
        )
        touch_file(address)
    elif len(address) == 48:
        touch_file(address[:40])
    else:
        print(
            colorify(
                "Wrong address length, please provide either a 20-byte ETH "
                "address or a 24-byte QKC address",
                "red",
            )
        )
        sys.exit(1)
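# Hedged sketch of the address-normalization rules used in main(), without the
# file I/O; normalize_addr() is a hypothetical helper, not part of the real tool.
def normalize_addr(address):
    if address.startswith("0x"):
        address = address[2:]
    if len(address) == 40:       # 20-byte ETH address, hex-encoded
        return address
    if len(address) == 48:       # 24-byte QKC address: keep the leading 20 bytes
        return address[:40]
    raise ValueError("expected a 20-byte ETH or 24-byte QKC address")

# normalize_addr("0x" + "ab" * 24) -> "ab" * 20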
def _grade_test(self, test):
    """Grades a single Test."""
    utils.underline('Running tests for ' + test.name)
    print()
    if test['note']:
        print(test['note'])
    total_passed = grade(test, self.logger, self.args.interactive,
                         self.args.verbose)

    total_cases = test.num_cases
    if total_cases > 0:
        print('== {} ({}%) cases passed for {} =='.format(
            total_passed,
            round(100 * total_passed / total_cases, 2),
            test.name))
    if test.num_locked > 0:
        print('-- There are still {} locked test cases.'.format(test.num_locked)
              + ' Use the -u flag to unlock them. --')
    print()
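# Hedged worked example of the pass-rate line printed by _grade_test(), using
# made-up numbers instead of a real Test object.
_demo_passed, _demo_cases = 3, 4
_demo_line = '== {} ({}%) cases passed for {} =='.format(
    _demo_passed, round(100 * _demo_passed / _demo_cases, 2), 'demo_test')
# _demo_line == '== 3 (75.0%) cases passed for demo_test =='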
def Slice(self, slice_point, bottom, axis=1, phase=None):
    """Appends a Slice layer that splits `bottom` into two top blobs."""
    self.setup(underline('slice', bottom), 'Slice', bottom=[bottom])
    if axis != 1:
        self.this.slice_param.axis = axis
    if phase is not None:
        self.include(phase)
    # Drop any auto-generated top blobs before adding the two slice outputs.
    while len(self.this.top):
        self.this.top.pop()
    a = underline('a', bottom)
    b = underline('b', bottom)
    self.this.top.extend([a, b])
    if not isinstance(slice_point, list):
        slice_point = [slice_point]
    self.this.slice_param.slice_point.extend(slice_point)
    return self.this.name, a, b
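# Hedged usage sketch for Slice(): the builder instance ('net') and the 'conv1'
# blob name are hypothetical, and the generated layer/top names depend on how
# underline() joins its arguments.
# name, top_a, top_b = net.Slice(slice_point=32, bottom='conv1', axis=1)
# top_a and top_b can then feed two independent branches of the network.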
def train(self, data_root, **kwargs):
    """Start training process.

    Args:
        data_root: path to dataset, should contain a subdirectory named
            'train' (and optionally 'val')

    Kwargs (optional):
        metrics: a list of functions for computing metrics
        checkpoint: path to checkpoint for resuming training
        epochs: number of epochs for training
        batch_size: mini-batch size for training
        proportion: proportion of training data to be used
    """
    # Merge configurations.
    self.kwargs = {**self.kwargs, **kwargs}

    self.optimizer, self.scheduler = self.get_default_optimizer()
    self.load_checkpoint(self.kwargs.get('checkpoint'))
    self.logger.addHandler(
        logging.FileHandler(self.record_dir / 'train.log'))

    serializable_kwargs = {
        k: v for k, v in self.kwargs.items()
        if isinstance(v, (int, float, str, tuple))
    }
    record.save_params(self.record_dir, serializable_kwargs)
    self.logger.info(str(serializable_kwargs) + '\n')
    self.tracker.save_path = self.record_dir / 'history.csv'

    data_root = Path(data_root)
    train_path = data_root / 'train'
    val_path = data_root / 'val'

    train_dataset = self.get_default_dataset(
        train_path, proportion=self.kwargs.get('proportion', 1))
    train_dataset.summary(logger=self.logger)

    self.dataloaders = {
        'train': torch.utils.data.DataLoader(
            train_dataset,
            batch_size=self.kwargs.get('batch_size'),
            shuffle=True,
            num_workers=os.cpu_count())
    }

    if val_path.exists():
        val_dataset = self.get_default_dataset(val_path, train=False)
        val_dataset.summary(logger=self.logger)
        self.dataloaders['val'] = torch.utils.data.DataLoader(
            val_dataset, batch_size=1, num_workers=os.cpu_count())

    self.logger.info(underline('\nTraining Stage', '='))
    self.metric_funcs = self.kwargs.get('metrics')

    epochs = self.kwargs.get('epochs')
    total_epochs = epochs + self.initial_epoch - 1

    for epoch in range(self.initial_epoch, total_epochs + 1):
        self.logger.info(
            underline('\nEpoch {}/{}'.format(epoch, total_epochs), '-'))
        self.tracker.start_new_epoch(self.optimizer.param_groups[0]['lr'])
        self.train_one_epoch(no_val=(not val_path.exists()))
        self.post_epoch_hook(epoch)

        # save metrics to csv file
        self.tracker.save()

        # save learning curves
        record.plot_learning_curves(self.tracker.save_path)

        # save checkpoints for resuming training
        ckpt_path = self.record_dir / \
            'checkpoints' / f'ckpt.{epoch:04d}.pth'
        self.save_checkpoint(
            ckpt_path, epoch=epoch,
            optimizer_state_dict=self.optimizer.state_dict())

        # remove previous checkpoints
        for ckpt_path in sorted(
                (self.record_dir / 'checkpoints').glob('*.pth'))[:-1]:
            os.remove(ckpt_path)

    self.logger.info(self.tracker.report())
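# Hedged, self-contained sketch of the keep-only-latest-checkpoint pruning used
# in the training loop above, exercised against a temporary directory. The
# helper name and file layout are illustrative, not part of the trainer class.
import tempfile
from pathlib import Path

def _demo_prune_checkpoints(ckpt_dir):
    # Delete every checkpoint except the lexicographically last (newest) one;
    # zero-padded epoch numbers make lexicographic order match epoch order.
    for old in sorted(ckpt_dir.glob('*.pth'))[:-1]:
        old.unlink()

with tempfile.TemporaryDirectory() as _tmp:
    _ckpt_dir = Path(_tmp)
    for _epoch in (1, 2, 3):
        (_ckpt_dir / f'ckpt.{_epoch:04d}.pth').touch()
    _demo_prune_checkpoints(_ckpt_dir)
    assert [p.name for p in _ckpt_dir.glob('*.pth')] == ['ckpt.0003.pth']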
rot_cw = lambda im: rotate(im, -ROTATE_ANGLE)
apply_modification_and_save(img, img_fp, 'rot_cw', rot_cw)

if do_skew:
    skew_r = lambda im: skew(im, SKEW_ANGLE)
    apply_modification_and_save(img, img_fp, 'skew_r', skew_r)
    skew_l = lambda im: skew(im, -SKEW_ANGLE)
    apply_modification_and_save(img, img_fp, 'skew_l', skew_l)

if do_blur:
    blur_ = lambda im: blur(im, BLUR_RADIUS)
    apply_modification_and_save(img, img_fp, 'blur', blur_)

if do_underline:
    ul = lambda im: underline(im, FONT_SIZE, BORDER)
    apply_modification_and_save(img, img_fp, 'ul', ul)

if do_complex:
    skew_r_blur = lambda im: blur(skew(im, SKEW_ANGLE), BLUR_RADIUS)
    apply_modification_and_save(img, img_fp, 'skew_r_blur', skew_r_blur)
    skew_l_blur = lambda im: blur(skew(im, -SKEW_ANGLE), BLUR_RADIUS)
    apply_modification_and_save(img, img_fp, 'skew_l_blur', skew_l_blur)

num_mods = 1 + (2 * do_rotate) + (2 * do_skew) + (1 * do_blur) + (
    1 * do_underline) + (2 * do_complex)
num_pics = len(font_names) * len(tokens) * num_mods
print('\n%d pictures output to \'%s\'\n' % (num_pics, output_dir))
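# Hedged worked example of the num_mods arithmetic above: with rotation, skew
# and blur enabled (underline and complex disabled), each font/token pair yields
# 1 original + 2 rotations + 2 skews + 1 blur = 6 images. The flag values here
# are made up for illustration.
def _demo_num_mods():
    do_rotate, do_skew, do_blur, do_underline, do_complex = True, True, True, False, False
    return 1 + (2 * do_rotate) + (2 * do_skew) + (1 * do_blur) + (
        1 * do_underline) + (2 * do_complex)

assert _demo_num_mods() == 6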