def test_fullRun(self):
    """End-to-end run of the tool over a merge, a conflict and a sync.

    Builds three scenarios across the configured repositories:
      * 'foo' -- upstream moved on while we carry a forked change (merge)
      * 'bar' -- both sides wrote debian/new.file (conflict)
      * 'eek' -- upstream published a newer version we carry no fork of (sync)
    then invokes main.main() with default parsed options.
    """
    target_repo, source1_repo, source2_repo = \
        testhelper.standard_simple_config(num_stable_sources=2)

    # Set up a merge
    package = testhelper.build_and_import_simple_package(
        'foo', '1.0-1', source1_repo)
    forked = copy(package)
    forked.changelog_entry(version='1.0-1mom1')
    # Use a context manager so the handle is closed deterministically;
    # the original bare open(...).write(...) leaked it until GC.
    with open(forked.pkg_path + '/debian/new.file', 'w') as f:
        f.write('hello')
    forked.build()
    target_repo.importPackage(forked)
    package.changelog_entry(version='1.2-1')
    package.create_orig()
    package.build()
    source2_repo.importPackage(package)

    # Set up a conflict
    package = testhelper.build_and_import_simple_package(
        'bar', '2.0-1', source1_repo)
    forked = copy(package)
    forked.changelog_entry(version='2.0-1mom1')
    with open(forked.pkg_path + '/debian/new.file', 'w') as f:
        f.write('hello')
    forked.build()
    target_repo.importPackage(forked)
    package.changelog_entry(version='2.2-1')
    with open(package.pkg_path + '/debian/new.file', 'w') as f:
        f.write('conflicts')
    package.create_orig()
    package.build()
    source2_repo.importPackage(package)

    # Set up a sync
    package = testhelper.build_and_import_simple_package(
        'eek', '3.0-1', target_repo)
    updated = copy(package)
    updated.changelog_entry(version='3.1-1')
    updated.create_orig()
    updated.build()
    source1_repo.importPackage(updated)

    # Run the program
    parser = get_option_parser()
    main.options(parser)
    options, args = parser.parse_args()
    main.main(options, [])
def test_options(capfd) -> None:
    """
    Test display options.
    """
    # Trigger the option display.
    main.options()

    # Capture everything printed so far.
    captured, _ = capfd.readouterr()

    # Each expected row of the options table must appear in the output.
    expected_rows = [
        "Rock Gun Lighting\n",
        "Devil Dragon Water\n",
        "Air Paper Sponge\n",
        "Wolf Tree Human\n",
        "Snake Scissors Fire\n",
    ]
    for row in expected_rows:
        assert row in captured
def main():
    """Restore a trained PoseNet checkpoint and evaluate it via coco_eval.

    Reads run configuration from main.options(); requires opts.continue_exp
    to name an existing experiment so a checkpoint can be restored and the
    result prefix ('checkpoint/<experiment>') can be built.
    """
    from main import options, Model_Checkpoints
    from models.posenet import PoseNet

    opts = options()
    mode = opts.mode

    model = PoseNet(nstack=opts.nstack, inp_dim=opts.inp_dim,
                    oup_dim=opts.oup_dim)
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    optimizer = torch.optim.Adam(model.parameters(), lr=opts.lr)
    epoch = Model_Checkpoints(opts).load_checkpoints(model, optimizer)
    print("Use the model which is trained by {} epoches".format(epoch))

    if opts.continue_exp is None:
        # Fix: the original only warned and fell through, then crashed
        # later with TypeError in os.path.join('checkpoint', None).
        # Bail out here instead.
        print("Warning: you must choose a trained model")
        return

    def runner(imgs):
        # Forward pass; callers supply numpy images, so wrap in a Tensor.
        return test_func(model, imgs=torch.Tensor(np.float32(imgs)))['preds']

    def do(image_id, img):
        # Run multi-person estimation, keep (x, y, score) triples and
        # convert detections into COCO-style dicts with per-person scores.
        ans, scores = multiperson(img, runner, mode)
        if len(ans) > 0:
            ans = ans[:, :, :3]
        pred = genDtByPred(ans, image_id)
        for det, score in zip(pred, scores):
            det['score'] = float(score)
        return pred

    gts = []     # no ground truth collected here; coco_eval handles empty gts
    preds = []
    for image_id, img in get_img(inp_res=-1):
        preds.append(do(image_id, img))

    prefix = os.path.join('checkpoint', opts.continue_exp)
    coco_eval(prefix, preds, gts)
def test_part_2(self):
    """Count adapter arrangements after adding the device's adapter."""
    adapters = self.adapters
    # The built-in adapter is always 3 jolts above the highest one.
    adapters.append(max(adapters) + 3)
    arrangements = options(adapters, i=0, hits=[1] * len(adapters))
    self.assertEqual(arrangements, 8)
def your_info(self, user):
    """Print the player's profile, then return to the options menu."""
    profile = (
        f'|Username: {self.username}|',
        f'|Coins: {self.coins}|',
        f'|Score: {self.score}|',
    )
    print('\n'.join(profile))
    options(user)
def leaderboard(self, user):
    """Placeholder leaderboard screen; returns to the options menu."""
    # Plain literal -- the original used an f-string with no placeholders.
    print('In Works')
    options(user)