Example #1
    def test_recover(self):
        self.assertEqual(recover.recover("漢 漢 10 漢", "漢 漢10漢"), "漢 漢10漢")
        self.assertEqual(recover.recover("漢 漢 10 漢", "漢 漢 1 0漢"), "漢 漢 10 漢")
        self.assertEqual(recover.recover("漢 漢 10 20 漢", "漢 漢1 020漢"), "漢 漢 10 20漢")
        self.assertEqual(recover.recover("漢 漢 10 20 漢 漢", "漢 漢1 020漢 漢"), "漢 漢 10 20漢 漢")
        self.assertEqual(recover.recover("漢 漢 , 漢 , 漢", "漢 漢 , 漢 , 漢"), "漢 漢 , 漢 , 漢")
        self.assertEqual(recover.recover("我 喜 歓 surface的pro 。", "我 喜歓sur face的pro 。"), "我 喜歓 surface的pro 。")
        self.assertEqual(recover.recover("我 喜 歓 surface的pro 。", "我 喜歓sur face的pro。"), "我 喜歓 surface的pro 。")
        self.assertEqual(recover.recover("我 喜 歓 surface的pro 。", "我 喜歓 surface的pro。"), "我 喜歓 surface的pro。")
Example #2
	def __init__(self, parent):
		if printme: print >>sys.stderr, '__init__'
		QWidget.__init__(self, parent)
		self.parent = parent
		self.ui = parent.ui
		self.protocol = self.parent.protocol
		
		# defaults
		self.sending = 0
		self.eled = led.LED(self.ui.eraseLed)
		self.tled = led.LED(self.ui.transferLed)
		self.vled = led.LED(self.ui.verifyLed)
		self.clearLeds()
		self.lastTarget = None

		# default target addresses and menu setup
			# name, transferObject(parent, filename, target address, header choice, who for)
		self.targets = [
			['Main Boot',	sRecordTransfer(parent, '', MAIN_BOOT,		0, MAIN_CPU, 'little')],
			['Main App L',	sRecordTransfer(parent, '', MAIN_APP_LEFT,	1, MAIN_CPU, 'little')],
			['Main App R',	sRecordTransfer(parent, '', MAIN_APP_RIGHT,	1, MAIN_CPU, 'little')]
			]
		for entry in self.targets:
			self.ui.targetSelect.addItem(entry[0])
			entry[1].progress.connect(self.progress)
			entry[1].done.connect(self.srecordDone)
			entry[1].eraseFail.connect(self.eled.error)
			entry[1].transferFail.connect(self.tled.error)
			entry[1].verifyFail.connect(self.vled.error)
			entry[1].eraseDone.connect(self.eled.on)
			entry[1].transferDone.connect(self.tled.on)
			entry[1].verifyDone.connect(self.vled.on)
			entry[1].eraseStart.connect(self.eled.blink)
			entry[1].transferStart.connect(self.tled.blink)
			entry[1].verifyStart.connect(self.vled.blink)
			entry[1].starting.connect(self.eled.off)
			entry[1].starting.connect(self.tled.off)
			entry[1].starting.connect(self.vled.off)

		self.ui.targetSelect.setCurrentIndex(0)
		self.showSrecordValues()
		
		# recovery setup
		self.recovering = 0
		self.recover = recover(parent)
		self.recover.done.connect(self.recoverDone)
		self.recover.failed.connect(self.recoverDone)

		# connections for UI
		self.ui.sendSrecord.pressed.connect(self.sendSrecord)
		self.ui.fileSelect.pressed.connect(self.selectFile)
		self.ui.Version.pressed.connect(self.getVersion)
		self.ui.Recover.pressed.connect(self.selectRecover)
		self.ui.targetSelect.currentIndexChanged.connect(self.saveSrecordValues)		
		self.ui.reboot.clicked.connect(self.reboot)
		self.ui.Run.clicked.connect(self.runTarget)
		self.ui.verifyLed.clicked.connect(self.runVerify)
Example #3
    def test_recover(self):
        self.assertEqual(recover.recover("漢 漢 10 漢", "漢 漢10漢"), "漢 漢10漢")
        self.assertEqual(recover.recover("漢 漢 10 漢", "漢 漢 1 0漢"), "漢 漢 10 漢")
        self.assertEqual(recover.recover("漢 漢 10 20 漢", "漢 漢1 020漢"),
                         "漢 漢 10 20漢")
        self.assertEqual(recover.recover("漢 漢 10 20 漢 漢", "漢 漢1 020漢 漢"),
                         "漢 漢 10 20漢 漢")
        self.assertEqual(recover.recover("漢 漢 , 漢 , 漢", "漢 漢 , 漢 , 漢"),
                         "漢 漢 , 漢 , 漢")
        self.assertEqual(
            recover.recover("我 喜 歓 surface的pro 。", "我 喜歓sur face的pro 。"),
            "我 喜歓 surface的pro 。")
        self.assertEqual(
            recover.recover("我 喜 歓 surface的pro 。", "我 喜歓sur face的pro。"),
            "我 喜歓 surface的pro 。")
        self.assertEqual(
            recover.recover("我 喜 歓 surface的pro 。", "我 喜歓 surface的pro。"),
            "我 喜歓 surface的pro。")
def dehaze(image_path, airlight=np.array([9.5, 10, 9.5])):
    """ Dehazes an image using color-lines and a given airlight vector. """
    airlight = airlight / np.linalg.norm(airlight)
    img = cv2.imread(image_path) / 255
    transmission_image = np.zeros(img.shape[:2])
    sigma_image = np.zeros(img.shape[:2])
    sliding_window = SlidingWindow(img, scans=5)

    for window in sliding_window:
        patch = window.patch
        color_line = ransac(patch, iterations=3)

        if color_line.valid(airlight):
            transmission_image[window.y, window.x] = color_line.transmission
            sigma_image[window.y, window.x] = color_line.sigma(airlight)

    transmission_image = clip(transmission_image)
    transmission_image = interpolate(transmission_image)
    # transmission_image = mrf_interpolate(transmission_image, sigma_image, img)  # Uncomment for mrf
    img = recover(img, transmission_image, airlight)
    return img
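For context, a minimal usage sketch under stated assumptions: the file names are hypothetical, `cv2` and `numpy as np` are imported as the function above already requires, and the recovered image is clipped to [0, 1] before being rescaled for saving.

result = dehaze('hazy_street.png')        # hypothetical input path
result = np.clip(result, 0, 1)            # guard against slight over/undershoot in the recovery
cv2.imwrite('dehazed_street.png', (result * 255).astype(np.uint8))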
Example #5
def sanity_check(localhost, duthost, request, fanouthosts, tbinfo):
    logger.info("Start pre-test sanity check")

    skip_sanity = False
    allow_recover = False
    recover_method = "adaptive"
    check_items = set(copy.deepcopy(
        constants.DEFAULT_CHECK_ITEMS))  # Default check items
    post_check = False

    customized_sanity_check = None
    for m in request.node.iter_markers():
        logger.info("Found marker: m.name=%s, m.args=%s, m.kwargs=%s" %
                    (m.name, m.args, m.kwargs))
        if m.name == "sanity_check":
            customized_sanity_check = m
            break

    if customized_sanity_check:
        logger.info("Process marker %s in script. m.args=%s, m.kwargs=%s" %
                    (m.name, str(m.args), str(m.kwargs)))
        skip_sanity = customized_sanity_check.kwargs.get("skip_sanity", False)
        allow_recover = customized_sanity_check.kwargs.get(
            "allow_recover", False)
        recover_method = customized_sanity_check.kwargs.get(
            "recover_method", "adaptive")
        if allow_recover and recover_method not in constants.RECOVER_METHODS:
            logger.warning("Unsupported recover method")
            logger.info(
                "Fall back to use default recover method 'config_reload'")
            recover_method = "config_reload"

        check_items = _update_check_items(
            check_items, customized_sanity_check.kwargs.get("check_items", []),
            constants.SUPPORTED_CHECK_ITEMS)
        post_check = customized_sanity_check.kwargs.get("post_check", False)

    if request.config.option.skip_sanity:
        skip_sanity = True
    if request.config.option.allow_recover:
        allow_recover = True
    items = request.config.getoption("--check_items")
    if items:
        items_array = str(items).split(',')
        check_items = _update_check_items(check_items, items_array,
                                          constants.SUPPORTED_CHECK_ITEMS)

    # ignore BGP check for particular topology type
    if tbinfo['topo']['type'] == 'ptf' and 'bgp' in check_items:
        check_items.remove('bgp')

    logger.info("Sanity check settings: skip_sanity=%s, check_items=%s, allow_recover=%s, recover_method=%s, post_check=%s" % \
        (skip_sanity, check_items, allow_recover, recover_method, post_check))

    if skip_sanity:
        logger.info(
            "Skip sanity check according to command line argument or configuration of test script."
        )
        yield
        return

    if not check_items:
        logger.info(
            "No sanity check item is specified, no pre-test sanity check")
        yield
        logger.info(
            "No sanity check item is specified, no post-test sanity check")
        return

    print_logs(duthost, constants.PRINT_LOGS)
    check_results = do_checks(duthost, check_items)
    logger.info("!!!!!!!!!!!!!!!! Pre-test sanity check results: !!!!!!!!!!!!!!!!\n%s" % \
                json.dumps(check_results, indent=4))
    if any([result["failed"] for result in check_results]):
        if not allow_recover:
            pytest.fail(
                "Pre-test sanity check failed, allow_recover=False {}".format(
                    check_results))
            return

        logger.info(
            "Pre-test sanity check failed, try to recover, recover_method=%s" %
            recover_method)
        recover(duthost, localhost, fanouthosts, check_results, recover_method)
        logger.info("Run sanity check again after recovery")
        new_check_results = do_checks(duthost, check_items)
        logger.info("!!!!!!!!!!!!!!!! Pre-test sanity check after recovery results: !!!!!!!!!!!!!!!!\n%s" % \
                    json.dumps(new_check_results, indent=4))
        if any([result["failed"] for result in new_check_results]):
            failed_items = json.dumps(
                [result for result in new_check_results if result["failed"]],
                indent=4)
            logger.error("Failed check items:\n{}".format(failed_items))
            pytest.fail(
                "Pre-test sanity check failed again after recovered by '{}' with failed items:\n{}"
                .format(recover_method, failed_items))
            return

    logger.info("Done pre-test sanity check")

    yield

    logger.info("Start post-test sanity check")

    if not post_check:
        logger.info(
            "No post-test check is required. Done post-test sanity check")
        return

    post_check_results = do_checks(duthost, check_items)
    logger.info("!!!!!!!!!!!!!!!! Post-test sanity check results: !!!!!!!!!!!!!!!!\n%s" % \
                json.dumps(post_check_results, indent=4))
    if any([result["failed"] for result in post_check_results]):
        failed_items = json.dumps(
            [result for result in post_check_results if result["failed"]],
            indent=4)
        logger.error("Failed check items:\n{}".format(failed_items))
        pytest.fail(
            "Post-test sanity check failed with failed items:\n{}".format(
                failed_items))
        return

    logger.info("Done post-test sanity check")
    return
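The fixture above pulls its settings from a `sanity_check` marker on the test module, so a script can tune it without touching the plugin. A minimal sketch of such a marker with illustrative values (the items are presumably filtered against `constants.SUPPORTED_CHECK_ITEMS` by `_update_check_items`):

import pytest

# Illustrative module-level marker: allow recovery via config_reload, limit the
# checks to the 'bgp' item, and re-run the checks after the test finishes.
pytestmark = [
    pytest.mark.sanity_check(allow_recover=True,
                             recover_method="config_reload",
                             check_items=["bgp"],
                             post_check=True)
]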
Example #6
def sanity_check(testbed_devices, request):
    logger.info("Start pre-test sanity check")

    dut = testbed_devices["dut"]
    localhost = testbed_devices["localhost"]

    skip_sanity = False
    allow_recover = False
    recover_method = "config_reload"
    check_items = set(copy.deepcopy(
        constants.SUPPORTED_CHECK_ITEMS))  # Default check items
    post_check = False

    customized_sanity_check = None
    for m in request.node.iter_markers():
        logger.info("Found marker: m.name=%s, m.args=%s, m.kwargs=%s" %
                    (m.name, m.args, m.kwargs))
        if m.name == "sanity_check":
            customized_sanity_check = m
            break

    if customized_sanity_check:
        logger.info("Process marker %s in script. m.args=%s, m.kwargs=%s" %
                    (m.name, str(m.args), str(m.kwargs)))
        skip_sanity = customized_sanity_check.kwargs.get("skip_sanity", False)
        allow_recover = customized_sanity_check.kwargs.get(
            "allow_recover", False)
        recover_method = customized_sanity_check.kwargs.get(
            "recover_method", "config_reload")
        if allow_recover and recover_method not in constants.RECOVER_METHODS:
            logger.warning("Unsupported recover method")
            logger.info(
                "Fall back to use default recover method 'config_reload'")
            recover_method = "config_reload"

        check_items = _update_check_items(
            check_items, customized_sanity_check.kwargs.get("check_items", []),
            constants.SUPPORTED_CHECK_ITEMS)
        post_check = customized_sanity_check.kwargs.get("post_check", False)

    if request.config.option.allow_recover:
        allow_recover = True

    logger.info("Sanity check settings: check_items=%s, allow_recover=%s, recover_method=%s, post_check=%s" % \
        (check_items, allow_recover, recover_method, post_check))

    if skip_sanity:
        logger.info(
            "Skip sanity check according to configuration of test script.")
        yield
        return

    if not check_items:
        logger.info(
            "No sanity check item is specified, no pre-test sanity check")
        yield
        logger.info(
            "No sanity check item is specified, no post-test sanity check")
        return

    print_logs(dut, constants.PRINT_LOGS)
    check_results = do_checks(dut, check_items)
    logger.info("!!!!!!!!!!!!!!!! Pre-test sanity check results: !!!!!!!!!!!!!!!!\n%s" % \
                json.dumps(check_results, indent=4))
    if any([result["failed"] for result in check_results]):
        if not allow_recover:
            pytest.fail("Pre-test sanity check failed, allow_recover=False")
            return

        logger.info(
            "Pre-test sanity check failed, try to recover, recover_method=%s" %
            recover_method)
        recover(dut, localhost, recover_method)
        logger.info("Run sanity check again after recovery")
        new_check_results = do_checks(dut, check_items)
        logger.info("!!!!!!!!!!!!!!!! Pre-test sanity check after recovery results: !!!!!!!!!!!!!!!!\n%s" % \
                    json.dumps(new_check_results, indent=4))
        if any([result["failed"] for result in new_check_results]):
            pytest.fail(
                "Pre-test sanity check failed again after recovered by '%s'" %
                recover_method)
            return

    logger.info("Done pre-test sanity check")

    yield

    logger.info("Start post-test sanity check")

    if not post_check:
        logger.info(
            "No post-test check is required. Done post-test sanity check")
        return

    post_check_results = do_checks(dut, check_items)
    logger.info("!!!!!!!!!!!!!!!! Post-test sanity check results: !!!!!!!!!!!!!!!!\n%s" % \
                json.dumps(post_check_results, indent=4))
    if any([result["failed"] for result in post_check_results]):
        pytest.fail("Post-test sanity check failed")
        return

    logger.info("Done post-test sanity check")
    return
Example #7
def gan_images(args):
    if args.set_seed:
        torch.manual_seed(0)
        np.random.seed(0)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    os.makedirs(BASE_DIR, exist_ok=True)

    def reset_gen():
        if args.model.startswith('began'):
            gen = Generator128(64)
            if 'untrained' not in args.model:
                gen = load_trained_net(gen, (
                    './checkpoints/celeba_began.withskips.bs32.cosine.min=0.25'
                    '.n_cuts=0/gen_ckpt.49.pt'))
            gen = gen.eval().to(DEVICE)
            img_size = 128
        elif args.model.startswith('beta_vae'):
            gen = VAE()
            if 'untrained' not in args.model:
                t = torch.load(
                    './vae_checkpoints/vae_bs=128_beta=0.1/epoch_19.pt')
                gen.load_state_dict(t)
            gen = gen.eval().to(DEVICE)
            gen = gen.decoder
            img_size = 128
        elif args.model.startswith('biggan'):
            gen = BigGanSkip().to(DEVICE)
            img_size = 512
        elif args.model.startswith('dcgan'):
            gen = dcgan_generator()
            if 'untrained' not in args.model:
                t = torch.load(
                    ('./dcgan_checkpoints/netG.epoch_24.n_cuts_0.bs_64'
                     '.b1_0.5.lr_0.0002.pt'))
                gen.load_state_dict(t)
            gen = gen.eval().to(DEVICE)
            img_size = 64

        elif args.model.startswith('vanilla_vae'):
            gen = VAE()
            if 'untrained' not in args.model:
                t = torch.load(
                    './vae_checkpoints/vae_bs=128_beta=1.0/epoch_19.pt')
                gen.load_state_dict(t)
            gen = gen.eval().to(DEVICE)
            gen = gen.decoder
            img_size = 128
        else:
            raise NotImplementedError()
        return gen, img_size

    gen, img_size = reset_gen()
    img_shape = (3, img_size, img_size)
    metadata = recovery_settings[args.model]
    # Pop the swept, list-valued settings from `metadata`; per-run scalar values
    # are written back below before `metadata` is used for folder names and pickled.
    n_cuts_list = metadata['n_cuts_list']
    del metadata['n_cuts_list']

    z_init_mode_list = metadata['z_init_mode']
    limit_list = metadata['limit']
    assert len(z_init_mode_list) == len(limit_list)
    del metadata['z_init_mode']
    del metadata['limit']

    forwards = forward_models[args.model]

    data_split = Path(args.img_dir).name
    for img_name in tqdm(sorted(os.listdir(args.img_dir)),
                         desc='Images',
                         leave=True,
                         disable=args.disable_tqdm):
        # Load image and get filename without extension
        # If untrained, reset generator for every image
        if "untrained" in args.model:
            gen, _ = reset_gen()
        orig_img = load_target_image(os.path.join(args.img_dir, img_name),
                                     img_size).to(DEVICE)
        img_basename, _ = os.path.splitext(img_name)

        for n_cuts in tqdm(n_cuts_list,
                           desc='N_cuts',
                           leave=False,
                           disable=args.disable_tqdm):
            metadata['n_cuts'] = n_cuts
            for i, (f, f_args_list) in enumerate(
                    tqdm(forwards.items(),
                         desc='Forwards',
                         leave=False,
                         disable=args.disable_tqdm)):
                for f_args in tqdm(f_args_list,
                                   desc=f'{f} Args',
                                   leave=False,
                                   disable=args.disable_tqdm):

                    f_args['img_shape'] = img_shape
                    forward_model = get_forward_model(f, **f_args)

                    for z_init_mode, limit in zip(
                            tqdm(z_init_mode_list,
                                 desc='z_init_mode',
                                 leave=False), limit_list):
                        metadata['z_init_mode'] = z_init_mode
                        metadata['limit'] = limit

                        # Before doing recovery, check if results already exist
                        # and possibly skip
                        recovered_name = 'recovered.pt'
                        results_folder = get_results_folder(
                            image_name=img_basename,
                            model=args.model,
                            n_cuts=n_cuts,
                            split=data_split,
                            forward_model=forward_model,
                            recovery_params=dict_to_str(metadata),
                            base_dir=BASE_DIR)

                        os.makedirs(results_folder, exist_ok=True)

                        recovered_path = results_folder / recovered_name
                        if os.path.exists(
                                recovered_path) and not args.overwrite:
                            print(
                                f'{recovered_path} already exists, skipping...'
                            )
                            continue

                        if args.run_name is not None:
                            current_run_name = (
                                f'{img_basename}.n_cuts={n_cuts}'
                                f'.{forward_model}.z_lr={metadata["z_lr"]}'
                                f'.z_init={z_init_mode}.limit={limit}'
                                f'.{args.run_name}')
                        else:
                            current_run_name = None

                        recovered_img, distorted_img, _ = recover(
                            orig_img, gen, metadata['optimizer'], n_cuts,
                            forward_model, z_init_mode, limit,
                            metadata['z_lr'], metadata['n_steps'],
                            metadata['restarts'], args.run_dir,
                            current_run_name, args.disable_tqdm)

                        # Make images folder
                        img_folder = get_images_folder(split=data_split,
                                                       image_name=img_basename,
                                                       img_size=img_size,
                                                       base_dir=BASE_DIR)
                        os.makedirs(img_folder, exist_ok=True)

                        # Save original image if needed
                        original_img_path = img_folder / 'original.pt'
                        if not os.path.exists(original_img_path):
                            torch.save(orig_img, original_img_path)

                        # Save distorted image if needed
                        if forward_model.viewable:
                            distorted_img_path = img_folder / f'{forward_model}.pt'
                            if not os.path.exists(distorted_img_path):
                                torch.save(distorted_img, distorted_img_path)

                        # Save recovered image and metadata
                        torch.save(recovered_img, recovered_path)
                        with open(results_folder / 'metadata.pkl', 'wb') as f:
                            pickle.dump(metadata, f)
                        p = psnr(recovered_img, orig_img)
                        with open(results_folder / 'psnr.pkl', 'wb') as f:
                            pickle.dump(p, f)
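For reference, a minimal sketch of the PSNR metric computed at the end, assuming image tensors scaled to [0, 1]; the project's own `psnr` helper may differ in signature and normalization.

import torch

def psnr_sketch(x: torch.Tensor, y: torch.Tensor, max_val: float = 1.0) -> torch.Tensor:
    """Peak signal-to-noise ratio (dB) between two same-shaped image tensors."""
    mse = torch.mean((x - y) ** 2)
    return 10 * torch.log10(max_val ** 2 / mse)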