Example #1
  def __init__(self, learner, data_source, step_kwargs=None):
    self.learner = learner
    self.data_source = data_source
    self.hidden_state = learner.policy.initial_state(data_source.batch_size)
    self.step_kwargs = step_kwargs or {}  # avoid sharing one mutable default dict

    self.data_profiler = utils.Profiler()
    self.step_profiler = utils.Profiler()
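All of the examples in this listing revolve around a project-local utils.Profiler whose implementation is not shown. A minimal sketch consistent with the call sites below (a context manager in Examples #1 and #3, event() and get_stats() in Examples #4 and #5) might look like the following; every detail here is an assumption, not the project's actual code.

import time

class Profiler:
    """Hypothetical stand-in for the project-local utils.Profiler."""

    def __init__(self):
        self.events = []  # (label, value) records

    def __enter__(self):
        # Used as "with utils.Profiler():" in Examples #1 and #3
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc, tb):
        self.events.append(('block', time.perf_counter() - self._start))
        return False  # never suppress exceptions raised inside the block

    def event(self, label):
        # Used as p.event("start") in Example #5
        self.events.append((label, time.perf_counter()))

    def get_stats(self):
        # Used as profiler.get_stats() in Example #4
        return ', '.join('{}={:.3f}'.format(label, value)
                         for label, value in self.events)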
Example #2
    def __init__(
        self,
        learner,
        data_source,
        saved_model_path,
        table_length,
        table_name,
    ):
        self.learner = learner
        self.data_source = data_source
        self.hidden_state = learner.policy.initial_state(
            data_source.batch_size)
        self.saved_model_path = saved_model_path

        self.data_profiler = utils.Profiler()
        self.step_profiler = utils.Profiler()

        self.mdf = pd.DataFrame()
        self.time = 0
        self.table_length = table_length
        self.table_name = table_name
Example #3
def testPanorama(example, outprefix, anms, cmax, intermediates=False):
    if example == 1:
        # example 1: living room
        outpath = './data/panorama/livingroom/processed/'
        paths = [
            './data/panorama/livingroom/lr-l.jpg',
            './data/panorama/livingroom/lr-c.jpg',
            './data/panorama/livingroom/lr-r.jpg'
        ]
    else:
        # example 2: balcony
        outpath = './data/panorama/balcony/processed/'
        paths = [
            './data/panorama/balcony/IMG_4189.jpg',
            './data/panorama/balcony/IMG_4190.jpg',
            './data/panorama/balcony/IMG_4191.jpg',
            './data/panorama/balcony/IMG_4188.jpg',
            './data/panorama/balcony/IMG_4192.jpg',
            './data/panorama/balcony/IMG_4187.jpg',
            './data/panorama/balcony/IMG_4193.jpg',
            './data/panorama/balcony/IMG_4186.jpg',
            './data/panorama/balcony/IMG_4194.jpg',
            './data/panorama/balcony/IMG_4185.jpg',
            './data/panorama/balcony/IMG_4195.jpg'
        ]

    imgs = []
    np.random.seed(12)
    S, T = paths[:2]

    with utils.Profiler():
        print(paths[0], paths[1])

        try:
            S, T = utils.Image.load(S, T, float=True)
            with utils.Profiler():
                T, T_ = stitch(S,
                               T,
                               T,
                               anms,
                               cmax,
                               maskpow=.2,
                               intermediates=imgs if intermediates else None)
            imgs.append(T)

        except Exception as e:
            print(e)
            print('error processing: ', paths[0], paths[1], ' skip')

        for path in paths[2:]:
            print(path)

            try:
                S = utils.Image.load(path, float=True)
                with utils.Profiler():
                    T, T_ = stitch(
                        S,
                        T,
                        T_,
                        anms,
                        cmax,
                        maskpow=6.,
                        intermediates=imgs if intermediates else None)
                imgs.append(T)

            except Exception as e:
                print(e)
                print('error processing: ', path, ' skip.')

        print('done')

    print('saving images...')

    if not intermediates:
        imgs = imgs[-1:]

    for i, img in enumerate(imgs):
        if isinstance(img, np.ndarray):
            utils.Image.save((img, outpath + outprefix + str(i) + '.jpg'))
        else:
            img.savefig(outpath + outprefix + str(i) + '.svg',
                        dpi=1200,
                        transparent=True,
                        bbox_inches='tight',
                        pad_inches=0)

        print(i + 1, ' saved...')
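A possible invocation for the living-room set follows; the anms and cmax values are illustrative placeholders, not values taken from the original project.

# Hypothetical call: stitch the living-room images and keep intermediate figures
testPanorama(example=1, outprefix='lr', anms=500, cmax=0.9, intermediates=True)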
Example #4
    def _run(self):
        """Detection loop captures Kodi render buffer every 1s to create an
           image hash. Hash is compared to the previous hash to determine
           whether current frame of video is similar to the previous frame.

           Hash is also compared to hashes calculated from previously played
           episodes to detect common sequence of frames (i.e. credits).

           A consecutive number of matching frames must be detected to confirm
           that end credits are playing."""

        self.log('Started')
        self.running = True

        if self.state.detector_debug:
            profiler = utils.Profiler()

        play_time = 0
        total_time = 0  # ensure update_timestamp() has a value if the loop never runs
        while not (self.monitor.abortRequested() or self.sigterm
                   or self.sigstop):
            loop_start_time = timeit.default_timer()

            with self.player as check_fail:
                play_time = self.player.getTime()
                total_time = self.player.getTotalTime()
                self.hash_index['current'] = (int(total_time - play_time),
                                              self.hashes.episode)
                # Only capture if playing at normal speed
                # check_fail = self.player.get_speed() != 1
                check_fail = False
            if check_fail:
                self.log('No file is playing')
                break

            self.capturer.capture(*self.capture_size)
            image = self.capturer.getImage()

            # Capture failed or was skipped, re-initialise RenderCapture
            if not image or image[-1] != 255:
                continue

            # Convert captured video frame from a nominal default 484x272 BGRA
            # image to a 14x8 greyscale image, depending on video aspect ratio
            image = self._pre_process_image(image, self.capture_size,
                                            self.hashes.hash_size)

            # Generate median absolute deviation from median hash
            image_hash = self._calc_image_hash(image)

            # Check if current hash matches with previous hash, typical end
            # credits hash, or other episode hashes
            stats = self._check_similarity(image_hash)

            if self.state.detector_debug:
                self.log('Match: {0[hits]}/{1}, Miss: {0[misses]}/{2}'.format(
                    self.match_counts, self.match_number,
                    self.mismatch_number))

                self._print_hash(
                    self.hashes.data.get(self.hash_index['credits']),
                    image_hash, self.hashes.hash_size,
                    ('Hash compare: {0:2.1f}% similar to typical credits'
                     ).format(stats['credits']))

                self._print_hash(
                    self.hashes.data.get(self.hash_index['previous']),
                    image_hash, self.hashes.hash_size,
                    ('Hash compare: {0:2.1f}% similar to previous frame'
                     ' with {1:2.1f}% significant pixels').format(
                         stats['previous'], stats['significance']))

                self._print_hash(
                    self.past_hashes.data.get(self.hash_index['episodes']),
                    image_hash, self.hashes.hash_size,
                    ('Hash compare: {0:2.1f}% similar to other episodes'
                     ).format(stats['episodes']))

                self.log(profiler.get_stats())

            # Store current hash for comparison with next video frame
            self.hashes.data[self.hash_index['current']] = image_hash
            self.hash_index['previous'] = self.hash_index['current']

            # Wait until total loop time of 1s has elapsed
            self.monitor.waitForAbort(
                max(0.1, 1 - timeit.default_timer() + loop_start_time))

        self.update_timestamp(play_time, total_time)

        # Reset thread signals
        self.log('Stopped')
        self.running = False
        self.sigstop = False
        self.sigterm = False
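The core idea of Example #4, a median-based binary frame hash compared bit-for-bit against the previous frame and against stored episode hashes, can be illustrated with a short NumPy sketch. The 14x8 frame size comes from the comments above; the hash and similarity formulas here are simplified assumptions, not the add-on's exact code.

import numpy as np

def calc_image_hash(image):
    # Binary hash: True wherever a pixel is brighter than the frame median
    pixels = np.asarray(image, dtype=np.float32)
    return pixels > np.median(pixels)

def hash_similarity(hash_a, hash_b):
    # Percentage of hash bits that match between two frames
    return 100.0 * np.mean(hash_a == hash_b)

frame = np.random.randint(0, 256, (8, 14))  # stand-in 14x8 greyscale frame
print(hash_similarity(calc_image_hash(frame), calc_image_hash(frame)))  # 100.0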
Example #5
import matplotlib
matplotlib.use('Agg')  # pick a non-interactive backend before any pyplot import

import config
import utils  # project-local module providing Profiler, as in the examples above
import datasets  # assumed project-local dataset loader used below
import transformational_measures as tm  # assumed alias behind the tm.* calls
from transformational_measures.pytorch import ImageDataset, NumpyDataset
# NumpyDataset is assumed to live beside ImageDataset; the original import is not shown

dataset_name = "cifar10"
model_config = config.SimpleConvConfig()

print(f"### Loading dataset {dataset_name} and model {model_config.name}....")

use_cuda = True
dataset = datasets.get_classification(dataset_name)
numpy_dataset = NumpyDataset(dataset.x_test, dataset.y_test)
image_dataset = ImageDataset(numpy_dataset)
model, optimizer = model_config.make_model_and_optimizer(
    dataset.input_shape, dataset.num_classes, use_cuda)
p = utils.Profiler()
p.event("start")

#transformations=tm.SimpleAffineTransformationGenerator(r=360, s=3, t=2,n_rotations=4)
transformations = tm.SimpleAffineTransformationGenerator(r=360, n_rotations=4)
transformations.set_input_shape(dataset.input_shape)
transformations.set_pytorch(True)
transformations.set_cuda(use_cuda)

iterator = tm.NormalPytorchActivationsIterator(model,
                                               image_dataset,
                                               transformations,
                                               batch_size=64,
                                               num_workers=0,
                                               use_cuda=use_cuda)
adapter = tm.PytorchNumpyImageTransformationAdapter(use_cuda=use_cuda)
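The script presumably goes on to time the measurement phase; a hedged continuation, using only the Profiler calls demonstrated elsewhere in this listing (event() above, get_stats() in Example #4), could look like this.

p.event("iterator ready")  # hypothetical label, mirroring p.event("start")
print(p.get_stats())       # assumes the get_stats() call shown in Example #4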