def gen_video_preview(elem, output_dir):
        """
        Copy temporary image to specified output filepath.

        :param elem: Data element to get the preview image for.
        :type elem: smqtk.data_rep.DataElement

        :param output_dir: Directory to save generated image to.
        :type output_dir: str

        """
        output_fp = os.path.join(output_dir,
                                 "%s.gif" % elem.md5())
        if not os.path.isfile(output_fp):
            tmp_vid_fp = elem.write_temp()
            interval = 0.5  # ~2fps gif
            fm = video_utils.ffmpeg_extract_frame_map(
                tmp_vid_fp, second_interval=interval
            )
            img_arrays = []
            for frm_num in sorted(fm.keys()):
                img_arrays.append(imageio.imread(fm[frm_num]))
            imageio.mimwrite(output_fp, img_arrays, duration=interval)
            elem.clean_temp()
        return output_fp
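A minimal usage sketch for the helper above (hypothetical: assumes a smqtk DataElement instance named elem and an existing output directory):

    preview_fp = gen_video_preview(elem, "/tmp/previews")
    print(preview_fp)  # "/tmp/previews/<md5>.gif"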
Example #2
def MakeGlitchGifVSH(image, len_=60, blockSize=16, sigma=10, iterations=300, random_=True, Glitch_=False):
    im = Image.open(image)
    VSH = imageio.mimread('vsh.gif')
    VSH = extendgif(VSH, len_)
    nFrames = []

    glitchVar = 0

    path = '/'.join(image.split('/')[:-1])
    name = image.split('/')[-1]
    fname = name.split('.')[0]
    path += '/glitch_' + fname + '.gif'

    frames = [im.copy() for a in range(len_)]
    for i, frame in enumerate(frames):
        if random.randint(0, 15) >= 10 and glitchVar == 0:
            glitchVar = random.randint(1, sigma)
        if glitchVar != 0:
            frame = GlitchRet(frame.convert('RGB'), Glitch_=Glitch_, sigma=glitchVar, blockSize=blockSize,
                              iterations=iterations, random_=random_)
            glitchVar -= 1
        # overlay the matching VSH frame; enumerate avoids the off-by-one of
        # the original manual counter, which indexed past the end of VSH
        frame = ImageChops.multiply(frame, Image.fromarray(VSH[i]).resize(frame.size).convert('RGB'))
        nFrames.append(np.asarray(frame.convert('RGB')))
    imageio.mimwrite(path, nFrames)

    return path
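A hedged usage sketch (assumes the hard-coded vsh.gif overlay exists in the working directory and that GlitchRet/extendgif from the same project are importable):

    out_path = MakeGlitchGifVSH('imgs/photo.png', len_=30, sigma=8)
    print(out_path)  # "imgs/glitch_photo.gif"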
Example #3
 def run(self):
     self._create_target_path()
     sequences = self.get_sequences()
     for sequence_id, sequence in enumerate(sequences):
         sequence_filename = 'sequence_{}.gif'.format(sequence_id)
         target_filename = os.path.join(self.target_path, sequence_filename)
         image_sequence = [imageio.imread(frame) for frame in sequence]
         imageio.mimwrite(target_filename, image_sequence, format='GIF', loop=0, duration=0.5)
         click.echo('{:>15}: {}'.format('saved', target_filename))
         click.echo('-' * 80)
Example #4
def save_animation(episode, filename):
    directory = os.path.dirname(filename)
    if not os.path.exists(directory):
        os.makedirs(directory)

    image_generator = (screenshot_pair[1] for (
        screenshot_pair, actions, rewards) in episode)

    imageio.mimwrite(filename, image_generator, fps=50)
    print "Saved video to %s" % filename
Example #5
    def save(self, path):

        """
        This function ...
        :param path:
        :param fps:
        :return:
        """

        # Create and write the GIF file
        imageio.mimwrite(path, self.frames, fps=self.fps)
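For context, a self-contained sketch of the same imageio.mimwrite call on synthetic frames (imageio v2 API; the GIF writer also accepts a per-frame duration instead of fps):

    import numpy as np
    import imageio

    # ten random 64x64 RGB frames
    frames = [np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
              for _ in range(10)]
    imageio.mimwrite('random.gif', frames, fps=5)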
Example #6
    def get_preview_image(self, save_dir=None, regenerate=False):
        """
        Generate and return a preview GIF animation for this video file. File
        saved is named according to the format: %s.preview.gif, where the '%s'
        is the MD5 hex sum of the video that the preview is of.

        If a preview has already been generated somewhere and still exists on
        the file system, we simply return the cached path to that file, if
        regenerate is False.

        :param save_dir: Optional directory to save generated GIF image file to.
            By default we save it in this video file's working directory.
        :type save_dir: str

        :param regenerate: Force regeneration of the preview GIF image file.
            This also updates the cached location so the regenerated file is
            the one returned.
        :type regenerate: bool

        :return: The path to a preview image for this data file.
        :rtype: str

        """
        if (self.__preview_cache is None
                or not osp.isfile(self.__preview_cache)
                or regenerate):
            fname = "%s.preview.gif" % self.md5sum
            if save_dir:
                safe_create_dir(save_dir)
                target_fp = osp.join(save_dir, fname)
            else:
                target_fp = osp.join(self.work_directory, fname)
            # if the file already exists, we don't need to generate it again
            if not osp.isfile(target_fp):
                self.log.debug("[%s] GIF file doesn't exist, generating", self)
                md = self.metadata()
                offset = md.duration * 0.2
                interval = 0.5  # ~2fps gif
                max_duration = min(10.0, md.duration * 0.6)
                fm = self.frame_map(offset, interval, max_duration)
                img_arrays = []
                for frm_num in sorted(fm.keys()):
                    img_arrays.append(imageio.imread(fm[frm_num]))
                imageio.mimwrite(target_fp, img_arrays, duration=interval)
                self.log.debug("[%s] Finished generating GIF", self)
            self.__preview_cache = target_fp

        return self.__preview_cache
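Hypothetical usage of the method above (the wrapper object and file path are assumptions):

    vid = VideoFile('/data/clip.mp4')
    gif_path = vid.get_preview_image(save_dir='/tmp/previews')
    gif_path = vid.get_preview_image(regenerate=True)  # force a rebuild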
Example #7
def responsive_screenshot(url, sleep, resize):
    if not url.startswith('http://') and not url.startswith('https://'):
        sys.exit('URL must start with http:// or https://')

    filename = valid_filename(url.split('://')[-1])
    # note: PhantomJS always returns a screenshot that's the full height of the page
    # note: Chromium doesn't resize smaller than ~500x500
    largest_size = (1920, 1200)
    smallest_size = (320, 1200) # iPhone 4

    frames = []
    for x in range(0, 101, 5):  # +1 to ensure that we include the largest size in our set
        width, height = pytweening.getPointOnLine(*smallest_size, *largest_size, x / 100.0)
        frame = take_screenshot(url, int(width), int(height), largest_size, int(sleep), resize)
        frames.append(frame)
    imageio.mimwrite('{}-{}.gif'.format(str(date.today()), filename), frames, duration=0.2)
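For reference, pytweening.getPointOnLine(x1, y1, x2, y2, n) linearly interpolates between two points, which is what drives the size sweep above; a tiny sketch:

    import pytweening
    x, y = pytweening.getPointOnLine(320, 1200, 1920, 1200, 0.5)
    print(x, y)  # 1120.0 1200.0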
Example #8
def movie(images, path=None, **kwargs):
    """ Create a movie for images """
    import imageio
    if path is None:
        path = imageio.RETURN_BYTES
        
    return imageio.mimwrite(path, images, format='GIF', **kwargs)
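The imageio.RETURN_BYTES sentinel used above makes mimwrite return the encoded file as bytes instead of writing to disk; a minimal sketch:

    import numpy as np
    import imageio

    frames = [np.full((32, 32, 3), i * 25, dtype=np.uint8) for i in range(10)]
    gif_bytes = imageio.mimwrite(imageio.RETURN_BYTES, frames,
                                 format='GIF', duration=0.1)
    with open('fade.gif', 'wb') as f:
        f.write(gif_bytes)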
Example #9
    def saveto(self, path):

        """
        This function ...
        :param path:
        :param fps:
        :return:
        """

        # APNG: special
        if path.endswith(".apng"): write_apng(path, self.frames)

        # Use ImageIO
        else: imageio.mimwrite(path, self.frames, fps=self.fps)

        # Update the path
        self.path = path
Example #10
def GlitchGif(gif, blockSize=16, sigma=10, iterations=300, random_=True, Glitch_=False):
    im = Image.open(gif)
    nFrames = []
    glitchVar = 0
    # original_duration = im.info['duration']
    path = '/'.join(gif.split('/')[:-1])
    name = gif.split('/')[-1]
    path += '/glitch_' + name
    print(path)
    for frame in ImageSequence.Iterator(im):
        if random.randint(0, 15) >= 10 and glitchVar == 0:
            glitchVar = random.randint(1, sigma)
        if glitchVar != 0:
            frame = GlitchRet(frame.convert('RGB'), Glitch_=Glitch_, sigma=glitchVar, blockSize=blockSize,
                              iterations=iterations, random_=random_)
            glitchVar -= 1
        nFrames.append(np.asarray(frame.convert('RGB')))
    # fps = (original_duration) / 1000
    # print(fps)
    # imageio.mimwrite(path, nFrames, **{'duration': fps})
    imageio.mimwrite(path, nFrames)
    return path
Example #11
def MakeGlitchGif(image, len_=60, blockSize=16, sigma=10, iterations=300, random_=True, Glitch_=False):
    im = Image.open(image)
    nFrames = []
    glitchVar = 0

    path = '/'.join(image.split('/')[:-1])
    name = image.split('/')[-1]
    fname = name.split('.')[0]
    path += '/glitch_' + fname + '.gif'

    frames = [im.copy() for a in range(len_)]
    for frame in frames:

        if random.randint(0, 15) >= 10 and glitchVar == 0:
            glitchVar = random.randint(1, sigma)
        if glitchVar != 0:
            frame = GlitchRet(frame.convert('RGB'), Glitch_=Glitch_, sigma=glitchVar, blockSize=blockSize,
                              iterations=iterations, random_=random_)
            glitchVar -= 1
        nFrames.append(np.asarray(frame.convert('RGB')))

    imageio.mimwrite(path, nFrames)
    return path
Example #12
def movie(roots_func, region, filename = 'test.mp4', fps = 20, seconds = 5, period = 1, antialiased = 3):
    colorizer = Colorizer()
    images = []

    N = int(fps * seconds)

    start = time.monotonic()

    for t in np.linspace(0, period, N, endpoint = False):
        roots = roots_func(t)
        func = Poly(roots)
        nf = NF(func, max_steps = 400)

        image = compute_image(nf, colorizer, region, antialiased)
        images.append(flip_data(image))

        print ("Done frame", len(images), "of", N, ",",
                int(time.monotonic() - start), "seconds elapsed")

    elapsed = time.monotonic() - start

    print ("Time elapsed (seconds)", elapsed)

    imageio.mimwrite(filename, images, fps = fps, quality = 10)
Example #13
    def get_loop(self):
        """Build an animated GIF of recent radar images."""
        if len(self.station_code) == 5:
            count = 20
            fps = 10
        else:
            count = 12
            fps = 6

        frames = self.get_frames(count)
        gifs = [imageio.imread(f) for f in frames]

        return imageio.mimwrite(imageio.RETURN_BYTES,
                                gifs,
                                format='GIF',
                                fps=fps)
Example #14
def throw_party_in_memory(img: np.ndarray) -> bytes:
    """Make `img` party and return it as bytes."""

    offsets_x = OFFSET_MULTIPLIER * np.sin(np.arange(-np.pi, np.pi, OFFSET_STEP))
    offsets_y = OFFSET_MULTIPLIER * np.cos(np.arange(-np.pi, np.pi, OFFSET_STEP))

    images = []
    NUM_CHANNELS = 3

    if len(img.shape) == 3 and img.shape[2] > 3:
        img = img[:, :, :3]

    for i, (offset_x, offset_y) in enumerate(zip(offsets_x, offsets_y)):
        c = i % NUM_CHANNELS
        images.append(
            transform(img, emphasize_channel=c, offset_x=offset_x, offset_y=offset_y)
        )
    return imageio.mimwrite(imageio.RETURN_BYTES, images, format="gif", fps=FPS)
Example #15
                    recent_noises, recent_img_noises = append_new_noise_list(
                        multinoises,
                        img_noises,
                        recent_noises,
                        recent_img_noises,
                        maxI=iterations_to_save)

                    socket.send(img_to_send)
                    img_seq.append(img_to_save)
                    curr_iter += 1
                    print("Sent image")
                else:
                    training_heightmap = False
                    imageio.imwrite("sentimage.png", img_seq[-1])
                    imageio.mimwrite("imgseq.gif", img_seq)
                    socket.send_string("finished")

            if (message[0] == "requesting_heightmap_with_masks"):
                if (not training_heightmap):
                    training_heightmap = True
                    curr_iter = 0
                    img_seq = []
                    losses, masks = request_and_masks_to_losses_and_masks(
                        message[1:])

                    multinoises, img_to_send, img_to_save, optimizer = \
                        optimize_one_step_multinoise(
                            model, None, multinoises, freeze_style,
                            img_noises, losses, masks,
                            heightmap_full_resolution)
Example #16
 def imwrite_frames(filename, images, *args, **kw):
     '''write multiple frames to multi image formats'''
     mimwrite(filename, images, *args, **kw)
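Usage is then identical to imageio.mimwrite itself, e.g. (assuming frames is a list of uint8 arrays):

    imwrite_frames('out.gif', frames, duration=0.1)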
Example #17
                                  err[b].cpu().unsqueeze(0))
        torchvision.utils.save_image(img, '{}/residual.png'.format(fname))

    # compute predictions for different z vectors, each prediction has its own folder
    nz = 50
    mov = []
    for indx in range(nz):
        mov.append([])
        print(indx)
        z = Variable(torch.zeros(opt.batch_size, 1, model.nLatent).cpu())
        z.data.copy_(zlist[indx].view(1, model.nLatent).expand(z.size()))
        pred_z = model.decode(vcond, z)
        pred_z = pred_z.data.view(opt.batch_size, opt.npred, opt.nc,
                                  opt.height, opt.width)
        for b in range(opt.batch_size):
            img = dataloader.plot_seq(cond[b].cpu().unsqueeze(0),
                                      pred_z[b].cpu().unsqueeze(0))
            fname = '{}/ep{}'.format(save_dir, b)
            torchvision.utils.save_image(img, '{}/z{}.png'.format(fname, indx))
            mov[-1].append(img)

    # write in movie form for easier viewing
    for indx in range(nz):
        mov[indx] = torch.stack(mov[indx])
    mov = torch.stack(mov)
    mov = mov.permute(1, 0, 3, 4, 2).cpu().clone()
    for b in range(opt.batch_size):
        imageio.mimwrite('{}/movie{}.mp4'.format(save_dir, b),
                         mov[b].cpu().numpy(),
                         fps=5)
Example #18
    def track(self, detections_path, tracks_path):
        """
        Detects all trajectories in the video sequence and saves the results
        to .csv files.
        Args:
            detections_path (str): Output directory for the estimated
                detections csv.
            tracks_path (str): Output directory for the estimated tracks csv.
        Returns:
            tracks (pd.DataFrame): Dataframe with the tracking results.
        """
        os.makedirs(tracks_path, exist_ok=True)
        tracks_file = tracks_path + "/" + self.basename + '_tracks.csv'
        detections_file = detections_path + "/" + self.basename + '_detections.csv'

        mp4_video = 'tmp.mp4'
        output_video = self.basename + '.mp4'
        mimwrite(mp4_video, self.sequence, format='mp4', fps=self.fps)

        # optional flags from the MATLAB code
        save_movie = 0
        plot_results = 0
        snap_shot = 0
        plot_track_results = 0
        analyze_motility = 0

        reformat_detections_file = self.detection_algorithm
        num_frames = self.sequence.shape[0]
        ROIx = self.sequence.shape[2]
        ROIy = self.sequence.shape[1]

        self.octave.Tracker(detections_file,
                            mp4_video,
                            output_video,
                            tracks_file,
                            reformat_detections_file,
                            num_frames,
                            self.fps,
                            self.px2um,
                            ROIx,
                            ROIy,
                            self.mtt_algorithm,
                            self.PG,
                            self.PD,
                            self.gv,
                            plot_results,
                            save_movie,
                            snap_shot,
                            plot_track_results,
                            analyze_motility,
                            nout=0)
        self.octave.clear_all(nout=0)

        tracks = pd.read_csv(tracks_file)
        tracks.columns = ['id', 'x', 'y', 'frame']
        tracks['fluorescence'] = np.nan
        tracks = tracks[['id', 'x', 'y', 'fluorescence', 'frame']]
        tracks[['x', 'y']] = tracks[['x', 'y']] / self.px2um

        # fluorescence
        if self.detection_algorithm != 2:
            detections = pd.read_csv(detections_file)
            tracks = add_fluorescence_to_tracks(detections, tracks)
        tracks.to_csv(tracks_file, index=False)
        os.remove(mp4_video)

        return tracks
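A hedged usage sketch for the method above (hypothetical: assumes a tracker instance whose sequence, fps, px2um and Octave bridge are already configured):

    tracks = tracker.track('output/detections', 'output/tracks')
    print(tracks.columns.tolist())  # ['id', 'x', 'y', 'fluorescence', 'frame']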
Example #19
def run(words,
        version=1,
        level='H',
        picture=None,
        colorized=False,
        contrast=1.0,
        brightness=1.0,
        save_name=None,
        save_dir=os.getcwd()):
    print("start run")
    supported_chars = r"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ··,.:;+-*/\~!@#$%^&`'=<>[]()?_{}|"

    # check every parameter
    if not isinstance(words, str) or any(i not in supported_chars
                                         for i in words):
        raise ValueError(
            'Wrong words! Make sure the characters are supported!')
    if not isinstance(version, int) or version not in range(1, 41):
        raise ValueError(
            'Wrong version! Please choose an int-type value from 1 to 40!')
    if not isinstance(level, str) or len(level) > 1 or level not in 'LMQH':
        raise ValueError(
            "Wrong level! Please choose a str-type level from {'L','M','Q','H'}!"
        )
    if picture:
        if not isinstance(picture, str) or not os.path.isfile(
                picture) or picture[-4:] not in ('.jpg', '.png', '.bmp',
                                                 '.gif'):
            raise ValueError(
                "Wrong picture! Input an existing filename ending with one of {'.jpg', '.png', '.bmp', '.gif'}!"
            )
        if picture[-4:] == '.gif' and save_name and save_name[-4:] != '.gif':
            raise ValueError(
                'Wrong save_name! If the picture is .gif format, the output filename should be .gif format, too!'
            )
        if not isinstance(colorized, bool):
            raise ValueError('Wrong colorized! Input a bool-type value!')
        if not isinstance(contrast, float):
            raise ValueError('Wrong contrast! Input a float-type value!')
        if not isinstance(brightness, float):
            raise ValueError('Wrong brightness! Input a float-type value!')
    if save_name and (not isinstance(save_name, str) or save_name[-4:]
                      not in ('.jpg', '.png', '.bmp', '.gif')):
        raise ValueError(
            "Wrong save_name! Input a filename ending with one of {'.jpg', '.png', '.bmp', '.gif'}!"
        )
    if not os.path.isdir(save_dir):
        raise ValueError('Wrong save_dir! Input an existing directory!')

    def combine(ver,
                qr_name,
                bg_name,
                colorized,
                contrast,
                brightness,
                save_dir,
                save_name=None):
        from MyQR.mylibs.constant import alig_location
        from PIL import ImageEnhance, ImageFilter
        print("ver:", ver)
        print("qr_name:", qr_name)
        print("bg_name:", bg_name)
        print("colorized:", colorized)
        print("contrast:", contrast)
        print("brightness:", brightness, '/n', "save_dir:", save_dir, '/n',
              "save_name:", save_name)

        qr = Image.open(qr_name)
        qr = qr.convert('RGBA') if colorized else qr

        bg0 = Image.open(bg_name).convert('RGBA')
        bg0 = ImageEnhance.Contrast(bg0).enhance(contrast)
        bg0 = ImageEnhance.Brightness(bg0).enhance(brightness)

        if bg0.size[0] < bg0.size[1]:
            bg0 = bg0.resize(
                (qr.size[0] - 24,
                 (qr.size[0] - 24) * int(bg0.size[1] / bg0.size[0])))
        else:
            bg0 = bg0.resize(
                ((qr.size[1] - 24) * int(bg0.size[0] / bg0.size[1]),
                 qr.size[1] - 24))

        bg = bg0 if colorized else bg0.convert('1')  #convert to binary

        aligs = []
        if ver > 1:
            aloc = alig_location[ver - 2]
            for a in range(len(aloc)):
                for b in range(len(aloc)):
                    if not ((a == b == 0) or (a == len(aloc) - 1 and b == 0) or
                            (a == 0 and b == len(aloc) - 1)):
                        for i in range(3 * (aloc[a] - 2), 3 * (aloc[a] + 3)):
                            for j in range(3 * (aloc[b] - 2),
                                           3 * (aloc[b] + 3)):
                                aligs.append((i, j))

        for i in range(qr.size[0] - 24):
            for j in range(qr.size[1] - 24):
                if not ((i in (18, 19, 20)) or (j in (18, 19, 20)) or
                        (i < 24 and j < 24) or
                        (i < 24 and j > qr.size[1] - 49) or
                        (i > qr.size[0] - 49 and j < 24) or
                        ((i, j) in aligs) or (i % 3 == 1 and j % 3 == 1) or
                        (bg0.getpixel((i, j))[3] == 0)):
                    qr.putpixel((i + 12, j + 12), bg.getpixel((i, j)))

        qr_name = os.path.join(
            save_dir,
            os.path.splitext(os.path.basename(bg_name))[0] +
            '_qrcode.png') if not save_name else os.path.join(
                save_dir, save_name)
        qr.resize((qr.size[0] * 3, qr.size[1] * 3)).save(qr_name)
        return qr_name

    tempdir = os.path.join(os.path.expanduser('~'), '.myqr')
    print("tempdir:", tempdir)

    try:
        if not os.path.exists(tempdir):
            os.makedirs(tempdir)

        ver, qr_name = theqrmodule.get_qrcode(version, level, words, tempdir)
        print("ver:", ver, "qr_name:", qr_name)

        if picture and picture[-4:] == '.gif':
            import imageio

            im = Image.open(picture)
            duration = im.info.get('duration', 0)
            im.save(os.path.join(tempdir, '0.png'))
            while True:
                try:
                    seq = im.tell()
                    im.seek(seq + 1)
                    im.save(os.path.join(tempdir, '%s.png' % (seq + 1)))
                except EOFError:
                    break

            imsname = []
            for s in range(seq + 1):
                bg_name = os.path.join(tempdir, '%s.png' % s)
                imsname.append(
                    combine(ver, qr_name, bg_name, colorized, contrast,
                            brightness, tempdir))

            ims = [imageio.imread(pic) for pic in imsname]
            qr_name = os.path.join(
                save_dir,
                os.path.splitext(os.path.basename(picture))[0] +
                '_qrcode.gif') if not save_name else os.path.join(
                    save_dir, save_name)
            imageio.mimwrite(qr_name, ims, '.gif',
                             **{'duration': duration / 1000})
        elif picture:
            qr_name = combine(ver, qr_name, picture, colorized, contrast,
                              brightness, save_dir, save_name)
        elif qr_name:
            qr = Image.open(qr_name)
            qr_name = os.path.join(
                save_dir,
                os.path.basename(qr_name)) if not save_name else os.path.join(
                    save_dir, save_name)
            qr.resize((qr.size[0] * 3, qr.size[1] * 3)).save(qr_name)

        return ver, level, qr_name

    except:
        raise
    finally:
        import shutil
        if os.path.exists(tempdir):
            shutil.rmtree(tempdir)
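A usage sketch matching this signature (hypothetical filenames; the animated-QR branch only triggers when picture is a .gif):

    ver, level, name = run('Hello World', version=3, picture='bg.gif',
                           colorized=True, save_name='qr.gif')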
Example #20
nDiv = 20
width = 5
scale = 4
img_list = [f for f in os.listdir('img_input') if (os.path.isfile('img_input/' + f) and f.endswith('.png'))]

for img_name in img_list:
	print(img_name)
	img_inp = Image.open('img_input/' + img_name)
	img_tar = Image.open('img_output/EDSR_x{}/myImages/X{}/'.format(scale, scale) + img_name)

	(w, h) = img_inp.size
	ww, hh = w * scale, h * scale
	img_inp = img_inp.resize((ww, hh), Image.NEAREST)

	img_inp = np.asarray(img_inp)
	img_tar = np.asarray(img_tar)

	output = [None] * (2 * (nDiv - 1))
	for i in range(nDiv - 1):
		frame = np.array(img_inp)
		idx = int(img_inp.shape[1] * (i + 1) / nDiv)
		frame[:, :idx - width, :] = img_inp[:, :idx - width, :]
		frame[:, idx + width:, :] = img_tar[:, idx + width:, :]
		frame[:, idx-width:idx+width, :] = np.array([[[0,0,255]]])
		output[i] = frame
		output[2 * (nDiv - 1) - i - 1] = frame
	
	(name, ext) = os.path.splitext(img_name)
	imageio.mimwrite('gif/{}.gif'.format(name), output, fps=nDiv, palettesize=256)

Example #21
# encoding=big5
import imageio
import numpy
import sys

SIZE = 288

# ENV
im_name = input("Please enter the image filename: ")
im = imageio.imread(im_name)
# im=im.astype("int32")
WIDTH_n = int(im.shape[1] // SIZE)
HEIGHT_n = int(im.shape[0] // SIZE)
TOTAL_n = WIDTH_n * HEIGHT_n

# Splitter
im_list = []
for col in range(0, HEIGHT_n):
    for row in range(0, WIDTH_n):
        for times in (0, 1):
            im_list.append(im[col * SIZE:(col + 1) * SIZE, row * SIZE:(row + 1) * SIZE])
imageio.mimwrite(".//complete.gif", im_list, duration=0.02)
Example #22
def run(words, version=1, level='H', picture=None, colorized=False, contrast=1.0, brightness=1.0, save_name=None, save_dir=os.getcwd()):

    supported_chars = r"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ··,.:;+-*/\~!@#$%^&`'=<>[]()?_{}|"


    # check every parameter
    if not isinstance(words, str) or any(i not in supported_chars for i in words):
        raise ValueError('Wrong words! Make sure the characters are supported!')
    if not isinstance(version, int) or version not in range(1, 41):
        raise ValueError('Wrong version! Please choose an int-type value from 1 to 40!')
    if not isinstance(level, str) or len(level)>1 or level not in 'LMQH':
        raise ValueError("Wrong level! Please choose a str-type level from {'L','M','Q','H'}!")
    if picture:
        if not isinstance(picture, str) or not os.path.isfile(picture) or picture[-4:] not in ('.jpg','.png','.bmp','.gif'):
            raise ValueError("Wrong picture! Input an existing filename ending with one of {'.jpg', '.png', '.bmp', '.gif'}!")
        if picture[-4:] == '.gif' and save_name and save_name[-4:] != '.gif':
            raise ValueError('Wrong save_name! If the picture is .gif format, the output filename should be .gif format, too!')
        if not isinstance(colorized, bool):
            raise ValueError('Wrong colorized! Input a bool-type value!')
        if not isinstance(contrast, float):
            raise ValueError('Wrong contrast! Input a float-type value!')
        if not isinstance(brightness, float):
            raise ValueError('Wrong brightness! Input a float-type value!')
    if save_name and (not isinstance(save_name, str) or save_name[-4:] not in ('.jpg','.png','.bmp','.gif')):
        raise ValueError("Wrong save_name! Input a filename ending with one of {'.jpg', '.png', '.bmp', '.gif'}!")
    if not os.path.isdir(save_dir):
        raise ValueError('Wrong save_dir! Input an existing directory!')
    
        
    def combine(ver, qr_name, bg_name, colorized, contrast, brightness, save_dir, save_name=None):
        from MyQR.mylibs.constant import alig_location
        from PIL import ImageEnhance, ImageFilter
        
        qr = Image.open(qr_name)
        qr = qr.convert('RGBA') if colorized else qr
        
        bg0 = Image.open(bg_name).convert('RGBA')
        bg0 = ImageEnhance.Contrast(bg0).enhance(contrast)
        bg0 = ImageEnhance.Brightness(bg0).enhance(brightness)

        if bg0.size[0] < bg0.size[1]:
            bg0 = bg0.resize((qr.size[0]-24, (qr.size[0]-24)*int(bg0.size[1]/bg0.size[0])))
        else:
            bg0 = bg0.resize(((qr.size[1]-24)*int(bg0.size[0]/bg0.size[1]), qr.size[1]-24))    
            
        bg = bg0 if colorized else bg0.convert('1')
        
        aligs = []
        if ver > 1:
            aloc = alig_location[ver-2]
            for a in range(len(aloc)):
                for b in range(len(aloc)):
                    if not ((a==b==0) or (a==len(aloc)-1 and b==0) or (a==0 and b==len(aloc)-1)):
                        for i in range(3*(aloc[a]-2), 3*(aloc[a]+3)):
                            for j in range(3*(aloc[b]-2), 3*(aloc[b]+3)):
                                aligs.append((i,j))

        for i in range(qr.size[0]-24):
            for j in range(qr.size[1]-24):
                if not ((i in (18,19,20)) or (j in (18,19,20)) or (i<24 and j<24) or (i<24 and j>qr.size[1]-49) or (i>qr.size[0]-49 and j<24) or ((i,j) in aligs) or (i%3==1 and j%3==1) or (bg0.getpixel((i,j))[3]==0)):
                    qr.putpixel((i+12,j+12), bg.getpixel((i,j)))
        
        qr_name = os.path.join(save_dir, os.path.splitext(os.path.basename(bg_name))[0] + '_qrcode.png') if not save_name else os.path.join(save_dir, save_name)
        qr.resize((qr.size[0]*3, qr.size[1]*3)).save(qr_name)
        return qr_name

    tempdir = os.path.join(os.path.expanduser('~'), '.myqr')
    
    try:
        if not os.path.exists(tempdir):
            os.makedirs(tempdir)

        ver, qr_name = theqrmodule.get_qrcode(version, level, words, tempdir)

        if picture and picture[-4:]=='.gif':
            import imageio
             
            im = Image.open(picture)
            duration = im.info.get('duration', 0)
            im.save(os.path.join(tempdir, '0.png'))
            while True:
                try:
                    seq = im.tell()
                    im.seek(seq + 1)
                    im.save(os.path.join(tempdir, '%s.png' %(seq+1)))
                except EOFError:
                    break
            
            imsname = []
            for s in range(seq+1):
                bg_name = os.path.join(tempdir, '%s.png' % s)
                imsname.append(combine(ver, qr_name, bg_name, colorized, contrast, brightness, tempdir))
            
            ims = [imageio.imread(pic) for pic in imsname]
            qr_name = os.path.join(save_dir, os.path.splitext(os.path.basename(picture))[0] + '_qrcode.gif') if not save_name else os.path.join(save_dir, save_name)
            imageio.mimwrite(qr_name, ims, '.gif', **{ 'duration': duration/1000 })
        elif picture:
            qr_name = combine(ver, qr_name, picture, colorized, contrast, brightness, save_dir, save_name)
        elif qr_name:
            qr = Image.open(qr_name)
            qr_name = os.path.join(save_dir, os.path.basename(qr_name)) if not save_name else os.path.join(save_dir, save_name)
            qr.resize((qr.size[0]*3, qr.size[1]*3)).save(qr_name)
          
        return ver, level, qr_name
        
    except:
        raise
    finally:
        import shutil
        if os.path.exists(tempdir):
            shutil.rmtree(tempdir) 
Example #23
def main(input_folder='/Users/sophie/data/smart/track_test/',
         group='magprop',
         property='totarea',
         smart_folder=None,
         output_folder=None):
    """
    Parameters
    ----------
    input_folder : Folder string location of .json files with true_id's
    group : Indicate what group in .json file the property you wish to plot is, e.g. 'magprop'
    property : Indicate what property you wish to plot, e.g. 'totarea'
    smart_folder : Optional folder location to load maps and detections from if not in same folder as json
    output_folder: Optional output folder location of images created with algorithm

    Returns
    -------
    """
    #    if not input_folder:
    #        input_folder = os.getcwd() + '/'
    if not smart_folder:
        smart_folder = input_folder
    if not output_folder:
        output_folder = input_folder

    # load json files
    filenames = sorted(os.listdir(input_folder))
    filenames_json = [x for x in filenames if ".json" in x]

    filename_dates = [datetime_from_file_string(x) for x in filenames_json]
    start_date, end_date = filename_dates[0], filename_dates[-1]

    date_strings = []
    for index, value in enumerate(filename_dates):
        if start_date <= value <= end_date:
            date_strings.append([filenames_json[index][:13],
                                 value])  #todo get rid of hardcoded 13

    # get properties (time and value for each id)
    property_values = {}
    x_position, y_position = {}, {}
    for date_string in date_strings:
        json_filename = input_folder + date_string[0] + "_properties.json"
        json_data = json.load(open(json_filename))
        for key, value in json_data['posprop']['trueid'].items():
            if str(value) in property_values:
                property_values[str(value)][0].append(date_string[1])
                property_values[str(value)][1].append(
                    json_data[group][property][str(key)])
            else:
                property_values[str(value)] = [[
                    date_string[1]
                ], [json_data[group][property][str(key)]]]
            if str(value) in x_position:
                x_position[str(value)].append(
                    json_data['posprop']['xcenarea'][str(key)])
                y_position[str(value)].append(
                    json_data['posprop']['ycenarea'][str(key)])
            else:
                x_position[str(value)] = [
                    json_data['posprop']['xcenarea'][str(key)]
                ]
                y_position[str(value)] = [
                    json_data['posprop']['ycenarea'][str(key)]
                ]

    #----------------------------------------
    # get detection outlines as outline_edges

    count = 1
    for date_string in date_strings:
        detection_filename = smart_folder + date_string[0] + "_detections.fits"
        detection_map = sunpy.map.Map(detection_filename)

        #----------------------------------------
        # get actual image of sun
        magnetogram_filename = smart_folder + date_string[0] + "_map.fits"
        magnetogram_map = sunpy.map.Map(magnetogram_filename)

        #----------------------------------------
        # read in numbers and centroids from json
        json_data = json.load(
            open(input_folder + date_string[0] + "_properties.json"))
        # smart id
        number_json = list(json_data['posprop']['trueid'].keys())
        # the tracked ids in the data
        number_json_values = [
            json_data['posprop']['trueid'][i] for i in number_json
        ]

        json_centx, json_centy = [], []
        for i in number_json:
            json_centx.append(json_data['posprop']['xcenarea'][i])
            json_centy.append(json_data['posprop']['ycenarea'][i])

        #----------------------------------------
        # plot evolution of property
        fig = plt.figure(figsize=(10, 12))
        ax1 = fig.add_subplot(2, 1, 2)

        colors = itertools.cycle([
            "black", "grey", "brown", "orange", "red", "pink", "purple",
            "blue", "turquoise", "green"
        ])

        for key, value in property_values.items():
            ax1.plot(value[0], value[1], label=key, marker='o', markersize=3.0)
            plt.legend(loc='upper left')

        import matplotlib.dates as mdates
        ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H'))
        plt.gcf().autofmt_xdate()
        plt.xlabel('Date and time [UT]')

        plt.axvline(date_string[1], linestyle="dashed", color="black")
        plt.ylabel('Total area [m.s.h]')  #todo should be meta data

        # plot detections on sunpy map of magnetogram
        ax1 = fig.add_subplot(2, 1, 1, projection=magnetogram_map)
        plt.subplots_adjust(left=0.1,
                            bottom=0.1,
                            right=0.95,
                            top=0.95,
                            wspace=None,
                            hspace=None)

        # Get same axes
        bottom_left = SkyCoord(-1000 * u.arcsec,
                               -1000 * u.arcsec,
                               frame=magnetogram_map.coordinate_frame)
        top_right = SkyCoord(1000 * u.arcsec,
                             1000 * u.arcsec,
                             frame=magnetogram_map.coordinate_frame)
        submap = magnetogram_map.submap(bottom_left, top_right)
        axes = wcsaxes_compat.gca_wcs(magnetogram_map.wcs)

        image = magnetogram_map.plot(vmin=-500, vmax=500, axes=axes)

        # Draw solar lat/lon grid
        axes.coords.grid(False)
        overlay = grid_overlay(axes, grid_spacing=10 * u.deg)
        #        plt.colorbar(label='B [G]')

        plt.contour(detection_map.data,
                    origin='lower',
                    colors='lightblue',
                    linewidths=0.5)

        # add numbers
        plt.plot(json_centx, json_centy, 'or', color='yellow', markersize=2.0)
        for x, y, numb in zip(json_centx, json_centy, number_json_values):
            plt.text(x + 10, y + 10, str(numb), color='yellow')
        plt.title(date_string[1].strftime('%Y %B %d %H:%M'))

        plt.savefig(output_folder + date_string[0] + "_tracking.png", dpi=150)
        plt.close()

    # convert to gif
    images = []
    filenames = sorted(os.listdir(input_folder))
    filenames_images = [x for x in filenames if "_tracking.png" in x]
    filenames_images = [input_folder + x for x in filenames_images]
    for filename in filenames_images:
        images.append(imageio.imread(filename))
    imageio.mimwrite(input_folder + 'SMART_evolution.gif', images, fps=1.)
Example #24
 def dump_video(self, fname, images, fps=6):
     fname = fname + '.mp4'
     fname = os.path.join(self.saved_dir, fname)
     imageio.mimwrite(fname, images, fps=fps)
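Note that writing .mp4 goes through imageio's ffmpeg plugin (the imageio-ffmpeg package in imageio v2); a minimal sketch under that assumption:

    import numpy as np
    import imageio

    frames = [np.random.randint(0, 255, (128, 128, 3), dtype=np.uint8)
              for _ in range(30)]
    imageio.mimwrite('clip.mp4', frames, fps=6)  # requires the ffmpeg backend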
Example #25
 def get_gameframe(self, gameframe):
     file_names = sorted(glob.glob('resources/animations/gameframe_upload/' + gameframe + '/*.bmp'), key=alphanum_key)
     print(file_names)
     images = [imageio.imread(filename) for filename in file_names]
     imageio.mimwrite('resources/animations/gameframe_temp.gif', images)
     return static_file('resources/animations/gameframe_temp.gif',  root=".", mimetype='image/gif')
Example #26
 def save(self, fn, fps=3):
     imageio.mimwrite(fn, self.frames, fps=fps)
Example #27
        print(i)
    # open files, crop, and set nan values to 0
    map_ic = Map(folder_ic + f_ic)
    im_ic = np.fliplr(map_ic.data[y0:y1, x0:x1])
    im_ic[np.isnan(im_ic)] = 0
    map_M = Map(folder_M + filenames_M[i])
    im_M = np.fliplr(map_M.data[y0:y1, x0:x1])
    im_M[np.isnan(im_M)] = 0
    # update mean and standard deviation
    if i == 0:
        sn = np.std([im_ic, xn], axis=0)
        xn = np.mean([im_ic, xn], axis=0)
    else:
        xn = ((n - 1) * xn + im_ic) / n
        sn = np.sqrt(1 / (n - 1) * ((n - 2) * sn**2 + n / (n - 1) *
                                    (xn - im_ic)**2))
    # scale sn to [0, 255]
    sc_sd = bytscl(sn, 255, 0, 2500)
    # scale im_M to [0.0, 0.1] and apply cmap
    scz_M = bytscl(im_M, 1.0, -1500, 1500)
    cm_M = cmap(scz_M, bytes=True)
    # format sn from N*M to N*M*4 (where col4 is 255)
    cm_sd = np.dstack((sc_sd, sc_sd, sc_sd, np.ones(sc_sd.shape) * 255))
    # overlay
    im_to_save = np.array(0.5 * cm_M + 0.5 * cm_sd, dtype='uint8')
    imageio.imwrite(f'{date}/{i}.png', im_to_save, format='png')
    mov[i] = im_to_save

# save mov using imageio.mimwrite
imageio.mimwrite(f'{date}.mp4', mov, format='mp4', fps=10)
Example #28
        else:
            all_rgb_fine.append(outputs.coarse.rgb)
    rgb_fine = torch.cat(all_rgb_fine)
    # rgb_fine (V*H*W, 3)
    # depth_fine (V*H*W)

    frames = rgb_fine.view(-1, H, W, 3)
    #  depth_fine = depth_fine.view(args.num_views, H, W, 1)

print("Writing video")
vid_name = "{:04}".format(args.subset)
if args.split == "test":
    vid_name = "t" + vid_name
elif args.split == "val":
    vid_name = "v" + vid_name
vid_name += "_v" + "_".join(map(lambda x: "{:03}".format(x), primary))
vid_path = os.path.join(args.visual_path, args.name,
                        "video" + vid_name + ".mp4")
viewimg_path = os.path.join(args.visual_path, args.name,
                            "video" + vid_name + "_view.jpg")
imageio.mimwrite(vid_path, (frames.cpu().numpy() * 255).astype(np.uint8),
                 fps=args.fps,
                 quality=8)

img_np = (data["images"][src_view].permute(0, 2, 3, 1) * 0.5 + 0.5).numpy()
img_np = (img_np * 255).astype(np.uint8)
img_np = np.hstack((*img_np, ))
imageio.imwrite(viewimg_path, img_np)

print("Wrote to", vid_path, "view:", viewimg_path)
Example #29
def get_mails(account, image_output_path, gif_duration, image_name):
    today = get_formatted_date()
    image_count = 0
    images = []
    imagesDelete = []
    msg = ''

    _LOGGER.debug("Attempting to find Informed Delivery mail")

    (rv, data) = account.search(None,
                                '(FROM "' + USPS_Mail_Email + '" SUBJECT "' +
                                USPS_Mail_Subject + '" SENTON "' + today + '")'
                                )

    # Check to see if the path exists, if not make it
    pathcheck = os.path.isdir(image_output_path)
    if not pathcheck:
        try:
            os.makedirs(image_output_path)
        except Exception as err:
            _LOGGER.critical("Error creating directory: %s", str(err))

    # Clean up image directory
    _LOGGER.debug("Cleaning up image directory: %s", str(image_output_path))
    cleanup_images(image_output_path)

    if rv == 'OK':
        _LOGGER.debug("Informed Delivery email found processing...")
        for num in data[0].split():
            (rv, data) = account.fetch(num, '(RFC822)')
            msg = email.message_from_string(data[0][1].decode('utf-8'))

            # walking through the email parts to find images
            for part in msg.walk():
                if part.get_content_maintype() == 'multipart':
                    continue
                if part.get('Content-Disposition') is None:
                    continue

                _LOGGER.debug("Extracting image from email")
                filepath = image_output_path + part.get_filename()

                # Log error message if we are unable to open the filepath for
                # some reason
                try:
                    fp = open(filepath, 'wb')
                except Exception as err:
                    _LOGGER.critical("Error opening filepath: %s", str(err))
                fp.write(part.get_payload(decode=True))
                images.append(filepath)
                image_count = image_count + 1
                fp.close()

        # Remove duplicate images
        _LOGGER.debug("Removing duplicate images.")
        images = list(dict.fromkeys(images))

        # Create copy of image list for deleting temporary images
        imagesDelete = images[:]

        # Look for mail pieces without an image
        html_text = str(msg)
        link_pattern = re.compile(r'\bimage-no-mailpieces?700\.jpg\b')
        search = link_pattern.search(html_text)
        if search is not None:
            images.append(os.path.dirname(__file__) +
                          '/image-no-mailpieces700.jpg')
            image_count = image_count + 1
            _LOGGER.debug("Placeholder image found using: " +
                          "image-no-mailpieces700.jpg.")

        # Remove USPS announcement images
        _LOGGER.debug("Removing USPS announcement images.")
        remove_terms = ['mailerProvidedImage', 'ra_0', 'Mail Attachment.txt']
        images = [el for el in images if not any(ignore in el for ignore
                                                 in remove_terms)]
        image_count = len(images)
        _LOGGER.debug("Image Count: %s", str(image_count))

        if image_count > 0:
            all_images = []

            # _LOGGER.debug("Resizing images to 700x315...")
            # # Resize images to 700x315
            # all_images = resize_images(all_images)

            # Create numpy array of images
            _LOGGER.debug("Creating array of image files...")
            all_images = [io.imread(image) for image in images]

            try:
                _LOGGER.debug("Generating animated GIF")
                # Use ImageIO to create mail images
                io.mimwrite(os.path.join(image_output_path, image_name),
                            all_images, duration=gif_duration)
                _LOGGER.info("Mail image generated.")
            except Exception as err:
                _LOGGER.error("Error attempting to generate image: %s",
                              str(err))
            for image in imagesDelete:
                try:
                    os.remove(image)
                except Exception as err:
                    _LOGGER.error("Error attempting to remove image: %s",
                                  str(err))

        elif image_count == 0:
            _LOGGER.info("No mail found.")
            filecheck = os.path.isfile(image_output_path + image_name)
            if filecheck:
                try:
                    _LOGGER.debug("Removing " + image_output_path +
                                  image_name)
                    os.remove(image_output_path + image_name)
                except Exception as err:
                    _LOGGER.error("Error attempting to remove image: %s",
                                  str(err))
            try:
                _LOGGER.debug("Copying nomail gif")
                copyfile(os.path.dirname(__file__) + '/mail_none.gif',
                         image_output_path + image_name)
            except Exception as err:
                _LOGGER.error("Error attempting to copy image: %s", str(err))

    return image_count
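A hedged usage sketch for get_mails (hypothetical: assumes an authenticated imaplib connection with the relevant folder already selected):

    count = get_mails(account, '/tmp/mail_images/', 0.5, 'mail_today.gif')
    print('%d mail images found' % count)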
Example #30
buffer_a_data = np.random.uniform(0.0, 1.0, (H, W, 4)).astype('f4')
buffer_a = context.buffer(buffer_a_data)
buffer_b_data = np.zeros((H, W, 4)).astype('f4')
buffer_b = context.buffer(buffer_b_data)

imgs = []
last_buffer = buffer_b
for i in range(FRAMES):
    toggle = True if i % 2 else False
    buffer_a.bind_to_storage_buffer(1 if toggle else 0)
    buffer_b.bind_to_storage_buffer(0 if toggle else 1)

    # toggle 2 buffers as input and output
    last_buffer = buffer_a if toggle else buffer_b

    # local invocation id x -> pixel x
    # work groupid x -> pixel y
    # eg) buffer[x, y] = gl_LocalInvocationID.x + gl_WorkGroupID.x * W
    compute_shader.run(group_x=H, group_y=1)

    # print out
    output = np.frombuffer(last_buffer.read(), dtype=np.float32)
    output = output.reshape((H, W, 4))
    output = np.multiply(output, 255).astype(np.uint8)
    imgs.append(output)

# if you don't want to use imageio, remove this section
out_path = f"{OUTPUT_DIRPATH}/debug.gif"
print("Writing GIF anim to", out_path)
imageio.mimwrite(out_path, imgs, "GIF", duration=0.15)
Example #31
def main(wf):
	import numpy, imageio
	from PIL import Image
	from bs4 import BeautifulSoup as BS

	#####################################################
	#setup initial "quick" static image
	print("Getting images...")
	urllib.urlretrieve("http://radar.weather.gov/Warnings/Short/"+loc+"_Warnings_0.gif","static/wn.gif") #warnings layer
	urllib.urlretrieve("http://radar.weather.gov/Legend/N0R/"+loc+"_N0R_Legend_0.gif","static/lg.gif") #legends
	urllib.urlretrieve("http://radar.weather.gov/RadarImg/N0R/"+loc+"_N0R_0.gif","static/rd.gif") #static radar

	bg=Image.open(staticdir+"grayscale.gif").convert('RGBA')	#bg=background image
	cnty=Image.open(staticdir+"overlay.gif").convert('RGBA')	#cnty=county, roads, state, city overlay
	wn=Image.open(staticdir+"wn.gif").convert('RGBA')# wn=warnings layer
	cnty.paste(wn,(0,0),wn)
	rd=Image.open(staticdir+"rd.gif").convert('RGBA')
	lg=Image.open(staticdir+"lg.gif").convert('RGBA')
	temp=Image.open(staticdir+"grayscale.gif").convert('RGBA')
	temp.paste(rd,(0,0),rd)
	temp.paste(cnty,(0,0),cnty)
	temp.paste(lg,(0,0),lg)
	temp.save(staticdir+'radar static.gif')

	#open saved static image with quicklook silencing output
	null = open(os.devnull,'wb')
	p=sp.Popen(['qlmanage', '-p', staticdir+'radar static.gif'],stderr=null,stdout=null)

	######################################################################
	#Parse html for links and download the 6 most recent images for region
	html = urllib.urlopen("http://radar.weather.gov/RadarImg/N0R/"+loc+"/?C=M;O=D")
	soup = BS(html,"html.parser")

	names=['']*6
	for i in range(5,11):
		n=i-5
		names[n]=soup.find_all('a')[i]['href'][:-4]
		print("downloading image "+names[n]+".gif")
		urllib.urlretrieve("http://radar.weather.gov/RadarImg/N0R/"+loc+"/"+names[n]+".gif",anmdir+names[n]+".gif")
		urllib.urlretrieve("http://radar.weather.gov/Legend/N0R/"+loc+"/"+names[n]+"_Legend.gif",anmdir+names[n]+"_Legend.gif")
		print("     downloaded image "+names[n]+".gif")

	print("   done")

	########################################################
	#Composite together overlay and underlay for each images
	print("Compositing..")
	images=['']*6
	for i,n in enumerate(names):
		im=Image.open(anmdir+n+".gif").convert('RGBA')
		lg=Image.open(anmdir+n+"_Legend.gif").convert('RGBA')
		temp=Image.open(staticdir+"grayscale.gif").convert('RGBA')
		temp.paste(im,(0,0),im)
		temp.paste(cnty,(0,0),cnty)
		temp.paste(lg,(0,0),lg)
		temp.save(anmdir+n+".gif")
		images[5-i]=imageio.imread(anmdir+n+".gif")
	print("   done")

	print("Saving/Compiling...")
	imageio.mimwrite("radar.gif",images,duration=[.25,.25,.25,.25,.25,1.5])
	print("   done")
	if not p.poll() and p.poll() != 0:
		print("Displaying...")
		np=sp.Popen(['qlmanage', '-p', 'radar.gif'],stderr=null,stdout=null)
		
		time.sleep(1)
		p.kill()
		sp.Popen("rm animations/*",shell=True)
		print("   done")
	print("Alfred Script done")
Example #32
def train():

    parser = config_parser()
    args = parser.parse_args()

    # Load data

    if args.dataset_type == 'llff':
        images, poses, bds, render_poses, i_test = load_llff_data(
            args.datadir,
            args.factor,
            recenter=True,
            bd_factor=.75,
            spherify=args.spherify)
        hwf = poses[0, :3, -1]
        poses = poses[:, :3, :4]
        print('Loaded llff', images.shape, render_poses.shape, hwf,
              args.datadir)
        if not isinstance(i_test, list):
            i_test = [i_test]

        if args.llffhold > 0:
            print('Auto LLFF holdout,', args.llffhold)
            i_test = np.arange(images.shape[0])[::args.llffhold]

        i_val = i_test
        i_train = np.array([
            i for i in np.arange(int(images.shape[0]))
            if (i not in i_test and i not in i_val)
        ])

        print('DEFINING BOUNDS')
        if args.no_ndc:
            near = np.ndarray.min(bds) * .9
            far = np.ndarray.max(bds) * 1.

        else:
            near = 0.
            far = 1.
        print('NEAR FAR', near, far)

    elif args.dataset_type == 'blender':
        images, poses, render_poses, hwf, i_split = load_blender_data(
            args.datadir, args.half_res, args.testskip)
        print('Loaded blender', images.shape, render_poses.shape, hwf,
              args.datadir)
        i_train, i_val, i_test = i_split

        near = 2.
        far = 6.

        if args.white_bkgd:
            images = images[..., :3] * images[..., -1:] + (1. -
                                                           images[..., -1:])
        else:
            images = images[..., :3]

    elif args.dataset_type == 'deepvoxels':

        images, poses, render_poses, hwf, i_split = load_dv_data(
            scene=args.shape, basedir=args.datadir, testskip=args.testskip)

        print('Loaded deepvoxels', images.shape, render_poses.shape, hwf,
              args.datadir)
        i_train, i_val, i_test = i_split

        hemi_R = np.mean(np.linalg.norm(poses[:, :3, -1], axis=-1))
        near = hemi_R - 1.
        far = hemi_R + 1.

    else:
        print('Unknown dataset type', args.dataset_type, 'exiting')
        return

    # Cast intrinsics to right types
    H, W, focal = hwf
    H, W = int(H), int(W)
    hwf = [H, W, focal]

    if args.render_test:
        render_poses = np.array(poses[i_test])

    # Create log dir and copy the config file
    basedir = args.basedir
    expname = args.expname
    os.makedirs(os.path.join(basedir, expname), exist_ok=True)
    f = os.path.join(basedir, expname, 'args.txt')
    with open(f, 'w') as file:
        for arg in sorted(vars(args)):
            attr = getattr(args, arg)
            file.write('{} = {}\n'.format(arg, attr))
    if args.config is not None:
        f = os.path.join(basedir, expname, 'config.txt')
        with open(f, 'w') as file:
            file.write(open(args.config, 'r').read())

    # Create nerf model
    render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer = create_nerf(
        args)
    global_step = start

    bds_dict = {
        'near': near,
        'far': far,
    }
    render_kwargs_train.update(bds_dict)
    render_kwargs_test.update(bds_dict)

    # Move testing data to GPU
    render_poses = torch.Tensor(render_poses).to(device)

    # Short circuit if only rendering out from trained model
    if args.render_only:
        print('RENDER ONLY')
        with torch.no_grad():
            if args.render_test:
                # render_test switches to test poses
                images = images[i_test]
            else:
                # Default is smoother render_poses path
                images = None

            testsavedir = os.path.join(
                basedir, expname, 'renderonly_{}_{:06d}'.format(
                    'test' if args.render_test else 'path', start))
            os.makedirs(testsavedir, exist_ok=True)
            print('test poses shape', render_poses.shape)

            rgbs, _ = render_path(render_poses,
                                  hwf,
                                  args.chunk,
                                  render_kwargs_test,
                                  gt_imgs=images,
                                  savedir=testsavedir,
                                  render_factor=args.render_factor)
            print('Done rendering', testsavedir)
            imageio.mimwrite(os.path.join(testsavedir, 'video.mp4'),
                             to8b(rgbs),
                             fps=30,
                             quality=8)

            return

    # Prepare raybatch tensor if batching random rays
    N_rand = args.N_rand
    use_batching = not args.no_batching
    if use_batching:
        # For random ray batching
        print('get rays')
        rays = np.stack(
            [get_rays_np(H, W, focal, p) for p in poses[:, :3, :4]],
            0)  # [N, ro+rd, H, W, 3]
        print('done, concats')
        rays_rgb = np.concatenate([rays, images[:, None]],
                                  1)  # [N, ro+rd+rgb, H, W, 3]
        rays_rgb = np.transpose(rays_rgb,
                                [0, 2, 3, 1, 4])  # [N, H, W, ro+rd+rgb, 3]
        rays_rgb = np.stack([rays_rgb[i] for i in i_train],
                            0)  # train images only
        rays_rgb = np.reshape(rays_rgb,
                              [-1, 3, 3])  # [(N-1)*H*W, ro+rd+rgb, 3]
        rays_rgb = rays_rgb.astype(np.float32)
        print('shuffle rays')
        np.random.shuffle(rays_rgb)

        print('done')
        i_batch = 0

    # Move training data to GPU
    images = torch.Tensor(images).to(device)
    poses = torch.Tensor(poses).to(device)
    if use_batching:
        rays_rgb = torch.Tensor(rays_rgb).to(device)

    N_iters = 200000 + 1
    print('Begin')
    print('TRAIN views are', i_train)
    print('TEST views are', i_test)
    print('VAL views are', i_val)

    # Summary writers
    # writer = SummaryWriter(os.path.join(basedir, 'summaries', expname))

    for i in trange(start, N_iters):
        time0 = time.time()

        # Sample random ray batch
        if use_batching:
            # Random over all images
            batch = rays_rgb[i_batch:i_batch + N_rand]  # [B, 2+1, 3*?]
            batch = torch.transpose(batch, 0, 1)
            batch_rays, target_s = batch[:2], batch[2]

            i_batch += N_rand
            if i_batch >= rays_rgb.shape[0]:
                print("Shuffle data after an epoch!")
                rand_idx = torch.randperm(rays_rgb.shape[0])
                rays_rgb = rays_rgb[rand_idx]
                i_batch = 0

        else:
            # Random from one image
            img_i = np.random.choice(i_train)
            target = images[img_i]
            pose = poses[img_i, :3, :4]

            if N_rand is not None:
                rays_o, rays_d = get_rays(
                    H, W, focal, torch.Tensor(pose))  # (H, W, 3), (H, W, 3)

                if i < args.precrop_iters:
                    dH = int(H // 2 * args.precrop_frac)
                    dW = int(W // 2 * args.precrop_frac)
                    coords = torch.stack(
                        torch.meshgrid(
                            torch.linspace(H // 2 - dH, H // 2 + dH - 1,
                                           2 * dH),
                            torch.linspace(W // 2 - dW, W // 2 + dW - 1,
                                           2 * dW)), -1)
                    if i == start:
                        print(
                            f"[Config] Center cropping of size {2*dH} x {2*dW} is enabled until iter {args.precrop_iters}"
                        )
                else:
                    coords = torch.stack(
                        torch.meshgrid(torch.linspace(0, H - 1, H),
                                       torch.linspace(0, W - 1, W)),
                        -1)  # (H, W, 2)

                coords = torch.reshape(coords, [-1, 2])  # (H * W, 2)
                select_inds = np.random.choice(coords.shape[0],
                                               size=[N_rand],
                                               replace=False)  # (N_rand,)
                select_coords = coords[select_inds].long()  # (N_rand, 2)
                rays_o = rays_o[select_coords[:, 0],
                                select_coords[:, 1]]  # (N_rand, 3)
                rays_d = rays_d[select_coords[:, 0],
                                select_coords[:, 1]]  # (N_rand, 3)
                batch_rays = torch.stack([rays_o, rays_d], 0)
                target_s = target[select_coords[:, 0],
                                  select_coords[:, 1]]  # (N_rand, 3)

        #####  Core optimization loop  #####
        rgb, disp, acc, extras = render(H,
                                        W,
                                        focal,
                                        chunk=args.chunk,
                                        rays=batch_rays,
                                        verbose=i < 10,
                                        retraw=True,
                                        **render_kwargs_train)

        optimizer.zero_grad()
        img_loss = img2mse(rgb, target_s)
        trans = extras['raw'][..., -1]
        loss = img_loss
        psnr = mse2psnr(img_loss)

        if 'rgb0' in extras:
            img_loss0 = img2mse(extras['rgb0'], target_s)
            loss = loss + img_loss0
            psnr0 = mse2psnr(img_loss0)

        loss.backward()
        optimizer.step()

        # NOTE: IMPORTANT!
        ###   update learning rate   ###
        decay_rate = 0.1
        decay_steps = args.lrate_decay * 1000
        new_lrate = args.lrate * (decay_rate**(global_step / decay_steps))
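        # exponential schedule: the learning rate decays by 10x every `lrate_decay` thousand steps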
        for param_group in optimizer.param_groups:
            param_group['lr'] = new_lrate
        ################################

        dt = time.time() - time0
        # print(f"Step: {global_step}, Loss: {loss}, Time: {dt}")
        #####           end            #####

        # Rest is logging
        if i % args.i_weights == 0:
            path = os.path.join(basedir, expname, '{:06d}.tar'.format(i))
            torch.save(
                {
                    'global_step':
                    global_step,
                    'network_fn_state_dict':
                    render_kwargs_train['network_fn'].state_dict(),
                    'network_fine_state_dict':
                    render_kwargs_train['network_fine'].state_dict(),
                    'optimizer_state_dict':
                    optimizer.state_dict(),
                }, path)
            print('Saved checkpoints at', path)

        if i % args.i_video == 0 and i > 0:
            # Turn on testing mode
            with torch.no_grad():
                rgbs, disps = render_path(render_poses, hwf, args.chunk,
                                          render_kwargs_test)
            print('Done, saving', rgbs.shape, disps.shape)
            moviebase = os.path.join(basedir, expname,
                                     '{}_spiral_{:06d}_'.format(expname, i))
            imageio.mimwrite(moviebase + 'rgb.mp4',
                             to8b(rgbs),
                             fps=30,
                             quality=8)
            imageio.mimwrite(moviebase + 'disp.mp4',
                             to8b(disps / np.max(disps)),
                             fps=30,
                             quality=8)

            # if args.use_viewdirs:
            #     render_kwargs_test['c2w_staticcam'] = render_poses[0][:3,:4]
            #     with torch.no_grad():
            #         rgbs_still, _ = render_path(render_poses, hwf, args.chunk, render_kwargs_test)
            #     render_kwargs_test['c2w_staticcam'] = None
            #     imageio.mimwrite(moviebase + 'rgb_still.mp4', to8b(rgbs_still), fps=30, quality=8)

        if i % args.i_testset == 0 and i > 0:
            testsavedir = os.path.join(basedir, expname,
                                       'testset_{:06d}'.format(i))
            os.makedirs(testsavedir, exist_ok=True)
            print('test poses shape', poses[i_test].shape)
            with torch.no_grad():
                render_path(torch.Tensor(poses[i_test]).to(device),
                            hwf,
                            args.chunk,
                            render_kwargs_test,
                            gt_imgs=images[i_test],
                            savedir=testsavedir)
            print('Saved test set')

        if i % args.i_print == 0:
            tqdm.write(
                f"[TRAIN] Iter: {i} Loss: {loss.item()}  PSNR: {psnr.item()}")
        """
            print(expname, i, psnr.numpy(), loss.numpy(), global_step.numpy())
            print('iter time {:.05f}'.format(dt))

            with tf.contrib.summary.record_summaries_every_n_global_steps(args.i_print):
                tf.contrib.summary.scalar('loss', loss)
                tf.contrib.summary.scalar('psnr', psnr)
                tf.contrib.summary.histogram('tran', trans)
                if args.N_importance > 0:
                    tf.contrib.summary.scalar('psnr0', psnr0)


            if i%args.i_img==0:

                # Log a rendered validation view to Tensorboard
                img_i=np.random.choice(i_val)
                target = images[img_i]
                pose = poses[img_i, :3,:4]
                with torch.no_grad():
                    rgb, disp, acc, extras = render(H, W, focal, chunk=args.chunk, c2w=pose,
                                                        **render_kwargs_test)

                psnr = mse2psnr(img2mse(rgb, target))

                with tf.contrib.summary.record_summaries_every_n_global_steps(args.i_img):

                    tf.contrib.summary.image('rgb', to8b(rgb)[tf.newaxis])
                    tf.contrib.summary.image('disp', disp[tf.newaxis,...,tf.newaxis])
                    tf.contrib.summary.image('acc', acc[tf.newaxis,...,tf.newaxis])

                    tf.contrib.summary.scalar('psnr_holdout', psnr)
                    tf.contrib.summary.image('rgb_holdout', target[tf.newaxis])


                if args.N_importance > 0:

                    with tf.contrib.summary.record_summaries_every_n_global_steps(args.i_img):
                        tf.contrib.summary.image('rgb0', to8b(extras['rgb0'])[tf.newaxis])
                        tf.contrib.summary.image('disp0', extras['disp0'][tf.newaxis,...,tf.newaxis])
                        tf.contrib.summary.image('z_std', extras['z_std'][tf.newaxis,...,tf.newaxis])
        """

        global_step += 1
Exemple #33
0
def run_episode(env,
                policy,
                scaler,
                animate=False,
                logger=None,
                anim_name='ant_train'):
    """ Run single episode with option to animate

    Args:
        env: ai gym environment
        policy: policy object with sample() method
        scaler: scaler object, used to scale/offset each observation dimension
            to a similar range
        animate: boolean, True uses env.render() method to animate episode
        logger: optional logger; when given, its `path` is used to save the animation
        anim_name: file name (without extension) for the saved animation

    Returns: 5-tuple of NumPy arrays
        observes: shape = (episode len, obs_dim)
        actions: shape = (episode len, act_dim)
        rewards: shape = (episode len,)
        unscaled_obs: useful for training scaler, shape = (episode len, obs_dim)
        detailed_rewards: per-component reward sums, shape = (6,)
    """
    obs = env.reset()
    observes, actions, rewards, unscaled_obs = [], [], [], []
    detailed_rewards = np.array([0.0 for _ in range(6)])
    done = False
    step = 0.0
    scale, offset = scaler.get()
    scale[-1] = 1.0  # don't scale the time-step feature
    scale[-2] = 1.0  # don't scale the next-to-last feature either
    offset[-1] = 0.0  # don't offset the time-step feature
    offset[-2] = 0.0  # don't offset the next-to-last feature either
    rendered_frames = []
    while not done:
        if animate:
            rendered_frames.append(env.render("rgb_array"))
            if int(step * 1000) % 50 == 0:
                print("Rendering: {}%".format(int(step * 100)))
        obs = obs.astype(np.float64).reshape((1, -1))
        obs = np.append(obs, [[step]], axis=1)  # add time step feature
        unscaled_obs.append(obs)
        obs = (obs - offset) * scale  # center and scale observations
        observes.append(obs)
        action = policy.sample(obs).reshape((1, -1)).astype(np.float64)
        actions.append(action)
        # magic here: this used to pass `action` directly, but it's an array nested inside an array (why?), so use action[0]
        obs, reward, done, _ = env.step(action[0])
        if not isinstance(reward, float):
            reward = float(reward)  # np.asscalar is deprecated/removed; float() covers NumPy scalars
        rewards.append(reward)
        detailed_rewards += np.array(env.env.rewards)
        step += 1e-3  # increment time step feature

    if animate:
        p = os.path.expanduser('~/Desktop')  # expand '~' so imageio gets a usable path
        if logger is not None:
            p = logger.path
        imageio.mimwrite('{}/{}.mp4'.format(p, anim_name),
                         np.array(rendered_frames),
                         fps=60)

    return (np.concatenate(observes), np.concatenate(actions),
            np.array(rewards, dtype=np.float64), np.concatenate(unscaled_obs),
            np.array(detailed_rewards))
Exemple #34
0
def train():

    parser = config_parser()
    args = parser.parse_args()

    # Load data

    if args.dataset_type == 'llff':
        images, poses, bds, render_poses, i_test = load_llff_data(
            args.datadir,
            args.factor,
            recenter=True,
            bd_factor=.75,
            spherify=args.spherify)
        hwf = poses[0, :3, -1]
        poses = poses[:, :3, :4]
        print('Loaded llff', images.shape, render_poses.shape, hwf,
              args.datadir)
        if not isinstance(i_test, list):
            i_test = [i_test]

        if args.llffhold > 0:
            print('Auto LLFF holdout,', args.llffhold)
            i_test = np.arange(images.shape[0])[::args.llffhold]

        i_val = i_test
        i_train = np.array([
            i for i in np.arange(int(images.shape[0]))
            if (i not in i_test and i not in i_val)
        ])

        print('DEFINING BOUNDS')
        if args.no_ndc:
            near = tf.reduce_min(bds) * .9
            far = tf.reduce_max(bds) * 1.
        else:
            near = 0.
            far = 1.
        print('NEAR FAR', near, far)

    elif args.dataset_type == 'blender':
        images, poses, render_poses, hwf, i_split = load_blender_data(
            args.datadir, args.half_res, args.testskip)
        print('Loaded blender', images.shape, render_poses.shape, hwf,
              args.datadir)
        i_train, i_val, i_test = i_split

        near = 2.
        far = 6.

        if args.white_bkgd:
            images = images[..., :3] * images[..., -1:] + (1. -
                                                           images[..., -1:])
        else:
            images = images[..., :3]

    elif args.dataset_type == 'deepvoxels':

        images, poses, render_poses, hwf, i_split = load_dv_data(
            scene=args.shape, basedir=args.datadir, testskip=args.testskip)

        print('Loaded deepvoxels', images.shape, render_poses.shape, hwf,
              args.datadir)
        i_train, i_val, i_test = i_split

        hemi_R = np.mean(np.linalg.norm(poses[:, :3, -1], axis=-1))
        near = hemi_R - 1.
        far = hemi_R + 1.

    else:
        print('Unknown dataset type', args.dataset_type, 'exiting')
        return

    # Cast intrinsics to right types
    H, W, focal = hwf
    H, W = int(H), int(W)
    hwf = [H, W, focal]

    if args.render_test:
        render_poses = np.array(poses[i_test])

    # Create log dir and copy the config file
    basedir = args.basedir
    expname = args.expname
    os.makedirs(os.path.join(basedir, expname), exist_ok=True)
    f = os.path.join(basedir, expname, 'args.txt')
    with open(f, 'w') as file:
        for arg in sorted(vars(args)):
            attr = getattr(args, arg)
            file.write('{} = {}\n'.format(arg, attr))
    if args.config is not None:
        f = os.path.join(basedir, expname, 'config.txt')
        with open(f, 'w') as file:
            file.write(open(args.config, 'r').read())

    # Create nerf model
    render_kwargs_train, render_kwargs_test, start, grad_vars, models = create_nerf(
        args)

    bds_dict = {
        'near': tf.cast(near, tf.float32),
        'far': tf.cast(far, tf.float32),
    }
    render_kwargs_train.update(bds_dict)
    render_kwargs_test.update(bds_dict)

    # Short circuit if only rendering out from trained model
    if args.render_only:
        print('RENDER ONLY')
        if args.render_test:
            # render_test switches to test poses
            images = images[i_test]
        else:
            # Default is smoother render_poses path
            images = None

        testsavedir = os.path.join(
            basedir, expname, 'renderonly_{}_{:06d}'.format(
                'test' if args.render_test else 'path', start))
        os.makedirs(testsavedir, exist_ok=True)
        print('test poses shape', render_poses.shape)

        rgbs, _ = render_path(render_poses,
                              hwf,
                              args.chunk,
                              render_kwargs_test,
                              gt_imgs=images,
                              savedir=testsavedir,
                              render_factor=args.render_factor)
        print('Done rendering', testsavedir)
        imageio.mimwrite(os.path.join(testsavedir, 'video.mp4'),
                         to8b(rgbs),
                         fps=30,
                         quality=8)

        return

    # Create optimizer
    lrate = args.lrate
    if args.lrate_decay > 0:
        lrate = tf.keras.optimizers.schedules.ExponentialDecay(
            lrate, decay_steps=args.lrate_decay * 1000, decay_rate=0.1)
    optimizer = tf.keras.optimizers.Adam(lrate)
    models['optimizer'] = optimizer

    global_step = tf.compat.v1.train.get_or_create_global_step()
    global_step.assign(start)

    # Prepare raybatch tensor if batching random rays
    N_rand = args.N_rand
    use_batching = not args.no_batching
    if use_batching:
        # For random ray batching
        print('get rays')
        rays = np.stack(
            [get_rays_np(H, W, focal, p) for p in poses[:, :3, :4]],
            0)  # [N, ro+rd, H, W, 3]
        print('done, concats')
        rays_rgb = np.concatenate([rays, images[:, None]],
                                  1)  # [N, ro+rd+rgb, H, W, 3]
        rays_rgb = np.transpose(rays_rgb,
                                [0, 2, 3, 1, 4])  # [N, H, W, ro+rd+rgb, 3]
        rays_rgb = np.stack([rays_rgb[i] for i in i_train],
                            0)  # train images only
        rays_rgb = np.reshape(rays_rgb,
                              [-1, 3, 3])  # [(N-1)*H*W, ro+rd+rgb, 3]
        rays_rgb = rays_rgb.astype(np.float32)
        print('shuffle rays')
        np.random.shuffle(rays_rgb)
        print('done')
        i_batch = 0

    N_iters = 1000000
    print('Begin')
    print('TRAIN views are', i_train)
    print('TEST views are', i_test)
    print('VAL views are', i_val)
    print(basedir)

    # Summary writers
    writer = tf.contrib.summary.create_file_writer(
        os.path.join(basedir, 'summaries', expname))
    writer.set_as_default()

    for i in range(start, N_iters):
        time0 = time.time()

        # Sample random ray batch

        if use_batching:
            # Random over all images
            batch = rays_rgb[i_batch:i_batch + N_rand]  # [B, 2+1, 3*?]
            batch = tf.transpose(batch, [1, 0, 2])
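            # reorder to [ro+rd+rgb, B, 3] so rays and RGB targets can be split below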
            batch_rays, target_s = batch[:2], batch[2]

            i_batch += N_rand
            if i_batch >= rays_rgb.shape[0]:
                np.random.shuffle(rays_rgb)
                i_batch = 0

        else:
            # Random from one image
            img_i = np.random.choice(i_train)
            target = images[img_i]
            pose = poses[img_i, :3, :4]

            if N_rand is not None:
                rays_o, rays_d = get_rays(H, W, focal, pose)
                coords = tf.stack(
                    tf.meshgrid(tf.range(H), tf.range(W), indexing='ij'), -1)
                coords = tf.reshape(coords, [-1, 2])
                select_inds = np.random.choice(coords.shape[0],
                                               size=[N_rand],
                                               replace=False)
                select_inds = tf.gather_nd(coords, select_inds[:, tf.newaxis])
                rays_o = tf.gather_nd(rays_o, select_inds)
                rays_d = tf.gather_nd(rays_d, select_inds)
                batch_rays = tf.stack([rays_o, rays_d], 0)
                target_s = tf.gather_nd(target, select_inds)

        #####  Core optimization loop  #####

        with tf.GradientTape() as tape:
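            # ops recorded under this tape (rendering and the losses) are differentiated below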

            rgb, disp, acc, extras = render(H,
                                            W,
                                            focal,
                                            chunk=args.chunk,
                                            rays=batch_rays,
                                            verbose=i < 10,
                                            retraw=True,
                                            **render_kwargs_train)

            img_loss = img2mse(rgb, target_s)
            trans = extras['raw'][..., -1]
            loss = img_loss
            psnr = mse2psnr(img_loss)

            if 'rgb0' in extras:
                img_loss0 = img2mse(extras['rgb0'], target_s)
                loss += img_loss0
                psnr0 = mse2psnr(img_loss0)

        gradients = tape.gradient(loss, grad_vars)
        optimizer.apply_gradients(zip(gradients, grad_vars))

        dt = time.time() - time0

        #####           end            #####

        # Rest is logging

        def save_weights(net, prefix, i):
            path = os.path.join(basedir, expname,
                                '{}_{:06d}.npy'.format(prefix, i))
            np.save(path, net.get_weights())
            print('saved weights at', path)

        if i % args.i_weights == 0:
            for k in models:
                save_weights(models[k], k, i)

        if i % args.i_video == 0 and i > 0:

            rgbs, disps = render_path(render_poses, hwf, args.chunk,
                                      render_kwargs_test)
            print('Done, saving', rgbs.shape, disps.shape)
            moviebase = os.path.join(basedir, expname,
                                     '{}_spiral_{:06d}_'.format(expname, i))
            imageio.mimwrite(moviebase + 'rgb.mp4',
                             to8b(rgbs),
                             fps=30,
                             quality=8)
            imageio.mimwrite(moviebase + 'disp.mp4',
                             to8b(disps / np.max(disps)),
                             fps=30,
                             quality=8)

            if args.use_viewdirs:
                render_kwargs_test['c2w_staticcam'] = render_poses[0][:3, :4]
                rgbs_still, _ = render_path(render_poses, hwf, args.chunk,
                                            render_kwargs_test)
                render_kwargs_test['c2w_staticcam'] = None
                imageio.mimwrite(moviebase + 'rgb_still.mp4',
                                 to8b(rgbs_still),
                                 fps=30,
                                 quality=8)

        if i % args.i_testset == 0 and i > 0:
            testsavedir = os.path.join(basedir, expname,
                                       'testset_{:06d}'.format(i))
            os.makedirs(testsavedir, exist_ok=True)
            print('test poses shape', poses[i_test].shape)
            render_path(poses[i_test],
                        hwf,
                        args.chunk,
                        render_kwargs_test,
                        gt_imgs=images[i_test],
                        savedir=testsavedir)
            print('Saved test set')

        if i % args.i_print == 0 or i < 10:

            print(expname, i, psnr.numpy(), loss.numpy(), global_step.numpy())
            print('iter time {:.05f}'.format(dt))
            with tf.contrib.summary.record_summaries_every_n_global_steps(
                    args.i_print):
                tf.contrib.summary.scalar('loss', loss)
                tf.contrib.summary.scalar('psnr', psnr)
                tf.contrib.summary.histogram('tran', trans)
                if args.N_importance > 0:
                    tf.contrib.summary.scalar('psnr0', psnr0)

            if i % args.i_img == 0:

                # Log a rendered validation view to Tensorboard
                img_i = np.random.choice(i_val)
                target = images[img_i]
                pose = poses[img_i, :3, :4]

                rgb, disp, acc, extras = render(H,
                                                W,
                                                focal,
                                                chunk=args.chunk,
                                                c2w=pose,
                                                **render_kwargs_test)

                psnr = mse2psnr(img2mse(rgb, target))

                with tf.contrib.summary.record_summaries_every_n_global_steps(
                        args.i_img):

                    tf.contrib.summary.image('rgb', to8b(rgb)[tf.newaxis])
                    tf.contrib.summary.image('disp', disp[tf.newaxis, ...,
                                                          tf.newaxis])
                    tf.contrib.summary.image('acc', acc[tf.newaxis, ...,
                                                        tf.newaxis])

                    tf.contrib.summary.scalar('psnr_holdout', psnr)
                    tf.contrib.summary.image('rgb_holdout', target[tf.newaxis])

                if args.N_importance > 0:

                    with tf.contrib.summary.record_summaries_every_n_global_steps(
                            args.i_img):
                        tf.contrib.summary.image(
                            'rgb0',
                            to8b(extras['rgb0'])[tf.newaxis])
                        tf.contrib.summary.image(
                            'disp0', extras['disp0'][tf.newaxis, ...,
                                                     tf.newaxis])
                        tf.contrib.summary.image(
                            'z_std', extras['z_std'][tf.newaxis, ...,
                                                     tf.newaxis])

        global_step.assign_add(1)
Exemple #35
0
nextDateHeadline = nextDateImages + timedelta(days=1)

print "getting images for {date}".format(date=nextDateImages)
imageData = getImagesInfo(nextDateImages)
gif = None
if len(imageData) > 0:
    #Get yesterday headline from TheGuardian
    print "obtaining {date} headline from The Guardian".format(date=nextDateHeadline)
    headline = getGuardianHeadline(nextDateHeadline)
    #Download images for yesterday
    print "downloading images from DSCOVR and writing headline"
    gifFramesArray = downloadImages(imageData, 'png', headline)
    print "obtained {x} images".format(x=len(gifFramesArray))
    #Generate the gif based on yesterday images
    print "generating gif..."
    imageio.mimwrite(config.GIF_PATH, gifFramesArray, loop=0, duration=0.6, quantizer='wu', subrectangles=False)
    print "gif saved to {gif}".format(gif=config.GIF_PATH)
    gif = config.GIF_PATH
else:
    print "The world stopped"
    sys.exit()
    #gif = config.GIF_PATH_NODATA
    #Use a gif from Giphy (i.e. "World stopped") as illustration?



print "Writing Tumblr caption"
text = writeCaption(nextDateImages, headline)

print "Posting to Tumblr"
post = postToTumblr(gif, text, nextDateImages)
Exemple #36
0
# init buffers
buffer_a_data = np.random.uniform(0.0, 1.0, (H, W, 4)).astype('f4')
buffer_a = context.buffer(buffer_a_data)
buffer_b_data = np.zeros((H, W, 4)).astype('f4')
buffer_b = context.buffer(buffer_b_data)
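# the two buffers are ping-ponged below: each frame one is read by the shader while the other is written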

imgs = []
last_buffer = buffer_b
for i in range(FRAMES):
    toggle = True if i % 2 else False
    buffer_a.bind_to_storage_buffer(1 if toggle else 0)
    buffer_b.bind_to_storage_buffer(0 if toggle else 1)

    # toggle 2 buffers as input and output
    last_buffer = buffer_a if toggle else buffer_b

    # local invocation id x -> pixel x
    # work groupid x -> pixel y
    # eg) buffer[x, y] = gl_LocalInvocationID.x + gl_WorkGroupID.x * W
    compute_shader.run(group_x=H, group_y=1)

    # read back the output buffer and convert it to an 8-bit image
    output = np.frombuffer(last_buffer.read(), dtype=np.float32)
    output = output.reshape((H, W, 4))
    output = np.multiply(output, 255).astype(np.uint8)
    imgs.append(output)

# if you don't want to use imageio, remove this line
imageio.mimwrite(f"./{OUTPUT_DIRPATH}/debug.gif", imgs, "GIF", duration=0.15)
Exemple #37
0
def create_gif(image_titles, num_states, iteration):
    images = []
    for image_title in image_titles:
        im = imageio.imread('Media/' + image_title + '.png')
        images.append(im)
    out_path = os.path.join(os.path.dirname(__file__),
                            'Media/GIFs/{}_states_model_iteration_{}.gif'.format(num_states, iteration))
    imageio.mimwrite(out_path, np.array(images), duration=0.8)
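
A minimal usage sketch for the helper above; the frame names are hypothetical and assume Media/step_0.png through Media/step_4.png already exist next to the script:

# hypothetical call: stitch five pre-rendered frames into one 4-state GIF
create_gif(['step_{}'.format(n) for n in range(5)], num_states=4, iteration=10)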
Exemple #38
0
    all_outputs.append(colorize(position / 1))
    if iter % 3 == 0:
        plt.subplot(1, 2, 1)
        # plt.imshow(np.abs(potential))
        # plt.imshow(colorize(momentum))
        # plt.imshow(momentum.real)
        plt.imshow(np.minimum(10,
                              np.sqrt(momentum.real**2 +
                                      momentum.imag**2)))  #[200:300,200:300])
        plt.subplot(1, 2, 2)
        # plt.imshow(all_outputs[-1])
        plt.imshow(np.sqrt(position.real**2 + position.imag**2))
        # plt.subplot(1, 3, 3)
        # plt.imshow(ground_potential + 0.00001*1.0 / gaussian_filter(np.abs(np.maximum(position, 0.01)*1), sigma=15)**2)
        # plt.imshow(colorize(fftshift(fft2(position))))
        plt.draw_all()
        plt.pause(0.001)

print("normalizing and reshaping")
# from 0 - 1 (nIter, width, width, 3) to 0 - 255 (nIter, 3, width, width)
all_outputs = np.array(all_outputs) * 255
all_outputs = np.array(np.floor(all_outputs), dtype=int)
# all_outputs = np.transpose(all_outputs, (0, 3, 1, 2))
print("Writing to video")
# array2gif.write_gif(all_outputs, "schrodinger2dOut.gif")

imageio.mimwrite(first_unoccupied("schrodinger2dOut%s.mp4"),
                 all_outputs,
                 "mp4",
                 fps=20)
Exemple #39
0
assert [len(row) for row in worldMap] == [sizeHorizontal] * sizeVertical
#------------------------------------------------
fig = plt.figure()
ax = fig.add_subplot(111, aspect=1.1)
worldMapForPlot = [[0 if cell == 'red' else 1 for cell in row] for row in worldMap]
plt.title('World Map')
ax = sns.heatmap(np.array(worldMapForPlot), cmap=ListedColormap(['red', 'green']), annot=False, cbar=True, linecolor='k', linewidths=1, xticklabels=['']*sizeHorizontal, yticklabels=['']*sizeVertical)
fig.savefig('HistogramFilter/Animation/worldMap.png')
plt.close()
#------------------------------------------------
probSensorIsRight = 0.7
probMoveIsSuccessful = 0.8
#------------------------------------------------
flatInitProb = 1 / (float(sizeHorizontal * sizeVertical))
flatRow = [flatInitProb for cell in range(sizeHorizontal)]
flatLandscape = list(itertools.repeat(flatRow, sizeVertical))
#------------------------------------------------
locationPrinter(worldMap, flatLandscape, '0-initial', frameDir, True)
#------------------------------------------------
step = generalStep(probMoveIsSuccessful, probSensorIsRight, worldMap, frameDir)
#------------------------------------------------
l0 = step('stay', 'green', flatLandscape, '1-stay-green')
l1 = step('right', 'green', l0, '2-right-green')
l2 = step('down', 'green', l1, '3-down-green')
l3 = step('down', 'green', l2, '4-down-green')
l4 = step('right', 'green', l3, '5-right-green')
#------------------------------------------------
allFrames = [imageio.imread(img)
             for img in sorted(glob.glob('%s/*.png' % frameDir),
                               key=lambda frame: int(frame.split('/')[2].split('-')[0]))]
imageio.mimwrite('%s/animatedLocalizer.gif' % animationDir, allFrames, duration=[1.5] * len(allFrames))
#------------------------------------------------
Exemple #40
0
def createAnimation(frametype, eid, fname, types):
    import imageio

    url = '{}/index.php?view=image&width={}&eid={}&username={}&password={}'.format(
        g.config['portal'], g.config['animation_width'], eid, g.config['user'],
        urllib.parse.quote(g.config['password'], safe=''))
    api_url = '{}/events/{}.json?username={}&password={}'.format(
        g.config['api_portal'], eid, g.config['user'],
        urllib.parse.quote(g.config['password'], safe=''))
    disp_api_url = '{}/events/{}.json?username={}&password=***'.format(
        g.config['api_portal'], eid, g.config['user'])

    rtries = g.config['animation_max_tries']
    sleep_secs = g.config['animation_retry_sleep']
    fid = None
    totframes = 0
    length = 0
    fps = 0

    target_fps = 2
    buffer_seconds = 5  #seconds
    while rtries:
        g.logger.debug(
            f"animation: Try:{g.config['animation_max_tries']-rtries+1} Getting {disp_api_url}"
        )
        r = None
        try:
            resp = requests.get(api_url)
            resp.raise_for_status()
            r = resp.json()
        except requests.exceptions.RequestException as e:
            g.logger.error(f'{e}')
            rtries = rtries - 1
            time.sleep(sleep_secs)
            continue

        r_event = r['event']['Event']
        r_frame = r['event']['Frame']
        r_frame_len = len(r_frame)

        if frametype == 'alarm':
            fid = int(r_event.get('AlarmFrameId'))
        elif frametype == 'snapshot':
            fid = int(r_event.get('MaxScoreFrameId'))
        else:
            fid = int(frametype)  # here frametype carries a literal frame id

        #g.logger.debug (f'animation: Response {r}')
        if r_frame is None or not r_frame_len:
            g.logger.debug(
                f'No frames found yet via API, deferring check for {sleep_secs} seconds...'
            )
            rtries = rtries - 1
            time.sleep(sleep_secs)
            continue

        totframes = len(r_frame)
        total_time = round(float(r_frame[-1]['Delta']))
        fps = round(totframes / total_time)

        if r_frame_len < fid + fps * buffer_seconds:
            g.logger.debug(
                f'I\'ve got {r_frame_len} frames, but that\'s not enough as anchor frame is type:{frametype}:{fid}, deferring check for {sleep_secs} seconds...'
            )
            rtries = rtries - 1
            time.sleep(sleep_secs)
            continue

        g.logger.debug('animation: Got {} frames'.format(r_frame_len))
        break
        # fid is the anchor frame
    if not rtries:
        g.logger.error('animation: Bailing, failed too many times')
        return

    g.logger.debug('animation: event fps={}'.format(fps))
    start_frame = int(max(fid - (buffer_seconds * fps), 1))
    end_frame = int(min(totframes, fid + (buffer_seconds * fps)))
    skip = round(fps / target_fps)
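    # render a window of +/- buffer_seconds around the anchor frame, subsampled every `skip` frames to land near target_fps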

    g.logger.debug(
        f'animation: anchor={frametype} start={start_frame} end={end_frame} skip={skip}'
    )
    g.logger.debug('animation: Grabbing frames...')
    images = []
    od_images = []

    # use frametype  (alarm/snapshot) to get od anchor, because fid can be wrong when translating from videos
    od_url = '{}/index.php?view=image&eid={}&fid={}&username={}&password={}&width={}'.format(
        g.config['portal'], eid, frametype, g.config['user'],
        urllib.parse.quote(g.config['password'], safe=''),
        g.config['animation_width'])
    g.logger.debug(f'Grabbing anchor frame: {frametype}...')
    try:
        od_frame = imageio.imread(od_url)
        # 1 second @ 2fps
        od_images.append(od_frame)
        od_images.append(od_frame)
    except Exception as e:
        g.logger.error(f'Error downloading anchor frame: Error:{e}')

    for i in range(start_frame, end_frame + 1, skip):
        p_url = url + '&fid={}'.format(i)
        g.logger.debug(f'animation: Grabbing Frame:{i}', level=2)
        try:
            images.append(imageio.imread(p_url))
        except Exception as e:
            g.logger.error(f'Error downloading frame {i}: Error:{e}')

    g.logger.debug(f'animation: Saving {fname}...')
    try:
        if 'mp4' in types.lower():
            g.logger.debug('Creating MP4...')
            mp4_final = od_images.copy()
            mp4_final.extend(images)
            imageio.mimwrite(fname + '.mp4',
                             mp4_final,
                             format='mp4',
                             fps=target_fps)
            size = os.stat(fname + '.mp4').st_size
            g.logger.debug(
                f'animation: saved to {fname}.mp4, size {size} bytes, frames: {len(images)}'
            )

        if 'gif' in types.lower():
            from pygifsicle import optimize
            g.logger.debug('Creating GIF...')

            # Let's slice the right amount from images
            # GIF uses a +- 2 second buffer
            gif_buffer_seconds = 2
            gif_start_frame = int(max(fid - (gif_buffer_seconds * fps), 1))
            gif_end_frame = int(
                min(totframes, fid + (gif_buffer_seconds * fps)))
            s1 = round((gif_start_frame - start_frame) / skip)
            s2 = round((end_frame - gif_end_frame) / skip)
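            # s1/s2 trim the already-downloaded MP4 window down to the narrower +/- 2 s GIF window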
            if s1 >= 0 and s2 >= 0:
                gif_images = images[s1:len(images) - s2]  # avoids images[s1:-0], which would be empty when s2 == 0
                g.logger.debug(
                    f'For GIF, slicing {s1} to -{s2} from a total of {len(images)}'
                )
                g.logger.debug('animation:Saving...')
                gif_final = od_images.copy()
                gif_final.extend(gif_images)
                imageio.mimwrite(fname + '.gif',
                                 gif_final,
                                 format='gif',
                                 fps=target_fps)
                g.logger.debug('animation:Optimizing...')
                optimize(source=fname + '.gif', colors=256)
                size = os.stat(fname + '.gif').st_size
                g.logger.debug(
                    f'animation: saved to {fname}.gif, size {size} bytes, frames:{len(gif_images)}'
                )
            else:
                g.logger.debug(
                    f'Bailing in GIF creation, range is weird start:{s1}:end offset {-s2}'
                )

    except Exception as e:
        g.logger.error('animation: Traceback:{}'.format(
            traceback.format_exc()))
Exemple #41
0
def write_video(path, idx, video_data, video_format):
    save_path = path + '/' + idx.split('.')[0] + '.' + video_format
    imageio.mimwrite(save_path, video_data, fps=30)
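
A minimal usage sketch with hypothetical inputs: video_data is assumed to be a uint8 array of shape (frames, height, width, 3), and writing mp4 requires imageio's ffmpeg plugin:

import numpy as np
frames = np.random.randint(0, 256, (30, 64, 64, 3), dtype=np.uint8)  # 30 random 64x64 RGB frames
write_video('.', 'clip_0.raw', frames, 'mp4')  # writes ./clip_0.mp4 at 30 fps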
Exemple #42
0
    noise_dim=args.noise_dim,
    noise_type=args.noise_type,
    noise_mix_type=args.noise_mix_type).float().to(device)
model_file = f"./trained-models/{args.model_type}/{args.dset_name}/{args.best_k}V-{args.l}"
g_file = f"{model_file}_g.pt"

generator.load_state_dict(torch.load(g_file))

dirname = f"plots/{k}-{l}-{args.dset_name}"
if not os.path.exists(dirname): os.makedirs(dirname)

img_array = []
for b, batch in enumerate(testloader):
    if (b + 1) == 224:
        imageio.mimwrite(f"plots/{k}-{l}-{args.dset_name}/movie.gif",
                         img_array,
                         fps=2)
        exit()
    print(f"Plotting density plots for batch {b+1}/{len(testloader)}")
    sequence,target,dist_matrix,bearing_matrix,heading_matrix,ip_mask, \
    op_mask,pedestrians, scene_context, batch_mean, batch_var = batch
    if pedestrians.data < 2:
        continue
    predictions, sequence = get_prediction(batch, generator, args)
    predictions = predictions.squeeze()
    predictions = predictions.clone().detach().cpu()
    sequence = sequence.squeeze(0).clone().detach().cpu()
    target = target.squeeze(0).clone().detach().cpu()
    gt_traj = torch.cat((sequence, target), dim=1)
    xlim = [gt_traj[..., 0].min() - 1.0, gt_traj[..., 0].max() + 1.0]
    ylim = [gt_traj[..., 1].min() - 1.0, gt_traj[..., 1].max() + 1.0]