Example 1
class VisdomShow:
    def __init__(self, env_name):
        self.vis = Visdom(env=env_name)
        print('Visdom display initialized')
        
    def show_img(self, img):
        #img = img[(2,1,0),:,:]
        self.vis.image(np.clip(img,0,1))
        #self.vis.image(np.clip(img.data.cpu().numpy(),0,1))
    
    def show_vid(self, vid):
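        # clip to [0, 1], scale to uint8, swap BGR -> RGB channels, and reorder to T x H x W x C for Visdom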
        vid = (np.clip(vid,0.,1.)*255.).astype(np.uint8)
        vid = np.transpose(vid[:,(2,1,0),:,:], (0,2,3,1))
        self.vis.video(vid, opts={'fps': 2})

    def add_text(self, img, text, color=(0,255,0)):
        img = np.transpose(img[(2,1,0),:,:], (1,2,0)).copy()
        cv2.putText(img, text, (2,24), cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 1, cv2.LINE_AA)
        img = np.transpose(img, (2,0,1))[(2,1,0),:,:]
        return img
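A minimal usage sketch for `VisdomShow`, assuming the imports its methods rely on (`Visdom`, NumPy as `np`, OpenCV as `cv2`); the environment name and random data below are purely illustrative:

from visdom import Visdom
import numpy as np
import cv2

show = VisdomShow('demo_env')                           # hypothetical environment name
img = np.random.rand(3, 128, 128).astype(np.float32)    # C x H x W, values in [0, 1]
show.show_img(img)                                      # display the raw image
labeled = show.add_text(img, 'hello')                   # overlay a text label (green by default)
show.show_img(labeled)
vid = np.random.rand(8, 3, 64, 64).astype(np.float32)   # T x C x H x W
show.show_vid(vid)                                      # clipped, scaled to uint8, sent as a 2 fps video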
Example 2
class VisdomWriter:
    def __init__(self):
        try:
            from visdom import Visdom
        except ImportError:
            raise ImportError("Visdom visualization requires installation of Visdom")

        self.scalar_dict = {}
        self.server_connected = False
        self.vis = Visdom()
        self.windows = {}

        self._try_connect()

    def _try_connect(self):
        startup_sec = 1
        self.server_connected = self.vis.check_connection()
        while not self.server_connected and startup_sec > 0:
            time.sleep(0.1)
            startup_sec -= 0.1
            self.server_connected = self.vis.check_connection()
        assert self.server_connected, 'No connection could be formed quickly'

    @_check_connection
    def add_scalar(self, tag, scalar_value, global_step=None, main_tag='default'):
        """Add scalar data to Visdom. Plots the values in a plot titled
           {main_tag}-{tag}.

        Args:
            tag (string): Data identifier
            scalar_value (float or string/blobname): Value to save
            global_step (int): Global step value to record
            main_tag (string): Data group identifier
        """
        if self.scalar_dict.get(main_tag) is None:
            self.scalar_dict[main_tag] = {}
        exists = self.scalar_dict[main_tag].get(tag) is not None
        self.scalar_dict[main_tag][tag] = self.scalar_dict[main_tag][tag] + [scalar_value] if exists else [scalar_value]
        plot_name = '{}-{}'.format(main_tag, tag)
        # If there is no global_step provided, follow sequential order
        x_val = len(self.scalar_dict[main_tag][tag]) if not global_step else global_step
        if exists:
            # Update our existing Visdom window
            self.vis.line(
                X=make_np(x_val),
                Y=make_np(scalar_value),
                name=plot_name,
                update='append',
                win=self.windows[plot_name],
            )
        else:
            # Save the window if we are creating this graph for the first time
            self.windows[plot_name] = self.vis.line(
                X=make_np(x_val),
                Y=make_np(scalar_value),
                name=plot_name,
                opts={
                    'title': plot_name,
                    'xlabel': 'timestep',
                    'ylabel': tag,
                },
            )

    @_check_connection
    def add_scalars(self, main_tag, tag_scalar_dict, global_step=None):
        """Adds many scalar data to summary.

        Note that this function also keeps logged scalars in memory. In extreme case it explodes your RAM.

        Args:
            main_tag (string): Data group identifier
            tag_scalar_dict (dict): Key-value pair storing the tag and corresponding values
            global_step (int): Global step value to record

        Examples::

            writer.add_scalars('run_14h',{'xsinx':i*np.sin(i/r),
                                          'xcosx':i*np.cos(i/r),
                                          'arctanx': numsteps*np.arctan(i/r)}, i)
            This function adds three plots:
                'run_14h-xsinx',
                'run_14h-xcosx',
                'run_14h-arctanx'
            with the corresponding values.
        """
        for key in tag_scalar_dict.keys():
            self.add_scalar(key, tag_scalar_dict[key], global_step, main_tag)

    @_check_connection
    def export_scalars_to_json(self, path):
        """Exports to the given 'path' an ASCII file containing all the scalars written
        so far by this instance, with the following format:
        {writer_id : [[timestamp, step, value], ...], ...}

        The scalars saved by ``add_scalars()`` will be flushed after export.
        """
        with open(path, "w") as f:
            json.dump(self.scalar_dict, f)
        self.scalar_dict = {}

    @_check_connection
    def add_histogram(self, tag, values, global_step=None, bins='tensorflow'):
        """Add histogram to summary.

        Args:
            tag (string): Data identifier
            values (torch.Tensor, numpy.array, or string/blobname): Values to build histogram
            global_step (int): Global step value to record
            bins (string): one of {'tensorflow', 'auto', 'fd', ...}, this determines how the bins are made. You can find
              other options in: https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
        """
        values = make_np(values)
        self.vis.histogram(values, opts={'title': tag})

    @_check_connection
    def add_image(self, tag, img_tensor, global_step=None, caption=None):
        """Add image data to summary.

        Note that this requires the ``pillow`` package.

        Args:
            tag (string): Data identifier
            img_tensor (torch.Tensor, numpy.array, or string/blobname): Image data
            global_step (int): Global step value to record
        Shape:
            img_tensor: :math:`(C, H, W)`. Using ``torchvision.utils.make_grid()`` to prepare it is a good idea.
            C = colors (can be 1 - grayscale, 3 - RGB, 4 - RGBA)
        """
        img_tensor = make_np(img_tensor)
        self.vis.image(img_tensor, opts={'title': tag, 'caption': caption})

    @_check_connection
    def add_figure(self, tag, figure, global_step=None, close=True):
        """Render matplotlib figure into an image and add it to summary.

        Note that this requires the ``matplotlib`` package.

        Args:
            tag (string): Data identifier
            figure (matplotlib.pyplot.figure or list of figures): figure or a list of figures
            global_step (int): Global step value to record
            close (bool): Flag to automatically close the figure
        """
        self.add_image(tag, figure_to_image(figure, close), global_step)

    @_check_connection
    def add_video(self, tag, vid_tensor, global_step=None, fps=4):
        """Add video data to summary.

        Note that this requires the ``moviepy`` package.

        Args:
            tag (string): Data identifier
            vid_tensor (torch.Tensor): Video data
            global_step (int): Global step value to record
            fps (float or int): Frames per second
        Shape:
            vid_tensor: :math:`(B, C, T, H, W)`. (if following tensorboard-pytorch format)
            vid_tensor: :math:`(T, H, W, C)`. (if following visdom format)
            B = batches, C = colors (1, 3, or 4), T = time frames, H = height, W = width
        """
        shape = vid_tensor.shape
        # A batch of videos (tensorboard-pytorch format) is a 5D tensor
        if len(shape) > 4:
            for i in range(shape[0]):
                # Reshape each video to Visdom's (T x H x W x C) and write each video
                if isinstance(vid_tensor, np.ndarray):
                    ind_vid = torch.from_numpy(vid_tensor[i, :, :, :, :]).permute(1, 2, 3, 0)
                else:
                    ind_vid = vid_tensor[i, :, :, :, :].permute(1, 2, 3, 0)
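                # Heuristic: if any value lies strictly between 0 and 1, assume the video is
                # float-normalized to [0, 1] and rescale it to 0-255 before casting to uint8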
                scale_factor = 255 if np.any((ind_vid > 0) & (ind_vid < 1)) else 1
                # Visdom looks for .ndim attr, this is something raw Tensors don't have
                # Cast to Numpy array to get .ndim attr
                ind_vid = ind_vid.numpy()
                ind_vid = (ind_vid * scale_factor).astype(np.uint8)
                assert ind_vid.shape[3] in [1, 3, 4], \
                    'Visdom requires the last dimension to be color, which can be 1 (grayscale), 3 (RGB) or 4 (RGBA)'
                self.vis.video(tensor=ind_vid, opts={'fps': fps})
        else:
            self.vis.video(tensor=vid_tensor, opts={'fps': fps})

    @_check_connection
    def add_audio(self, tag, snd_tensor, global_step=None, sample_rate=44100):
        """Add audio data to summary.

        Args:
            tag (string): Data identifier
            snd_tensor (torch.Tensor, numpy.array, or string/blobname): Sound data
            global_step (int): Global step value to record
            sample_rate (int): sample rate in Hz

        Shape:
            snd_tensor: :math:`(1, L)`. The values should lie in [-1, 1].
        """
        snd_tensor = make_np(snd_tensor)
        self.vis.audio(tensor=snd_tensor, opts={'sample_frequency': sample_rate})

    @_check_connection
    def add_text(self, tag, text_string, global_step=None):
        """Add text data to summary.

        Args:
            tag (string): Data identifier
            text_string (string): String to save
            global_step (int): Global step value to record
        Examples::
            writer.add_text('lstm', 'This is an lstm', 0)
            writer.add_text('rnn', 'This is an rnn', 10)
        """
        if text_string is None:
            # Visdom doesn't support tags, write the tag as the text_string
            text_string = tag
        self.vis.text(text_string)

    @_check_connection
    def add_graph_onnx(self, prototxt):
        # TODO: Visdom doesn't support graph visualization yet, so this is a no-op
        return

    @_check_connection
    def add_graph(self, model, input_to_model=None, verbose=False, **kwargs):
        # TODO: Visdom doesn't support graph visualization yet, so this is a no-op
        return

    @_check_connection
    def add_embedding(self, mat, metadata=None, label_img=None, global_step=None, tag='default', metadata_header=None):
        # TODO: Visdom doesn't support embeddings yet, so this is a no-op
        return

    @_check_connection
    def add_pr_curve(self, tag, labels, predictions, global_step=None, num_thresholds=127, weights=None):
        """Adds precision recall curve.

        Args:
            tag (string): Data identifier
            labels (torch.Tensor, numpy.array, or string/blobname): Ground truth data. Binary label for each element.
            predictions (torch.Tensor, numpy.array, or string/blobname):
                The probability that an element is classified as true. Values should be in [0, 1].
            global_step (int): Global step value to record
            num_thresholds (int): Number of thresholds used to draw the curve.

        """
        labels, predictions = make_np(labels), make_np(predictions)
        raw_data = compute_curve(labels, predictions, num_thresholds, weights)

        # compute_curve returns np.stack((tp, fp, tn, fn, precision, recall))
        # We want to access 'precision' and 'recall'
        precision, recall = raw_data[4, :], raw_data[5, :]

        self.vis.line(
            X=recall,
            Y=precision,
            name=tag,
            opts={
                'title': 'PR Curve for {}'.format(tag),
                'xlabel': 'recall',
                'ylabel': 'precision',
            },
        )

    @_check_connection
    def add_pr_curve_raw(self, tag, true_positive_counts,
                         false_positive_counts,
                         true_negative_counts,
                         false_negative_counts,
                         precision,
                         recall, global_step=None, num_thresholds=127, weights=None):
        """Adds precision recall curve with raw data.

        Args:
            tag (string): Data identifier
            true_positive_counts (torch.Tensor, numpy.array, or string/blobname): true positive counts
            false_positive_counts (torch.Tensor, numpy.array, or string/blobname): false positive counts
            true_negative_counts (torch.Tensor, numpy.array, or string/blobname): true negative counts
            false_negative_counts (torch.Tensor, numpy.array, or string/blobname): false negative counts
            precision (torch.Tensor, numpy.array, or string/blobname): precision
            recall (torch.Tensor, numpy.array, or string/blobname): recall
            global_step (int): Global step value to record
            num_thresholds (int): Number of thresholds used to draw the curve.
            see: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/README.md
        """
        precision, recall = make_np(precision), make_np(recall)
        self.vis.line(
            X=recall,
            Y=precision,
            name=tag,
            opts={
                'title': 'PR Curve for {}'.format(tag),
                'xlabel': 'recall',
                'ylabel': 'precision',
            },
        )

    def close(self):
        del self.vis
        del self.scalar_dict
        gc.collect()
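A short usage sketch for `VisdomWriter`, assuming a Visdom server is already running on the default port and that the module-level helpers the class relies on (`make_np`, the `_check_connection` decorator, `figure_to_image`, `compute_curve`) are importable from its package; the tags, values, and output path below are illustrative:

import numpy as np

writer = VisdomWriter()                                    # raises if no server answers quickly
for step in range(1, 11):
    writer.add_scalar('loss', 1.0 / step, global_step=step, main_tag='train')
writer.add_scalars('train', {'acc': 0.9, 'f1': 0.85}, global_step=10)
writer.add_histogram('weights', np.random.randn(1000))
writer.export_scalars_to_json('./scalars.json')            # also flushes the in-memory scalar cache
writer.close()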
Example 3
    # matplotlib demo:
    try:
        import matplotlib.pyplot as plt
        plt.plot([1, 23, 2, 4])
        plt.ylabel('some numbers')
        viz.matplot(plt)
    except BaseException as err:
        print('Skipped matplotlib example')
        print('Error message: ', err)

    # video demo:
    try:
        video = np.empty([256, 250, 250, 3], dtype=np.uint8)
        for n in range(256):
            video[n, :, :, :].fill(n)
        viz.video(tensor=video)

        # video demo:
        # download video from http://media.w3.org/2010/05/sintel/trailer.ogv
        video_url = 'http://media.w3.org/2010/05/sintel/trailer.ogv'
        # linux
        if _platform == "linux" or _platform == "linux2":
            videofile = '/home/%s/trailer.ogv' % getpass.getuser()
        # MAC OS X
        elif _platform == "darwin":
            videofile = '/Users/%s/trailer.ogv' % getpass.getuser()
        # download video
        urllib.request.urlretrieve(video_url, videofile)

        if os.path.isfile(videofile):
            viz.video(videofile=videofile)
    except BaseException:
        print('Skipped video file example')
Example 4
    if args.mode is None:
        args.mode = args.w.split('/')[-1][14:18]
    file_path = args.f_name
    # where the output video is saved
    # file_savepath = './post_video/'+args.f_name.split('/')[-1].split('.')[0]+args.mode+'_'+args.w.split('.')[0][-7:]+'.'+args.f_name.split('.')[-1]
    file_savepath = './post_video/' + args.f_name.split('/')[-1].split('.')[0] + args.mode + '_' + args.w.split('_')[-1][:7] + '.avi'
    print(os.path.exists(file_path))

    if not os.path.exists('./post_video/'):
        os.mkdir('./post_video/')

    # cropped faces and their corresponding masks
    face_savepath = './face_detected/'
    if not os.path.exists(face_savepath):
        os.mkdir(face_savepath)
    # original frames and the finished fake-smile images
    TmpPicSavePath = './face2pic/'
    if not os.path.exists(TmpPicSavePath):
        os.mkdir(TmpPicSavePath)

    if args.step == 1:
        video2ph(file_path, face_savepath, init_path=TmpPicSavePath, args=args)
        args.step += 1
    i = 0
    if args.step == 2:
        model_process(args, read_path=face_savepath, TmpPicSavePath=TmpPicSavePath)

    ph2video(video_savepath=file_savepath, photo_path=TmpPicSavePath)

    vis.video(videofile=file_savepath, win=file_savepath)
Example 5
    # matplotlib demo:
    try:
        import matplotlib.pyplot as plt
        plt.plot([1, 23, 2, 4])
        plt.ylabel('some numbers')
        viz.matplot(plt)
    except BaseException as err:
        print('Skipped matplotlib example')
        print('Error message: ', err)

    # video demo:
    try:
        video = np.empty([256, 250, 250, 3], dtype=np.uint8)
        for n in range(256):
            video[n, :, :, :].fill(n)
        viz.video(tensor=video)
    except BaseException:
        print('Skipped video tensor example')

    try:
        # video demo:
        # download video from http://media.w3.org/2010/05/sintel/trailer.ogv
        video_url = 'http://media.w3.org/2010/05/sintel/trailer.ogv'
        videofile = os.path.join(tempfile.gettempdir(), 'trailer.ogv')
        urllib.request.urlretrieve(video_url, videofile)

        if os.path.isfile(videofile):
            viz.video(videofile=videofile, opts={'width': 864, 'height': 480})
    except BaseException:
        print('Skipped video file example')
Example 6
import os
from sys import platform as _platform

import numpy as np
from PIL import Image
import torchvision.transforms as transforms
from visdom import Visdom

viz = Visdom()

try:
    # video demo: download video from http://media.w3.org/2010/05/sintel/trailer.ogv
    #video_url = 'http://media.w3.org/2010/05/sintel/trailer.ogv'
    # linux
    if _platform == "linux" or _platform == "linux2":
        videofile = '/workspace/visdom/trailer.ogv'
    ## MAC OS X
    #elif _platform == "darwin":
    #    videofile = '/Users/%s/trailer.ogv' % getpass.getuser()
    # download video
    #urllib.request.urlretrieve(video_url, videofile)

    if os.path.isfile(videofile):
        viz.video(videofile=videofile)
except BaseException:
    print('Skipped video example')

#assert viz.check_connection()
#viz.close()

# image demo
image = Image.open('/workspace/visdom/rnet_faces.png')
image = transforms.ToTensor()(image)
viz.image(img=image, win='image', opts={'title': 'show image'})
Example 7
from visdom import Visdom
import numpy as np
import math
import os.path
import getpass

viz = Visdom()

textwindow = viz.text('Hello World!')

# video demo:
try:
    video = np.empty([256, 250, 250, 3], dtype=np.uint8)
    for n in range(256):
        video[n, :, :, :].fill(n)
    viz.video(tensor=video)

    # video demo: download video from http://media.w3.org/2010/05/sintel/trailer.ogv
    videofile = '/home/%s/trailer.ogv' % getpass.getuser()
    if os.path.isfile(videofile):
        viz.video(videofile=videofile)
except ImportError:
    print('Skipped video example')

# image demo
viz.image(
    np.random.rand(3, 512, 256),
    opts=dict(title='Random!', caption='How random.'),
)

# matplotlib demo:
try:
    import matplotlib.pyplot as plt

    plt.plot([1, 23, 2, 4])
    plt.ylabel('some numbers')
    viz.matplot(plt)
except BaseException as err:
    print('Skipped matplotlib example')
    print('Error message: ', err)

# video demo:
try:
    video = np.empty([256, 250, 250, 3], dtype=np.uint8)
    for n in range(256):
        video[n, :, :, :].fill(n)
    viz.video(tensor=video)
except BaseException:
    print('Skipped video tensor example')

# try:
#     # video demo:
#     # download video from http://media.w3.org/2010/05/sintel/trailer.ogv
#     video_url = 'http://media.w3.org/2010/05/sintel/trailer.ogv'
#     videofile = os.path.join(tempfile.gettempdir(), 'trailer.ogv')
#     urllib.request.urlretrieve(video_url, videofile)
#
#     if os.path.isfile(videofile):
#         viz.video(videofile=videofile, opts={'width': 864, 'height': 480})
# except BaseException:
#     print('Skipped video file example')
Example 9
class Grapher(object):
    ''' A helper class to assist with plotting to visdom '''
    def __init__(self, env, server, port=8097):
        self.vis = Visdom(server=server, port=port, env=env)
        self.env = env
        self.param_map = self._init_map()
        self.function_map = {
            'line': self._plot_line,
            'imgs': self._plot_imgs,
            'img': self._plot_img,
            'hist': self._plot_hist,
            'video': self._plot_video
        }

        # this is persisted through the lifespan of the object
        # it contains the window objects
        self.registered_lines = {}

    def save(self):
        self.vis.save([self.env])

    def _init_map(self):
        ''' Internal member to return a map of lists '''
        return {'line': [], 'imgs': [], 'img': [], 'video': [], 'hist': []}

    def clear(self):
        '''Helper to clear and reset the internal map'''
        if hasattr(self, 'param_map'):
            self.param_map.clear()

        self.param_map = self._init_map()

    def _plot_img(self, img_list):
        for img_map in img_list:
            for key, value in img_map.items():
                self.vis.image(to_data(value).detach().cpu().numpy(),
                               opts=dict(title=key),
                               win=key)

    def _plot_imgs(self, imgs_list):
        for imgs_map in imgs_list:
            for key, value in imgs_map.items():
                self.vis.images(to_data(value).detach().cpu().numpy(),
                                opts=dict(title=key),
                                win=key)

    def _plot_line(self, line_list):
        for line_map in line_list:
            for key, value in line_map.items():
                x = np.asarray(value[0])  # time-point
                y = np.asarray(value[1])  # value
                if len(y.shape) < 1:
                    y = np.expand_dims(y, -1)

                if len(x.shape) < 1:
                    x = np.expand_dims(x, -1)

                if key not in self.registered_lines:
                    self.registered_lines[key] = self.vis.line(
                        Y=y, X=x, opts=dict(title=key), win=key)
                else:
                    self.vis.line(Y=y,
                                  X=x,
                                  opts=dict(title=key),
                                  win=self.registered_lines[key],
                                  update='append')

    def _plot_hist(self, hist_list):
        for hist_map in hist_list:
            for key, value in hist_map.items():
                numbins = value[0]
                hist_value = value[1]
                self.vis.histogram(hist_value,
                                   opts=dict(title=key, numbins=numbins),
                                   win=key)

    def _plot_video(self, video_list):
        for video_map in video_list:
            for key, value in video_map.items():
                assert isinstance(value, torch.Tensor), "files not supported"
                self.vis.video(tensor=to_data(value),
                               opts=dict(title=key),
                               win=key)

    def register(self, param_map, plot_types, override=True):
        ''' submit bulk map here, see register_single for detail '''
        assert len(param_map) == len(plot_types)
        if not isinstance(override, list):
            override = [override] * len(param_map)

        for pm, pt, o in zip(param_map, plot_types, override):
            self.register_single(pm, pt, o)

    def _find_and_append(self, param_map, plot_type):
        assert plot_type == 'line', "only line append supported currently"
        exists = False
        for i in range(len(self.param_map[plot_type])):
            list_item = self.param_map[plot_type]
            for key, value in param_map.items():
                for j in range(len(list_item)):
                    if key in list_item[j]:
                        list_item[j][key][0].extend(value[0])
                        list_item[j][key][1].extend(value[1])
                        exists = True

        if not exists:
            self.param_map[plot_type].append(param_map)

    def _find_and_replace(self, param_map, plot_type):
        exists = False
        for i in range(len(self.param_map[plot_type])):
            list_item = self.param_map[plot_type]
            for key, value in param_map.items():
                for j in range(len(list_item)):
                    if key in list_item[j]:
                        list_item[j][key] = value
                        exists = True

        if not exists:
            self.param_map[plot_type].append(param_map)

    def register_single(self,
                        param_map,
                        plot_type='line',
                        append=False,
                        override=True):
        ''' register a single plot which will be added to the current map
            eg: register({'title': value}, 'line')

            plot_type: 'line', 'hist', 'imgs', 'img', 'video'
            override : if True then overwrite an item if it exists
            append   : if True appends to the line. This is mainly
                       useful if you are extending a line before show()

            Note: you can't override and append
        '''
        assert len(param_map) == 1, "only one register per call"
        assert not (override is True
                    and append is True), "cant override and append"

        plot_type = plot_type.lower().strip()
        assert plot_type == 'line' \
            or plot_type == 'hist' \
            or plot_type == 'imgs' \
            or plot_type == 'img' \
            or plot_type == 'video'

        if append:
            self._find_and_append(param_map, plot_type)

        if override:
            self._find_and_replace(param_map, plot_type)

    def _check_exists(self, plot_type, param_map):
        for key, _ in param_map.items():  # {'name', value}
            for list_item in self.param_map[
                    plot_type]:  # [{'name': value}, {'name2': value2}]
                return key not in list_item

    def show(self, clear=True):
        ''' This helper is called to actually push the data to visdom'''
        for key, value_list in self.param_map.items():
            self.function_map[key](value_list)

        if clear:  # helper to clear the plot map
            self.clear()
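A minimal sketch of driving the `Grapher` helper above against a running Visdom server; the server address, environment name, tags, and values are illustrative, and the image/video plot types would additionally rely on the module's `to_data` helper:

import numpy as np

grapher = Grapher(env='demo', server='http://localhost', port=8097)
for epoch in range(1, 4):
    # each line entry maps a window name to ([x values], [y values])
    grapher.register_single({'train_loss': ([epoch], [1.0 / epoch])},
                            plot_type='line', append=True, override=False)
grapher.register_single({'grad_hist': (20, np.random.randn(500))}, plot_type='hist')
grapher.show()    # pushes every registered plot to Visdom and clears the internal map
grapher.save()    # persists the 'demo' environment on the server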
Example 10
import os
import numpy as np
from visdom import Visdom

viz = Visdom(server='http://[::1]', port=8097, env='test')
assert viz.check_connection()

# downloading the video may be slow; please wait a few minutes
video_file = "./DATA//trailer.ogv"
# if not os.path.exists(video_file):
#     video_url = 'http://media.w3.org/2010/05/sintel/trailer.ogv'
#     res = requests.get(video_url)
#     with open(video_file, "wb") as f:
#         f.write(res.content)
# with open(video_file, "wb") as f:
#     f.write(res.content)
viz.video(videofile=video_file, win='win_video')

# images
# single image
viz.image(
    np.random.rand(3, 256, 256),  # format: C*H*W
    win='win_image',
    opts={
        'title': 'single_image',
        'showlegend': True
    })
# multiple images
viz.images(np.random.rand(20, 3, 64, 64),
           win='win_images',
           opts={
               'title': 'multi-images',
           })
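To keep these windows across a Visdom server restart, the environment can be persisted explicitly; a one-line sketch reusing the `viz` client and the `test` environment created above:

viz.save(['test'])  # ask the server to serialize the 'test' environment to disk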