Example No. 1
    def test_cifar10_per_class(self):
        """

    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        # func name
        assert sys._getframe().f_code.co_name.startswith('test_')
        command = sys._getframe().f_code.co_name[5:]
        class_name = self.__class__.__name__[7:] \
          if self.__class__.__name__.startswith('Testing') \
          else self.__class__.__name__
        outdir = f'results/{class_name}/{command}'
        os.makedirs(outdir, exist_ok=True)

        import matplotlib.pylab as plt
        from template_lib.d2.data.build_cifar10_per_class import data_path, kwargs_list
        from detectron2.data import MetadataCatalog

        pass
Example No. 2
    def test_ddp(self):
        """
    Usage:

        export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
        export TIME_STR=1
        export PYTHONPATH=./exp:./stylegan2-pytorch:./
        python -c "from exp.tests.test_styleganv2 import Testing_stylegan2;\
          Testing_stylegan2().test_train_ffhq_128()"

    :return:
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        from template_lib.v2.config import get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, \
          start_cmd_run

        command, outdir = get_command_and_outdir(
            self, func_name=sys._getframe().f_code.co_name, file=__file__)
        argv_str = f"""
                --tl_config_file exp/configs/styleganv2.yaml
                --tl_command {command}
                --tl_outdir {outdir}
                """
        args = setup_outdir_and_yaml(argv_str)

        nproc_per_node = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
        cmd_str = f"""
        python -m torch.distributed.launch --nproc_per_node={nproc_per_node} --master_port=8888 
          exp/scripts/train.py 
          {get_append_cmd_str(args)}
        """
        start_cmd_run(cmd_str)
        pass
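
For context, the torch.distributed.launch command built above expects the launched script to set up one process group member per GPU. A minimal sketch of that boilerplate (hypothetical, since exp/scripts/train.py is not shown in the source), using only documented torch.distributed APIs:

import argparse

import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

def main():
    # torch.distributed.launch spawns one process per GPU and passes --local_rank.
    parser = argparse.ArgumentParser()
    parser.add_argument('--local_rank', type=int, default=0)
    args, _ = parser.parse_known_args()

    torch.cuda.set_device(args.local_rank)
    # env:// init reads MASTER_ADDR/MASTER_PORT set by the launcher (--master_port=8888 above).
    dist.init_process_group(backend='nccl')

    model = torch.nn.Linear(8, 8).cuda()
    model = DDP(model, device_ids=[args.local_rank])
    # ... training loop ...

if __name__ == '__main__':
    main()
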
Example No. 3
  def test_summary_defaultdict2txtfig(self):
    if 'CUDA_VISIBLE_DEVICES' not in os.environ:
      os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    if 'PORT' not in os.environ:
      os.environ['PORT'] = '6006'
    if 'TIME_STR' not in os.environ:
      os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
    # func name
    assert sys._getframe().f_code.co_name.startswith('test_')
    command = sys._getframe().f_code.co_name[5:]
    class_name = self.__class__.__name__[7:] \
      if self.__class__.__name__.startswith('Testing') \
      else self.__class__.__name__
    outdir = f'results/{class_name}/{command}'

    argv_str = f"""
                --config template_lib/configs/config.yaml
                --command {command}
                --outdir {outdir}
                """
    from template_lib.utils.config import parse_args_and_setup_myargs, config2args
    args, myargs, _ = parse_args_and_setup_myargs(argv_str, start_tb=False)

    prefix = 'test_summary_scalars'
    import collections
    summary_dict = collections.defaultdict(dict)
    for step in range(1000):
      summary = {'a': step, 'b': step + 1}
      for i in range(20):
        summary_dict[f'dict{i}'] = summary
      Trainer.summary_defaultdict2txtfig(default_dict=summary_dict, prefix=prefix, step=step,
                                         textlogger=myargs.textlogger, in_one_figure=False)
      Trainer.summary_defaultdict2txtfig(default_dict=summary_dict, prefix=prefix, step=step,
                                         textlogger=myargs.textlogger, in_one_figure=True)

    return
Example No. 4
  def test_gcn(self):
    """

    """
    if 'CUDA_VISIBLE_DEVICES' not in os.environ:
      os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    if 'PORT' not in os.environ:
      os.environ['PORT'] = '6006'
    if 'TIME_STR' not in os.environ:
      os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
    # func name
    assert sys._getframe().f_code.co_name.startswith('test_')
    command = sys._getframe().f_code.co_name[5:]
    class_name = self.__class__.__name__[7:] \
      if self.__class__.__name__.startswith('Testing') \
      else self.__class__.__name__
    outdir = f'results/{class_name}/{command}'
    import shutil
    shutil.rmtree(outdir, ignore_errors=True)
    os.makedirs(outdir, exist_ok=True)

    from template_lib.examples.DGL import test_gcn

    pass
Example No. 5
    def test_OneCycleLR(self, debug=True):
        """
    Usage:
        python template_lib/modelarts/scripts/copy_tool.py \
          -s s3://bucket-7001/ZhouPeng/pypi/torch1_7_0 -d /cache/pypi -t copytree
        for filename in /cache/pypi/*.whl; do
            pip install $filename
        done
        proj_root=moco-exp
        python template_lib/modelarts/scripts/copy_tool.py \
          -s s3://bucket-7001/ZhouPeng/codes/$proj_root -d /cache/$proj_root -t copytree -b /cache/$proj_root/code.zip
        cd /cache/$proj_root
        pip install -r requirements.txt

        export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
        export TIME_STR=1
        export PYTHONPATH=./exp:./stylegan2-pytorch:./
        python -c "from exp.tests.test_styleganv2 import Testing_stylegan2;\
          Testing_stylegan2().test_train_ffhq_128()"

    :return:
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        from template_lib.v2.config_cfgnode.argparser import \
          (get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, start_cmd_run)

        tl_opts = ' '.join(sys.argv[sys.argv.index('--tl_opts') +
                                    1:]) if '--tl_opts' in sys.argv else ''
        print(f'tl_opts:\n {tl_opts}')

        command, outdir = get_command_and_outdir(
            self, func_name=sys._getframe().f_code.co_name, file=__file__)
        argv_str = f"""
                --tl_config_file none
                --tl_command none
                --tl_outdir {outdir}
                """
        args = setup_outdir_and_yaml(argv_str, return_cfg=True)

        import torch.nn as nn
        from torch.optim import lr_scheduler
        from matplotlib import pyplot as plt

        model = nn.Linear(3, 64)

        def create_optimizer():
            return SGD(model.parameters(),
                       lr=0.1,
                       momentum=0.9,
                       weight_decay=1e-4)

        def plot_lr(scheduler, title='', labels=['base'], nrof_epoch=100):
            lr_li = [[] for _ in range(len(labels))]
            epoch_li = list(range(nrof_epoch))
            for epoch in epoch_li:
                scheduler.step()  # step() computes the lr for the current epoch and updates the optimizer's param groups
                lr = scheduler.get_last_lr()  # learning rate(s) for the current epoch
                for i in range(len(labels)):
                    lr_li[i].append(lr[i])
            for lr, label in zip(lr_li, labels):
                plt.plot(epoch_li, lr, label=label)
            plt.grid()
            plt.xlabel('epoch')
            plt.ylabel('lr')
            plt.title(title)
            plt.legend()
            plt.show()

        optimizer = create_optimizer()
        scheduler = lr_scheduler.OneCycleLR(optimizer, 0.1, total_steps=100)
        plot_lr(scheduler, title='OneCycleLR')
        pass
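
For reference, the same schedule can be recorded without the plotting helper; a minimal self-contained sketch that steps the optimizer before the scheduler (the order PyTorch has expected since 1.1; calling scheduler.step() with no prior optimizer.step(), as plot_lr does, triggers a UserWarning):

import torch
from torch.optim import SGD, lr_scheduler

model = torch.nn.Linear(3, 64)
optimizer = SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=0.1, total_steps=100)

lrs = []
for _ in range(100):
    optimizer.step()      # optimizer first, then scheduler
    scheduler.step()
    lrs.append(scheduler.get_last_lr()[0])
print(f'peak lr: {max(lrs):.4f}, final lr: {lrs[-1]:.2e}')
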
Example No. 6
    def test_base_usage(self, debug=True):
        """
    Usage:
        python template_lib/modelarts/scripts/copy_tool.py \
          -s s3://bucket-7001/ZhouPeng/pypi/torch1_7_0 -d /cache/pypi -t copytree
        for filename in /cache/pypi/*.whl; do
            pip install $filename
        done
        proj_root=moco-exp
        python template_lib/modelarts/scripts/copy_tool.py \
          -s s3://bucket-7001/ZhouPeng/codes/$proj_root -d /cache/$proj_root -t copytree -b /cache/$proj_root/code.zip
        cd /cache/$proj_root
        pip install -r requirements.txt

        export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
        export TIME_STR=1
        export PYTHONPATH=./exp:./stylegan2-pytorch:./
        python -c "from exp.tests.test_styleganv2 import Testing_stylegan2;\
          Testing_stylegan2().test_train_ffhq_128()"

    :return:
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        from template_lib.v2.config_cfgnode.argparser import \
          (get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, start_cmd_run)

        tl_opts = ' '.join(sys.argv[sys.argv.index('--tl_opts') +
                                    1:]) if '--tl_opts' in sys.argv else ''
        print(f'tl_opts:\n {tl_opts}')

        command, outdir = get_command_and_outdir(
            self, func_name=sys._getframe().f_code.co_name, file=__file__)
        argv_str = f"""
                --tl_config_file none
                --tl_command none
                --tl_outdir {outdir}
                """
        args = setup_outdir_and_yaml(argv_str, return_cfg=True)

        import torch
        import numpy as np
        import warnings
        warnings.filterwarnings('ignore')  # ignore warnings

        x = torch.linspace(-np.pi, np.pi, 2000)
        y = torch.sin(x)

        p = torch.tensor([1, 2, 3])
        xx = x.unsqueeze(-1).pow(p)

        model = torch.nn.Sequential(torch.nn.Linear(3, 1),
                                    torch.nn.Flatten(0, 1))
        loss_fn = torch.nn.MSELoss(reduction='sum')

        learning_rate = 1e-3
        optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate)
        for t in range(1, 1001):
            y_pred = model(xx)
            loss = loss_fn(y_pred, y)
            if t % 100 == 0:
                print('No.{: 5d}, loss: {:.6f}'.format(t, loss.item()))
            optimizer.zero_grad()  # clear accumulated gradients
            loss.backward()  # backpropagate to compute gradients
            optimizer.step()  # gradient-descent parameter update

        pass
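
Since xx = x.unsqueeze(-1).pow(p) stacks the features [x, x², x³], the network fits sin(x) with a cubic polynomial. A short continuation (it assumes the trained model from the loop above is still in scope) that reads off the fitted coefficients:

# model[0] is the nn.Linear(3, 1); its weight and bias are the cubic's coefficients.
linear = model[0]
b = linear.bias.item()
w1, w2, w3 = linear.weight.squeeze().tolist()
print(f'y ~ {b:.4f} + {w1:.4f} x + {w2:.4f} x^2 + {w3:.4f} x^3')
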
Example No. 7

    def test_surface_3d_1(self):
        """

    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        # func name
        assert sys._getframe().f_code.co_name.startswith('test_')
        command = sys._getframe().f_code.co_name[5:]
        class_name = self.__class__.__name__[7:] \
          if self.__class__.__name__.startswith('Testing') \
          else self.__class__.__name__
        outdir = f'results/{class_name}/{command}'

        from template_lib.utils.plot_results import PlotResults
        import collections, shutil

        shutil.rmtree(outdir, ignore_errors=True)
        os.makedirs(outdir, exist_ok=True)

        from mpl_toolkits.mplot3d import Axes3D
        import matplotlib.pyplot as plt
        from matplotlib import cm
        from matplotlib.ticker import LinearLocator, FormatStrFormatter
        import numpy as np

        fig = plt.figure()
        ax = fig.gca(projection='3d')

        # Make data.
        X = np.arange(-5, 5, 0.1)
        Y = np.arange(-5, 5, 0.1)
        X, Y = np.meshgrid(X, Y)
        R = np.sqrt(X**2 + Y**2)
        Z = np.sin(R)

        # Plot the surface.
        surf = ax.plot_surface(X,
                               Y,
                               Z,
                               cmap=cm.coolwarm,
                               rstride=1,
                               cstride=1,
                               linewidth=0,
                               antialiased=False)

        # Customize the z axis.
        ax.set_zlim(-1.01, 1.01)
        ax.zaxis.set_major_locator(LinearLocator(10))
        ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))

        # Add a color bar which maps values to colors.
        fig.colorbar(surf, shrink=0.5, aspect=5)

        plt.show()

        pass
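
Note that fig.gca(projection='3d') was deprecated in Matplotlib 3.4 and no longer accepts the projection argument from 3.6 on; on recent versions, create the axes like this instead:

import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(projection='3d')  # replacement for fig.gca(projection='3d')
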
Example No. 8

    def test_subplot_surface_3d(self):
        """

    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        # func name
        assert sys._getframe().f_code.co_name.startswith('test_')
        command = sys._getframe().f_code.co_name[5:]
        class_name = self.__class__.__name__[7:] \
          if self.__class__.__name__.startswith('Testing') \
          else self.__class__.__name__
        outdir = f'results/{class_name}/{command}'

        from template_lib.utils.plot_results import PlotResults
        import collections, shutil

        shutil.rmtree(outdir, ignore_errors=True)
        os.makedirs(outdir, exist_ok=True)

        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d.axes3d import Axes3D, get_test_data
        from matplotlib import cm, colors
        import numpy as np

        # set up a figure twice as wide as it is tall
        fig = plt.figure(figsize=plt.figaspect(0.5))

        # ===============
        #  First subplot
        # ===============
        # set up the axes for the first plot
        ax = fig.add_subplot(1, 2, 1, projection='3d')

        # plot a 3D surface like in the example mplot3d/surface3d_demo
        X = np.arange(-5, 5, 0.25)
        Y = np.arange(-5, 5, 0.25)
        X, Y = np.meshgrid(X, Y)
        R = np.sqrt(X**2 + Y**2)
        Z = np.sin(R)
        surf = ax.plot_surface(X,
                               Y,
                               Z,
                               rstride=1,
                               cstride=1,
                               cmap=cm.coolwarm,
                               linewidth=0,
                               antialiased=False)
        ax.set_zlim(-1.01, 1.01)

        # fig.colorbar(surf, shrink=0.5, aspect=10)

        # ===============
        # Second subplot
        # ===============
        # set up the axes for the second plot
        ax2 = fig.add_subplot(1, 2, 2, projection='3d')

        # plot a 3D wireframe like in the example mplot3d/wire3d_demo
        X, Y, Z = get_test_data(0.05)
        Z = np.ones_like(Z)
        # ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)
        surf2 = ax2.plot_surface(X,
                                 Y,
                                 Z,
                                 rstride=1,
                                 cstride=1,
                                 cmap=cm.coolwarm,
                                 linewidth=0,
                                 antialiased=False)

        # fig.colorbar(surf, shrink=0.5, aspect=10)

        norm = colors.Normalize(vmin=-1, vmax=1)
        surf.set_norm(norm)
        surf2.set_norm(norm)
        fig.colorbar(surf2,
                     orientation='vertical',
                     fraction=.1,
                     shrink=0.5,
                     aspect=10,
                     anchor=(0, 0.5))
        # fig.colorbar(surf, ax=[ax, ax2], orientation='horizontal', fraction=.1, shrink=0.5, aspect=10)

        fig.tight_layout()
        plt.show()
        pass
Example No. 9
    def test_MixedLayerCond(self):
        """
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
        # func name
        assert sys._getframe().f_code.co_name.startswith('test_')
        command = sys._getframe().f_code.co_name[5:]
        class_name = self.__class__.__name__[7:] \
          if self.__class__.__name__.startswith('Testing') \
          else self.__class__.__name__
        outdir = f'results/{class_name}/{command}'
        import yaml
        from template_lib.d2.layers import build_d2layer

        cfg_str = """
      layer:
        name: "MixedLayerCond"
        in_channels: "kwargs['in_channels']"
        out_channels: "kwargs['out_channels']"
        cfg_ops: "kwargs['cfg_ops']"
        cfg_bn:
          name: "BatchNorm2d"
          num_features: "kwargs['num_features']"
          affine: true
          track_running_stats: true
        cfg_act:
          name: "ReLU"
      cfg_ops:
        SNConv2d_3x3:
          name: "SNConv2d"
          in_channels: "kwargs['in_channels']"
          out_channels: "kwargs['out_channels']"
          kernel_size: 3
          padding: 1
        Conv2d_3x3:
          name: "Conv2d"
          in_channels: "kwargs['in_channels']"
          out_channels: "kwargs['out_channels']"
          kernel_size: 3
          padding: 1
    """
        cfg = EasyDict(yaml.safe_load(cfg_str))
        op = build_d2layer(cfg.layer,
                           in_channels=8,
                           out_channels=8,
                           cfg_ops=cfg.cfg_ops)
        num_classes = 2
        bs = num_classes
        num_ops = 2

        op.cuda()
        x = torch.randn(bs, 8, 32, 32).cuda()
        y = torch.arange(bs).cuda()
        sample_arc = torch.arange(num_ops).cuda()
        x = op(x, y, sample_arc)
        pass
Example No. 10
    def test_hook_for_grad_cam(self, debug=True):
        """
    Usage:
        python template_lib/modelarts/scripts/copy_tool.py \
          -s s3://bucket-7001/ZhouPeng/pypi/torch1_7_0 -d /cache/pypi -t copytree
        for filename in /cache/pypi/*.whl; do
            pip install $filename
        done
        proj_root=moco-exp
        python template_lib/modelarts/scripts/copy_tool.py \
          -s s3://bucket-7001/ZhouPeng/codes/$proj_root -d /cache/$proj_root -t copytree -b /cache/$proj_root/code.zip
        cd /cache/$proj_root
        pip install -r requirements.txt

        export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
        export TIME_STR=1
        export PYTHONPATH=./exp:./stylegan2-pytorch:./
        python -c "from exp.tests.test_styleganv2 import Testing_stylegan2;\
          Testing_stylegan2().test_train_ffhq_128()"

    :return:
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        from template_lib.v2.config_cfgnode.argparser import \
          (get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, start_cmd_run)

        tl_opts = ' '.join(sys.argv[sys.argv.index('--tl_opts') +
                                    1:]) if '--tl_opts' in sys.argv else ''
        print(f'tl_opts:\n {tl_opts}')

        command, outdir = get_command_and_outdir(
            self, func_name=sys._getframe().f_code.co_name, file=__file__)
        argv_str = f"""
                --tl_config_file none
                --tl_command none
                --tl_outdir {outdir}
                --tl_opts {tl_opts}
                """
        args, cfg = setup_outdir_and_yaml(argv_str, return_cfg=True)

        n_gpus = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
        cmd_str = f"""
        python 
        template_lib/proj/pytorch/examples/hook_for_grad_cam.py
        {get_append_cmd_str(args)}
        """
        if debug:
            cmd_str += f"""
                  --tl_debug
                  --tl_opts 
                  """
        else:
            cmd_str += f"""
                  --tl_opts {tl_opts}
                  """
        start_cmd_run(cmd_str)
        # from template_lib.v2.config_cfgnode import update_parser_defaults_from_yaml, global_cfg
        # from template_lib.modelarts import modelarts_utils
        # update_parser_defaults_from_yaml(parser)

        # modelarts_utils.setup_tl_outdir_obs(global_cfg)
        # modelarts_utils.modelarts_sync_results_dir(global_cfg, join=True)
        # modelarts_utils.prepare_dataset(global_cfg.get('modelarts_download', {}), global_cfg=global_cfg)
        #
        # modelarts_utils.prepare_dataset(global_cfg.get('modelarts_upload', {}), global_cfg=global_cfg, download=False)
        # modelarts_utils.modelarts_sync_results_dir(global_cfg, join=True)

        pass
Example No. 11
    def test_extract_ImageNet_1000x50(self):
        """
    Usage:
        proj_root=moco-exp
        python template_lib/modelarts/scripts/copy_tool.py \
          -s s3://bucket-7001/ZhouPeng/codes/$proj_root -d /cache/$proj_root -t copytree
        cd /cache/$proj_root

        export CUDA_VISIBLE_DEVICES=0
        export TIME_STR=0
        export PYTHONPATH=./
        python -c "from template_lib.proj.imagenet.tests.test_imagenet import Testing_PrepareImageNet;\
          Testing_PrepareImageNet().test_extract_ImageNet_1000x50()"

    :return:
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        from template_lib.v2.config_cfgnode.argparser import \
          (get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, start_cmd_run)
        from template_lib.v2.config_cfgnode import update_parser_defaults_from_yaml, global_cfg
        from template_lib.modelarts import modelarts_utils

        command, outdir = get_command_and_outdir(
            self, func_name=sys._getframe().f_code.co_name, file=__file__)
        argv_str = f"""
                --tl_config_file template_lib/proj/imagenet/tests/configs/PrepareImageNet.yaml
                --tl_command {command}
                --tl_outdir {outdir}
                """
        args, cfg = setup_outdir_and_yaml(argv_str, return_cfg=True)
        global_cfg.merge_from_dict(cfg)
        global_cfg.merge_from_dict(vars(args))

        modelarts_utils.setup_tl_outdir_obs(global_cfg)
        modelarts_utils.modelarts_sync_results_dir(global_cfg, join=True)
        modelarts_utils.prepare_dataset(global_cfg.get('modelarts_download',
                                                       {}),
                                        global_cfg=global_cfg)

        train_dir = f'{cfg.data_dir}/train'
        counter_cls = 0
        for rootdir, subdir, files in os.walk(train_dir):
            if len(subdir) == 0:
                counter_cls += 1
                extracted_files = sorted(files)[:cfg.num_per_class]
                for file in tqdm.tqdm(extracted_files,
                                      desc=f'class: {counter_cls}'):
                    img_path = os.path.join(rootdir, file)
                    img_rel_path = os.path.relpath(img_path, cfg.data_dir)
                    saved_img_path = f'{cfg.saved_dir}/{os.path.dirname(img_rel_path)}'
                    os.makedirs(saved_img_path, exist_ok=True)
                    shutil.copy(img_path, saved_img_path)
            pass

        modelarts_utils.prepare_dataset(global_cfg.get('modelarts_upload', {}),
                                        global_cfg=global_cfg,
                                        download=False)
        modelarts_utils.modelarts_sync_results_dir(global_cfg, join=True)
        pass
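
The os.walk loop keeps the first num_per_class images of every class folder. An equivalent self-contained sketch with pathlib (extract_subset is an illustrative name, not a template_lib function):

from pathlib import Path
import shutil

def extract_subset(data_dir, saved_dir, num_per_class):
    for class_dir in sorted(p for p in Path(data_dir, 'train').iterdir() if p.is_dir()):
        dst = Path(saved_dir, 'train', class_dir.name)
        dst.mkdir(parents=True, exist_ok=True)
        for img in sorted(class_dir.iterdir())[:num_per_class]:
            shutil.copy(img, dst)
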
Example No. 12
    def test_extract_ImageNet100_CMC(self):
        """
    Usage:
        proj_root=moco-exp
        python template_lib/modelarts/scripts/copy_tool.py \
          -s s3://bucket-7001/ZhouPeng/codes/$proj_root -d /cache/$proj_root -t copytree
        cd /cache/$proj_root

        export CUDA_VISIBLE_DEVICES=0
        export TIME_STR=0
        export PYTHONPATH=./
        python -c "from template_lib.proj.imagenet.tests.test_imagenet import Testing_PrepareImageNet;\
          Testing_PrepareImageNet().test_extract_ImageNet100_CMC()"

    :return:
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        from template_lib.v2.config_cfgnode.argparser import \
          (get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, start_cmd_run)
        from template_lib.v2.config_cfgnode import update_parser_defaults_from_yaml, global_cfg
        from template_lib.modelarts import modelarts_utils
        from distutils.dir_util import copy_tree

        command, outdir = get_command_and_outdir(
            self, func_name=sys._getframe().f_code.co_name, file=__file__)
        argv_str = f"""
                --tl_config_file template_lib/proj/imagenet/tests/configs/PrepareImageNet.yaml
                --tl_command {command}
                --tl_outdir {outdir}
                """
        args, cfg = setup_outdir_and_yaml(argv_str, return_cfg=True)

        modelarts_utils.setup_tl_outdir_obs(global_cfg)
        modelarts_utils.modelarts_sync_results_dir(global_cfg, join=True)
        modelarts_utils.prepare_dataset(global_cfg.get('modelarts_download',
                                                       {}),
                                        global_cfg=global_cfg)

        train_dir = f'{cfg.data_dir}/train'
        val_dir = f'{cfg.data_dir}/val'
        save_train_dir = f'{cfg.saved_dir}/train'
        save_val_dir = f'{cfg.saved_dir}/val'
        os.makedirs(save_train_dir, exist_ok=True)
        os.makedirs(save_val_dir, exist_ok=True)

        with open(cfg.class_list_file, 'r') as f:
            class_list = f.readlines()
        for class_subdir in tqdm.tqdm(class_list):
            class_subdir, _ = class_subdir.strip().split()
            train_class_dir = f'{train_dir}/{class_subdir}'
            save_train_class_dir = f'{save_train_dir}/{class_subdir}'
            copy_tree(train_class_dir, save_train_class_dir)

            val_class_dir = f'{val_dir}/{class_subdir}'
            save_val_class_dir = f'{save_val_dir}/{class_subdir}'
            copy_tree(val_class_dir, save_val_class_dir)

        modelarts_utils.prepare_dataset(global_cfg.get('modelarts_upload', {}),
                                        global_cfg=global_cfg,
                                        download=False)
        modelarts_utils.modelarts_sync_results_dir(global_cfg, join=True)
        pass
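
Note that distutils (and with it distutils.dir_util.copy_tree) was removed in Python 3.12. On Python >= 3.8, the two copy calls above have a drop-in stdlib replacement:

import shutil

# merges into an existing destination, like copy_tree did
shutil.copytree(train_class_dir, save_train_class_dir, dirs_exist_ok=True)
shutil.copytree(val_class_dir, save_val_class_dir, dirs_exist_ok=True)
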
Example No. 13
    def test_plot_FID_IS(self):
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        from template_lib.v2.config_cfgnode.argparser import \
          (get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, start_cmd_run)

        command, outdir = get_command_and_outdir(
            self, func_name=sys._getframe().f_code.co_name, file=__file__)
        argv_str = f"""
                    --tl_config_file none
                    --tl_command none
                    --tl_outdir {outdir}
                    """
        args = setup_outdir_and_yaml(argv_str)
        outdir = args.tl_outdir

        from template_lib.utils.plot_results import PlotResults
        import collections

        outfigure = os.path.join(outdir, 'FID_IS.jpg')
        default_dicts = []
        show_max = []

        FID_c100 = collections.defaultdict(dict)
        title = 'FID_c100'
        log_file = 'textdir/evaltf.ma0.FID_tf.log'
        dd = eval(title)
        dd['/home/user321/user/code_sync/Omni-GAN-PyTorch/results/train_Omni_GAN_cifar100_3'] = \
          {'Omni-GAN-c100': log_file, }
        dd['/home/user321/user/code_sync/Omni-GAN-PyTorch/results/train_BigGAN_cifar100'] = \
          {'BigGAN-c100': log_file, }

        dd['properties'] = {'title': title, 'ylim': [0, 30]}
        default_dicts.append(dd)
        show_max.append(False)

        IS_c100 = collections.defaultdict(dict)
        title = 'IS_c100'
        log_file = 'textdir/evaltf.ma1.IS_mean_tf.log'
        dd = eval(title)
        dd['/home/user321/user/code_sync/Omni-GAN-PyTorch/results/train_Omni_GAN_cifar100_3'] = \
          {'Omni-GAN-c100': log_file, }
        dd['/home/user321/user/code_sync/Omni-GAN-PyTorch/results/train_BigGAN_cifar100'] = \
          {'BigGAN-c100': log_file, }

        dd['properties'] = {
            'title': title,
        }
        default_dicts.append(dd)
        show_max.append(True)

        FID_c10 = collections.defaultdict(dict)
        title = 'FID_c10'
        log_file = 'textdir/evaltf.ma0.FID_tf.log'
        dd = eval(title)
        dd['/home/user321/user/code_sync/Omni-GAN-PyTorch/results/train_Omni_GAN_cifar10'] = \
          {'Omni-GAN-c10': log_file, }
        dd['/home/user321/user/code_sync/Omni-GAN-PyTorch/results/train_BigGAN_cifar10'] = \
          {'BigGAN-c10': log_file, }

        dd['properties'] = {'title': title, 'ylim': [0, 30]}
        default_dicts.append(dd)
        show_max.append(False)

        IS_c10 = collections.defaultdict(dict)
        title = 'IS_c10'
        log_file = 'textdir/evaltf.ma1.IS_mean_tf.log'
        dd = eval(title)
        dd['/home/user321/user/code_sync/Omni-GAN-PyTorch/results/train_Omni_GAN_cifar10'] = \
          {'Omni-GAN-c10': log_file, }
        dd['/home/user321/user/code_sync/Omni-GAN-PyTorch/results/train_BigGAN_cifar10'] = \
          {'BigGAN-c10': log_file, }

        dd['properties'] = {
            'title': title,
        }
        default_dicts.append(dd)
        show_max.append(True)

        plotobs = PlotResults()
        label2datas_list = plotobs.plot_defaultdicts(
            outfigure=outfigure,
            default_dicts=default_dicts,
            show_max=show_max,
            figsize_wh=(16, 7.2))
        print(f'Save to {outfigure}.')
        pass
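
Note that dd = eval(title) only looks up the local defaultdict whose name happens to match the title string; a direct assignment (dd = FID_c100, dd = IS_c100, and so on) does the same thing without eval.
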
Example No. 14
    def test__select_images(self, debug=True):
        """
    Usage:
        ssh -o ServerAliveInterval=30 -o ServerAliveCountMax=2 root@localhost -p 2232

        export CUDA_VISIBLE_DEVICES=4
        export TIME_STR=1
        export PYTHONPATH=./:./ada_lib
        python -c "from exp.tests.test_ada_ultra import Testing_train_StyleUltraGAN_ADA_Transfer_MixedFaces;\
          Testing_train_StyleUltraGAN_ADA_Transfer_MixedFaces().test_projector_web(debug=False)" \
          --tl_opts port 8530 start_web True

    :return:
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        from template_lib.v2.config_cfgnode.argparser import \
          (get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, start_cmd_run)

        tl_opts = ' '.join(sys.argv[sys.argv.index('--tl_opts') +
                                    1:]) if '--tl_opts' in sys.argv else ''
        print(f'tl_opts:\n {tl_opts}')

        command, outdir = get_command_and_outdir(
            self, func_name=sys._getframe().f_code.co_name, file=__file__)
        argv_str = f"""
                --tl_config_file none
                --tl_command none
                --tl_outdir {outdir}
                --tl_opts {tl_opts}
                """
        args, cfg = setup_outdir_and_yaml(argv_str, return_cfg=True)
        from pathlib import Path
        import shutil

        domain_dict = {}

        domain_dict['lso_sm'] = """
x0_lso: results/train_StyleUltraGAN_ADA_Transfer_MixedFaces/double_inversion_web-20210604_093503_613/exp/0024/29218_lso.jpg,
x1_lso: results/train_StyleUltraGAN_ADA_Transfer_MixedFaces/double_inversion_web-20210604_093503_613/exp/0014/29438_lso.jpg,
        """
        suffix_str = [''] * 2

        for domain, image_list in domain_dict.items():
            image_list = image_list.split(',')
            for item, suffix in zip(image_list, suffix_str):
                layer_name, image_path = item.strip().split(':')
                layer_name = layer_name.strip().replace(' ', '_')
                image_path = Path(image_path.strip())

                select_path = Path(
                    f"{image_path.parent}/{image_path.stem}{suffix}.jpg")
                saved_path = Path(
                    f"{outdir}/images/{domain}/{layer_name}_{select_path.name}"
                )

                os.makedirs(saved_path.parent, exist_ok=True)
                shutil.copy(select_path, saved_path)

        print(outdir)
        pass
Example No. 15
    def test_BERT_example(self):
        """

    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
        # func name
        assert sys._getframe().f_code.co_name.startswith('test_')
        command = sys._getframe().f_code.co_name[5:]
        class_name = self.__class__.__name__[7:] \
          if self.__class__.__name__.startswith('Testing') \
          else self.__class__.__name__
        outdir = f'results/{class_name}/{command}'

        from datetime import datetime
        TIME_STR = bool(int(os.getenv('TIME_STR', 0)))
        time_str = datetime.now().strftime("%Y%m%d-%H_%M_%S_%f")[:-3]
        outdir = outdir if not TIME_STR else (outdir + '_' + time_str)
        print(outdir)

        import collections, shutil
        shutil.rmtree(outdir, ignore_errors=True)
        os.makedirs(outdir, exist_ok=True)

        import torch
        from transformers import BertTokenizer, BertModel, BertForMaskedLM

        # OPTIONAL: if you want to have more information on what's happening under the hood, activate the logger as follows
        import logging
        logging.basicConfig(level=logging.INFO)

        # Load pre-trained model tokenizer (vocabulary)
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

        # Tokenize input
        text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
        tokenized_text = tokenizer.tokenize(text)

        # Mask a token that we will try to predict back with `BertForMaskedLM`
        masked_index = 8
        tokenized_text[masked_index] = '[MASK]'
        assert tokenized_text == [
            '[CLS]', 'who', 'was', 'jim', 'henson', '?', '[SEP]', 'jim',
            '[MASK]', 'was', 'a', 'puppet', '##eer', '[SEP]'
        ]

        # Convert token to vocabulary indices
        indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
        # Define sentence A and B indices associated to 1st and 2nd sentences (see paper)
        segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]

        # Convert inputs to PyTorch tensors
        tokens_tensor = torch.tensor([indexed_tokens])
        segments_tensors = torch.tensor([segments_ids])
        pass

        # Load pre-trained model (weights)
        model = BertModel.from_pretrained('bert-base-uncased')

        # Set the model in evaluation mode to deactivate the DropOut modules
        # This is IMPORTANT to have reproducible results during evaluation!
        model.eval()

        # If you have a GPU, put everything on cuda
        tokens_tensor = tokens_tensor.to('cuda')
        segments_tensors = segments_tensors.to('cuda')
        model.to('cuda')

        # Predict hidden states features for each layer
        with torch.no_grad():
            # See the models docstrings for the detail of the inputs
            outputs = model(tokens_tensor, token_type_ids=segments_tensors)
            # Transformers models always output tuples.
            # See the models docstrings for the detail of all the outputs
            # In our case, the first element is the hidden state of the last layer of the Bert model
            encoded_layers = outputs[0]
        # We have encoded our input sequence in a FloatTensor of shape (batch size, sequence length, model hidden dimension)
        assert tuple(encoded_layers.shape) == (1, len(indexed_tokens),
                                               model.config.hidden_size)
        pass

        # Load pre-trained model (weights)
        model = BertForMaskedLM.from_pretrained('bert-base-uncased')
        model.eval()

        # If you have a GPU, put everything on cuda
        tokens_tensor = tokens_tensor.to('cuda')
        segments_tensors = segments_tensors.to('cuda')
        model.to('cuda')

        # Predict all tokens
        with torch.no_grad():
            outputs = model(tokens_tensor, token_type_ids=segments_tensors)
            predictions = outputs[0]

        # confirm we were able to predict 'henson'
        predicted_index = torch.argmax(predictions[0, masked_index]).item()
        predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
        assert predicted_token == 'henson'
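
The same masked-token prediction is a one-liner with the transformers pipeline API; a minimal sketch (the model is downloaded on first use):

from transformers import pipeline

fill_mask = pipeline('fill-mask', model='bert-base-uncased')
preds = fill_mask('Jim Henson was a [MASK].')
print(preds[0]['token_str'])  # top prediction, e.g. 'puppeteer'
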
Example No. 16
    def test_add_text_in_tensor(self, debug=True):
        """
    Usage:
        export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
        export TIME_STR=1
        export PYTHONPATH=./exp:./stylegan2-pytorch:./
        python -c "from exp.tests.test_styleganv2 import Testing_stylegan2;\
          Testing_stylegan2().test_train_ffhq_128()"

    :return:
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        from template_lib.v2.config_cfgnode.argparser import \
          (get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, start_cmd_run)

        tl_opts = ' '.join(sys.argv[sys.argv.index('--tl_opts') +
                                    1:]) if '--tl_opts' in sys.argv else ''
        print(f'tl_opts:\n {tl_opts}')

        command, outdir = get_command_and_outdir(
            self, func_name=sys._getframe().f_code.co_name, file=__file__)
        argv_str = f"""
                --tl_config_file none
                --tl_command none
                --tl_outdir {outdir}
                --tl_opts {tl_opts}
                """
        args, cfg = setup_outdir_and_yaml(argv_str, return_cfg=True)

        n_gpus = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
        import torchvision.transforms.functional as trans_f

        img_path = "template_lib/datasets/images/zebra_GT_target_origin.png"
        left = [80, 300]
        upper = [120, 140]
        w = 50
        h = 20
        pad = 2

        img = Image.open(img_path)

        img = sr_merge_original_image_and_patches(img,
                                                  lefts=left,
                                                  uppers=upper,
                                                  w=w,
                                                  h=h,
                                                  pad=pad)

        img_tensor = trans_f.to_tensor(img)

        img_tensor_text = add_text_in_tensor(img_tensor=img_tensor,
                                             text='Image',
                                             xy=(5, 5))

        img_pil = trans_f.to_pil_image(img_tensor_text)

        fig, axes = plt.subplots()
        axes.imshow(img_pil)
        fig.show()
        pass
Example No. 17
    def test_ConvNet(self, debug=True):
        """
    Usage:
        python template_lib/modelarts/scripts/copy_tool.py \
          -s s3://bucket-7001/ZhouPeng/pypi/torch1_7_0 -d /cache/pypi -t copytree
        for filename in /cache/pypi/*.whl; do
            pip install $filename
        done
        proj_root=moco-exp
        python template_lib/modelarts/scripts/copy_tool.py \
          -s s3://bucket-7001/ZhouPeng/codes/$proj_root -d /cache/$proj_root -t copytree -b /cache/$proj_root/code.zip
        cd /cache/$proj_root
        pip install -r requirements.txt

        export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
        export TIME_STR=1
        export PYTHONPATH=./exp:./stylegan2-pytorch:./
        python -c "from exp.tests.test_styleganv2 import Testing_stylegan2;\
          Testing_stylegan2().test_train_ffhq_128()"

    :return:
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        from template_lib.v2.config_cfgnode.argparser import \
          (get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, start_cmd_run)

        tl_opts = ' '.join(sys.argv[sys.argv.index('--tl_opts') +
                                    1:]) if '--tl_opts' in sys.argv else ''
        print(f'tl_opts:\n {tl_opts}')

        command, outdir = get_command_and_outdir(
            self, func_name=sys._getframe().f_code.co_name, file=__file__)
        argv_str = f"""
                --tl_config_file none
                --tl_command none
                --tl_outdir {outdir}
                --tl_opts {tl_opts}
                """
        args, cfg = setup_outdir_and_yaml(argv_str, return_cfg=True)
        from template_lib.proj.pytorch.pytorch_hook import VerboseModel

        conv_net_new = nn.Sequential(
            collections.OrderedDict([
                ('conv1', nn.Conv2d(1, 10, kernel_size=5, padding=2)),
                ('pool1', nn.MaxPool2d(kernel_size=2)), ('relu1', nn.ReLU()),
                ('conv2', nn.Conv2d(10, 20, kernel_size=5, padding=2)),
                ('pool2', nn.MaxPool2d(kernel_size=2)), ('relu2', nn.ReLU()),
                ('dropout1', nn.Dropout2d()),
                ('rerange', Rearrange('b c h w -> b (c h w)')),
                ('linear1', nn.Linear(320, 50)), ('relu3', nn.ReLU()),
                ('dropout2', nn.Dropout()), ('linear', nn.Linear(50, 10)),
                ('logsoftmax', nn.LogSoftmax(dim=1))
            ]))

        net = VerboseModel(model=conv_net_new)

        x = torch.rand(1, 1, 16, 16)
        out = net(x)
        pass
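
Rearrange here is einops.layers.torch.Rearrange; torch's built-in nn.Flatten(1) performs the same flattening without the einops dependency. With the 1×16×16 input above, the two 2× poolings leave 20×4×4 = 320 features, which matches nn.Linear(320, 50). A quick equivalence check:

import torch
import torch.nn as nn
from einops.layers.torch import Rearrange

x = torch.rand(2, 3, 4, 4)
assert torch.equal(Rearrange('b c h w -> b (c h w)')(x), nn.Flatten(1)(x))
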
Example No. 18
  def test_plot_lines_figure(self):
    """
    Usage:
        export LD_LIBRARY_PATH=~/anaconda3/envs/py36/lib/
        export TIME_STR=1
        export PYTHONPATH=./exp:./BigGAN_PyTorch_1_lib:./
        python -c "from exp.tests.test_BigGAN import TestingCIFAR10_BigGAN_v1;\
          TestingCIFAR10_BigGAN_v1().test_save_FID_cbn_index_012_figure()"

    :return:
    """
    if 'CUDA_VISIBLE_DEVICES' not in os.environ:
      os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    if 'TIME_STR' not in os.environ:
      os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
    from template_lib.v2.config import get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, \
      start_cmd_run

    command, outdir = get_command_and_outdir(self, func_name=sys._getframe().f_code.co_name, file=__file__)
    argv_str = f"""
                    --tl_config_file exp/configs/BigGAN_v1.yaml
                    --tl_command {command}
                    --tl_outdir {outdir}
                    """
    args, cfg = setup_outdir_and_yaml(argv_str, return_cfg=True)

    import matplotlib.pyplot as plt
    import numpy as np
    from template_lib.utils import colors_dict

    fig, ax = plt.subplots()

    ax.set_xticks(range(0, 600, 100))
    ax.tick_params(labelsize=cfg.fontsize.tick_fs)
    ax.set_xlabel(cfg.xlabel, fontsize=cfg.fontsize.xylabel_fs)
    ax.set_ylabel(cfg.ylabel, fontsize=cfg.fontsize.xylabel_fs)

    colors = list(colors_dict.values())
    # colors = [plt.cm.cool(i / float(num_plot - 1)) for i in range(num_plot)]

    ax.set(**cfg.properties)
    for idx, (_, data_dict) in enumerate(cfg.lines.items()):
      log_file = os.path.join(data_dict.result_dir, data_dict.sub_path)
      data = np.loadtxt(log_file, delimiter=':')

      if 'xlim' in cfg.properties:
        data_xlim = cfg.properties.xlim[-1]
        data = data[data[:, 0] <= data_xlim]

      if cfg.get_min_value:
        best_index = data[:, 1].argmin()
      else:
        best_index = data[:, 1].argmax()
      best_x = int(data[:, 0][best_index])
      best_y = data[:, 1][best_index]

      if cfg.add_auxi_label:
        data_dict.properties.label = f'x_{best_x}-y_{best_y:.3f}-' + getattr(data_dict.properties, 'label', '')
      ax.plot(data[:, 0], data[:, 1], color=colors[idx], **data_dict.properties)
      pass

    ax.legend(prop={'size': cfg.fontsize.legend_size})
    fig.show()
    saved_file = os.path.join(args.tl_outdir, cfg.saved_file)
    fig.savefig(saved_file, bbox_inches='tight', pad_inches=0.01)
    print(f'Save to {saved_file}')
    pass
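
np.loadtxt(log_file, delimiter=':') assumes each log line has the form "step: value" (an assumption about the evaltf.*.log files, inferred from the delimiter). A minimal sketch of that parsing:

import io
import numpy as np

log_text = '0: 35.2\n500: 12.7\n1000: 9.8\n'  # assumed log format
data = np.loadtxt(io.StringIO(log_text), delimiter=':')
assert data.shape == (3, 2)  # column 0: step, column 1: metric value
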
Example No. 19
    def test_transformer(self):
        """Sequence-to-Sequence Modeling with nn.Transformer and TorchText

    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
        # func name
        assert sys._getframe().f_code.co_name.startswith('test_')
        command = sys._getframe().f_code.co_name[5:]
        class_name = self.__class__.__name__[7:] \
          if self.__class__.__name__.startswith('Testing') \
          else self.__class__.__name__
        outdir = f'results/{class_name}/{command}'

        from datetime import datetime
        TIME_STR = bool(int(os.getenv('TIME_STR', 0)))
        time_str = datetime.now().strftime("%Y%m%d-%H_%M_%S_%f")[:-3]
        outdir = outdir if not TIME_STR else (outdir + '_' + time_str)
        print(outdir)

        import collections, shutil
        shutil.rmtree(outdir, ignore_errors=True)
        os.makedirs(outdir, exist_ok=True)

        import math
        import torch
        import torch.nn as nn
        import torch.nn.functional as F

        class PositionalEncoding(nn.Module):
            """we use sine and cosine functions of different frequencies."""
            def __init__(self, d_model, dropout=0.1, max_len=5000):
                super(PositionalEncoding, self).__init__()
                self.dropout = nn.Dropout(p=dropout)

                pe = torch.zeros(max_len, d_model)
                position = torch.arange(0, max_len,
                                        dtype=torch.float).unsqueeze(1)
                div_term = torch.exp(
                    torch.arange(0, d_model, 2).float() *
                    (-math.log(10000.0) / d_model))
                pe[:, 0::2] = torch.sin(position * div_term)
                pe[:, 1::2] = torch.cos(position * div_term)
                pe = pe.unsqueeze(0).transpose(0, 1)
                self.register_buffer('pe', pe)

            def forward(self, x):
                x = x + self.pe[:x.size(0), :]
                return self.dropout(x)

        class TransformerModel(nn.Module):
            def __init__(self,
                         ntoken,
                         ninp,
                         nhead,
                         nhid,
                         nlayers,
                         dropout=0.5):
                super(TransformerModel, self).__init__()
                from torch.nn import TransformerEncoder, TransformerEncoderLayer
                self.model_type = 'Transformer'
                self.src_mask = None
                self.pos_encoder = PositionalEncoding(ninp, dropout)
                encoder_layers = TransformerEncoderLayer(
                    ninp, nhead, nhid, dropout)
                self.transformer_encoder = TransformerEncoder(
                    encoder_layers, nlayers)
                self.encoder = nn.Embedding(ntoken, ninp)
                self.ninp = ninp
                self.decoder = nn.Linear(ninp, ntoken)

                self.init_weights()

            def _generate_square_subsequent_mask(self, sz):
                mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
                mask = mask.float().masked_fill(mask == 0,
                                                float('-inf')).masked_fill(
                                                    mask == 1, float(0.0))
                return mask

            def init_weights(self):
                initrange = 0.1
                self.encoder.weight.data.uniform_(-initrange, initrange)
                self.decoder.bias.data.zero_()
                self.decoder.weight.data.uniform_(-initrange, initrange)

            def forward(self, src):
                if self.src_mask is None or self.src_mask.size(0) != len(src):
                    device = src.device
                    mask = self._generate_square_subsequent_mask(
                        len(src)).to(device)
                    self.src_mask = mask

                src = self.encoder(src) * math.sqrt(self.ninp)
                src = self.pos_encoder(src)
                output = self.transformer_encoder(src, self.src_mask)
                output = self.decoder(output)
                return output

        import torchtext
        from torchtext.data.utils import get_tokenizer
        TEXT = torchtext.data.Field(tokenize=get_tokenizer("basic_english"),
                                    init_token='<sos>',
                                    eos_token='<eos>',
                                    lower=True)
        train_txt, val_txt, test_txt = torchtext.datasets.WikiText2.splits(
            TEXT, root='datasets')
        TEXT.build_vocab(train_txt)
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        def batchify(data, bsz):
            data = TEXT.numericalize([data.examples[0].text])
            # Divide the dataset into bsz parts.
            nbatch = data.size(0) // bsz
            # Trim off any extra elements that wouldn't cleanly fit (remainders).
            data = data.narrow(0, 0, nbatch * bsz)
            # Evenly divide the data across the bsz batches.
            data = data.view(bsz, -1).t().contiguous()
            return data.to(device)

        batch_size = 20
        eval_batch_size = 10
        train_data = batchify(train_txt, batch_size)
        val_data = batchify(val_txt, eval_batch_size)
        test_data = batchify(test_txt, eval_batch_size)

        bptt = 35

        def get_batch(source, i):
            seq_len = min(bptt, len(source) - 1 - i)
            data = source[i:i + seq_len]
            target = source[i + 1:i + 1 + seq_len].view(-1)
            return data, target

        ntokens = len(TEXT.vocab.stoi)  # the size of vocabulary
        emsize = 200  # embedding dimension
        nhid = 200  # the dimension of the feedforward network model in nn.TransformerEncoder
        nlayers = 2  # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder
        nhead = 2  # the number of heads in the multiheadattention models
        dropout = 0.2  # the dropout value
        model = TransformerModel(ntokens, emsize, nhead, nhid, nlayers,
                                 dropout).to(device)

        criterion = nn.CrossEntropyLoss()
        lr = 5.0  # learning rate
        optimizer = torch.optim.SGD(model.parameters(), lr=lr)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)

        import time

        def train():
            model.train()  # Turn on the train mode
            total_loss = 0.
            start_time = time.time()
            ntokens = len(TEXT.vocab.stoi)
            for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)):
                data, targets = get_batch(train_data, i)
                optimizer.zero_grad()
                output = model(data)
                loss = criterion(output.view(-1, ntokens), targets)
                loss.backward()
                grad_norm = torch.nn.utils.clip_grad_norm_(
                    model.parameters(), 0.5)
                optimizer.step()

                total_loss += loss.item()
                log_interval = 200
                if batch % log_interval == 0 and batch > 0:
                    cur_loss = total_loss / log_interval
                    elapsed = time.time() - start_time
                    print('| epoch {:3d} | {:5d}/{:5d} batches | '
                          'lr {:02.2f} | ms/batch {:5.2f} | '
                          'loss {:5.2f} | ppl {:8.2f}'.format(
                              epoch, batch,
                              len(train_data) // bptt,
                              scheduler.get_lr()[0],
                              elapsed * 1000 / log_interval, cur_loss,
                              math.exp(cur_loss)))
                    total_loss = 0
                    start_time = time.time()

        def evaluate(eval_model, data_source):
            eval_model.eval()  # Turn on the evaluation mode
            total_loss = 0.
            ntokens = len(TEXT.vocab.stoi)
            with torch.no_grad():
                for i in range(0, data_source.size(0) - 1, bptt):
                    data, targets = get_batch(data_source, i)
                    output = eval_model(data)
                    output_flat = output.view(-1, ntokens)
                    total_loss += len(data) * criterion(output_flat,
                                                        targets).item()
            return total_loss / (len(data_source) - 1)

        best_val_loss = float("inf")
        epochs = 3  # The number of epochs
        best_model = None

        for epoch in range(1, epochs + 1):
            epoch_start_time = time.time()
            train()
            val_loss = evaluate(model, val_data)
            print('-' * 89)
            print(
                '| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
                'valid ppl {:8.2f}'.format(epoch,
                                           (time.time() - epoch_start_time),
                                           val_loss, math.exp(val_loss)))
            print('-' * 89)

            if val_loss < best_val_loss:
                best_val_loss = val_loss
                best_model = model

            scheduler.step()

        test_loss = evaluate(best_model, test_data)
        print('=' * 89)
        print(
            '| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
                test_loss, math.exp(test_loss)))
        print('=' * 89)
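
This example targets the legacy torchtext API: torchtext.data.Field and torchtext.datasets.WikiText2 moved to torchtext.legacy in 0.9 and were removed in 0.12, so running it needs torchtext < 0.12. On 0.9-0.11 the imports become:

# from torchtext.legacy import data as tt_data, datasets as tt_datasets
# TEXT = tt_data.Field(tokenize=get_tokenizer('basic_english'),
#                      init_token='<sos>', eos_token='<eos>', lower=True)
# train_txt, val_txt, test_txt = tt_datasets.WikiText2.splits(TEXT, root='datasets')

Also, scheduler.get_lr() in the logging line is deprecated; scheduler.get_last_lr()[0] is the non-deprecated way to read the current rate.
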
Example No. 20
  def test_save_early_collapse_on_cifar100(self):
    """
    Usage:
        export LD_LIBRARY_PATH=~/anaconda3/envs/py36/lib/
        export TIME_STR=1
        export PYTHONPATH=./exp:./BigGAN_PyTorch_1_lib:./
        python -c "from exp.tests.test_BigGAN import TestingCIFAR10_BigGAN_v1;\
          TestingCIFAR10_BigGAN_v1().test_save_FID_cbn_index_012_figure()"

    :return:
    """
    if 'CUDA_VISIBLE_DEVICES' not in os.environ:
      os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    if 'TIME_STR' not in os.environ:
      os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
    from template_lib.v2.config_cfgnode.argparser import get_command_and_outdir, setup_outdir_and_yaml, \
      get_append_cmd_str, start_cmd_run

    command, outdir = get_command_and_outdir(self, func_name=sys._getframe().f_code.co_name, file=__file__)
    argv_str = f"""
                    --tl_config_file exp/configs/Figures.yaml
                    --tl_command {command}
                    --tl_outdir {outdir}
                    """
    args, cfg = setup_outdir_and_yaml(argv_str, return_cfg=True)

    cfg_str = """
              xlabel: "Epoch"
              ylabel: "FID"
              fontsize:
                tick_fs: 14
                xylabel_fs: 20
                legend_size: 12
              clip_x:
                - 0
                - 24500096
            #  properties:
            #    xlim:
            #      - 0
            #      - 336
            #    ylim:
            #      - 0
            #      - 30
              get_min_value: true
              add_auxi_label: false
              lines:
                biggan_pd:
                  result_dir: "results/CIFAR10/train_cifar100_\
                  20201002-19_37_05_825"
                  sub_path: "textdir/evaltf.ma0.FID_tf.log"
                  properties:
                    label: "projection discriminator (BigGAN)"
                    marker: "."
              saved_file: "early_collapse_cifar100.pdf"
            """

    import matplotlib.pyplot as plt
    import numpy as np
    from template_lib.utils import colors_dict

    fig, ax = plt.subplots()
    # ax.set_xticks(range(0, 600, 100))
    ax.tick_params(labelsize=cfg.fontsize.tick_fs)
    ax.set_xlabel(cfg.xlabel, fontsize=cfg.fontsize.xylabel_fs)
    ax.set_ylabel(cfg.ylabel, fontsize=cfg.fontsize.xylabel_fs)

    colors = list(colors_dict.values())
    # colors = [plt.cm.cool(i / float(num_plot - 1)) for i in range(num_plot)]

    properties = cfg.get('properties', {})
    ax.set(**properties)
    for idx, (_, data_dict) in enumerate(cfg.lines.items()):
      log_file = os.path.join(data_dict.result_dir, data_dict.sub_path)
      data = np.loadtxt(log_file, delimiter=':')
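      # Assumed log layout (not shown in this excerpt): one colon-separated
      # "step: value" pair per line, e.g.
      #   0: 87.31
      #   5000: 42.10
      # so np.loadtxt yields an (N, 2) array of (step, FID) rows.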

      if 'clip_x' in cfg:
        data_xlim = cfg.clip_x[-1]
        data = data[data[:, 0] <= data_xlim]

      if cfg.get_min_value:
        best_index = data[:, 1].argmin()
      else:
        best_index = data[:, 1].argmax()
      best_x = int(data[:, 0][best_index])
      best_y = data[:, 1][best_index]

      if cfg.add_auxi_label:
        data_dict.properties.label = f'x_{best_x}-y_{best_y:.3f}-' + getattr(data_dict.properties, 'label', '')
      ax.plot(data[:, 0], data[:, 1], color=colors[idx], **data_dict.properties)
      pass

    ax.legend(prop={'size': cfg.fontsize.legend_size})
    fig.show()
    saved_file = os.path.join(args.tl_outdir, cfg.saved_file)
    fig.savefig(saved_file, bbox_inches='tight', pad_inches=0.01)
    print(f'Save to {saved_file}')
    pass
Example #21
    def test_omni_GAN(self, debug=True):
        """
    Usage:
        python template_lib/modelarts/scripts/copy_tool.py \
          -s s3://bucket-7001/ZhouPeng/pypi/torch1_7_0 -d /cache/pypi -t copytree
        for filename in /cache/pypi/*.whl; do
            pip install $filename
        done
        proj_root=moco-exp
        python template_lib/modelarts/scripts/copy_tool.py \
          -s s3://bucket-7001/ZhouPeng/codes/$proj_root -d /cache/$proj_root -t copytree -b /cache/$proj_root/code.zip
        cd /cache/$proj_root
        pip install -r requirements.txt

        export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
        export TIME_STR=1
        export PYTHONPATH=./exp:./stylegan2-pytorch:./
        python 	-c "from exp.tests.test_styleganv2 import Testing_stylegan2;\
          Testing_stylegan2().test_train_ffhq_128()"

    :return:
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        from template_lib.v2.config_cfgnode.argparser import \
          (get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, start_cmd_run)

        tl_opts = ' '.join(sys.argv[sys.argv.index('--tl_opts') +
                                    1:]) if '--tl_opts' in sys.argv else ''
        print(f'tl_opts:\n {tl_opts}')

        command, outdir = get_command_and_outdir(
            self, func_name=sys._getframe().f_code.co_name, file=__file__)
        argv_str = f"""
                --tl_config_file template_lib/v2/GAN/loss/configs/omni_loss_OmniLoss.yaml
                --tl_command {command}
                --tl_outdir {outdir}
                """
        args, cfg = setup_outdir_and_yaml(argv_str, return_cfg=True)
        import torch
        from template_lib.v2.GAN.loss import build_GAN_loss

        omni_loss = build_GAN_loss(cfg)

        b, nc = 32, 100
        out_dim = nc + 2
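        # nc per-class logits plus two extra ones; judging from the calls below,
        # index out_dim - 2 serves as the shared "real" logit and out_dim - 1 as
        # the "fake" logit (inferred from this test, not a documented contract).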

        pred = torch.rand(b, out_dim).cuda().requires_grad_()
        y = torch.randint(0, nc, (b, )).cuda()

        D_loss_real, logits_pos, logits_neg = omni_loss(pred=pred,
                                                        positive=(y,
                                                                  out_dim - 2),
                                                        return_logits=True)

        D_loss_fake = omni_loss(pred=pred, positive=(out_dim - 1, ))

        G_loss, logits_pos, logits_neg = omni_loss(pred=pred,
                                                   positive=(y, out_dim - 2),
                                                   return_logits=True)

        pass
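For intuition, here is one plausible formulation of such a multi-label GAN loss, written under the assumption that it follows the unified log-sum-exp form Omni-GAN builds on; treat it as a sketch of the idea, not as `build_GAN_loss`'s actual implementation:

    import torch
    import torch.nn.functional as F

    def multi_label_loss(logits, pos_mask):
        # log(1 + sum_neg e^{s_n}) + log(1 + sum_pos e^{-s_p}), averaged over the
        # batch: positive logits are pushed up while negatives are pushed down.
        neg = logits.masked_fill(pos_mask, float('-inf'))
        pos = (-logits).masked_fill(~pos_mask, float('-inf'))
        return (F.softplus(torch.logsumexp(neg, dim=1)) +
                F.softplus(torch.logsumexp(pos, dim=1))).mean()

For the real-sample term above, `pos_mask` would mark columns `y` and `out_dim - 2` of each row.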
Example #22
  def test_save_OmniGAN_ImageNet128_IS(self):
    """
    Usage:
        export TIME_STR=1
        export PYTHONPATH=./exp:./BigGAN_PyTorch_1_lib:./
        python -c "from exp.tests.test_BigGAN_v1 import Testing_Figures;\
          Testing_Figures().test_save_early_collapse_on_cifar100()"

    :return:
    """
    if 'CUDA_VISIBLE_DEVICES' not in os.environ:
      os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    if 'TIME_STR' not in os.environ:
      os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
    from template_lib.v2.config_cfgnode.argparser import get_command_and_outdir, setup_outdir_and_yaml, \
      get_append_cmd_str, start_cmd_run
    from template_lib.v2.matplot import set_font
    set_font()

    command, outdir = get_command_and_outdir(self, func_name=sys._getframe().f_code.co_name, file=__file__)
    argv_str = f"""
                    --tl_config_file exp/tests/configs/Figures.yaml
                    --tl_command {command}
                    --tl_outdir {outdir}
                    """
    args, cfg = setup_outdir_and_yaml(argv_str, return_cfg=True)

    import matplotlib.pyplot as plt
    import numpy as np
    import pickle
    from template_lib.utils import colors_dict

    fig, ax = plt.subplots()
    # ax.set_xticks(range(0, 600, 100))
    ax.tick_params(labelsize=cfg.fontsize.tick_fs)
    ax.set_xlabel(cfg.xlabel, fontsize=cfg.fontsize.xylabel_fs)
    ax.set_ylabel(cfg.ylabel, fontsize=cfg.fontsize.xylabel_fs)

    properties = cfg.get('properties', {})
    ax.set(**properties)
    with open(cfg.data_pickle, 'rb') as f:
      data_list = pickle.load(f)
    load_data_dict = data_list[cfg.data_index]
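    # Assumed pickle layout (not shown here): data_list is a list of dicts, each
    # mapping a line key (cfg.lines.*.key) to an (N, 2) ndarray of (step, score)
    # pairs, which is what the slicing below expects.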

    for idx, (_, data_dict) in enumerate(cfg.lines.items()):
      data = load_data_dict[data_dict.key]

      if 'clip_x' in cfg:
        data_xlim = cfg.clip_x[-1]
        data = data[data[:, 0] <= data_xlim]

      if cfg.get_min_value:
        best_index = data[:, 1].argmin()
      else:
        best_index = data[:, 1].argmax()
      best_x = int(data[:, 0][best_index])
      best_y = data[:, 1][best_index]

      if cfg.get('add_auxi_label', False):
        data_dict.properties.label = f'x_{best_x}-y_{best_y:.3f}-' + getattr(data_dict.properties, 'label', '')
      ax.plot(data[:, 0], data[:, 1], color=colors_dict[data_dict.color], **data_dict.properties)
      pass

    ax.legend(prop={'size': cfg.fontsize.legend_size}, ncol=1)
    fig.show()
    saved_file = os.path.join(args.tl_outdir, cfg.saved_file)
    fig.savefig(saved_file, bbox_inches='tight', pad_inches=0.01)
    print(f'Save to {saved_file}')
    pass
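Example #23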
    def test_cmap(self):
        """

    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        # func name
        assert sys._getframe().f_code.co_name.startswith('test_')
        command = sys._getframe().f_code.co_name[5:]
        class_name = self.__class__.__name__[7:] \
          if self.__class__.__name__.startswith('Testing') \
          else self.__class__.__name__
        outdir = f'results/{class_name}/{command}'

        from template_lib.utils.plot_results import PlotResults
        import collections, shutil

        shutil.rmtree(outdir, ignore_errors=True)
        os.makedirs(outdir, exist_ok=True)

        from matplotlib import colors
        import matplotlib.pyplot as plt
        import numpy as np

        np.random.seed(19680801)
        Nr = 3
        Nc = 2
        cmap = "cool"

        fig, axs = plt.subplots(Nr, Nc)
        fig.suptitle('Multiple images')

        images = []
        for i in range(Nr):
            for j in range(Nc):
                # Generate data with a range that varies from one plot to the next.
                data = ((1 + i + j) / 10) * np.random.rand(10, 20) * 1e-6
                images.append(axs[i, j].imshow(data, cmap=cmap))
                axs[i, j].label_outer()

        # Find the min and max of all colors for use in setting the color scale.
        vmin = min(image.get_array().min() for image in images)
        vmax = max(image.get_array().max() for image in images)
        norm = colors.Normalize(vmin=vmin, vmax=vmax)
        for im in images:
            im.set_norm(norm)

        fig.colorbar(images[0], ax=axs, orientation='horizontal', fraction=.1)

        # Make images respond to changes in the norm of other images (e.g. via the
        # "edit axis, curves and images parameters" GUI on Qt), but be careful not to
        # recurse infinitely!
        def update(changed_image):
            for im in images:
                if (changed_image.get_cmap() != im.get_cmap()
                        or changed_image.get_clim() != im.get_clim()):
                    im.set_cmap(changed_image.get_cmap())
                    im.set_clim(changed_image.get_clim())

        for im in images:
            # matplotlib 3.3+ renamed `callbacksSM` to `callbacks`
            im.callbacks.connect('changed', update)

        plt.show()
Example #24
    def test_plot_compare_methods(self):
        """

    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
        # func name
        assert sys._getframe().f_code.co_name.startswith('test_')
        command = sys._getframe().f_code.co_name[5:]
        class_name = self.__class__.__name__[7:] \
          if self.__class__.__name__.startswith('Testing') \
          else self.__class__.__name__
        outdir = f'results/{class_name}/{command}'

        from datetime import datetime
        TIME_STR = bool(int(os.getenv('TIME_STR', 0)))
        time_str = datetime.now().strftime("%Y%m%d-%H_%M_%S_%f")[:-3]
        outdir = outdir if not TIME_STR else (outdir + '_' + time_str)
        print(outdir)

        import collections, shutil
        shutil.rmtree(outdir, ignore_errors=True)
        os.makedirs(outdir, exist_ok=True)

        from collections import OrderedDict
        from functools import partial
        from time import time

        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        from matplotlib.ticker import NullFormatter

        from sklearn import manifold, datasets

        # Next line to silence pyflakes. This import is needed.
        Axes3D

        n_points = 1000
        X, color = datasets.make_s_curve(n_points, random_state=0)
        n_neighbors = 10
        n_components = 2

        # Create figure
        fig = plt.figure(figsize=(15, 8))
        fig.suptitle("Manifold Learning with %i points, %i neighbors" %
                     (n_points, n_neighbors),
                     fontsize=14)

        # Add 3d scatter plot
        ax = fig.add_subplot(251, projection='3d')
        ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
        ax.view_init(4, -72)

        # Set-up manifold methods
        LLE = partial(manifold.LocallyLinearEmbedding,
                      n_neighbors=n_neighbors,
                      n_components=n_components,
                      eigen_solver='auto')

        methods = OrderedDict()
        methods['LLE'] = LLE(method='standard')
        methods['LTSA'] = LLE(method='ltsa')
        methods['Hessian LLE'] = LLE(method='hessian')
        methods['Modified LLE'] = LLE(method='modified')
        methods['Isomap'] = manifold.Isomap(n_neighbors=n_neighbors,
                                            n_components=n_components)
        methods['MDS'] = manifold.MDS(n_components=n_components,
                                      max_iter=100, n_init=1)
        methods['SE'] = manifold.SpectralEmbedding(n_components=n_components,
                                                   n_neighbors=n_neighbors)
        methods['t-SNE'] = manifold.TSNE(n_components=n_components,
                                         init='pca',
                                         random_state=0)

        # Plot results
        for i, (label, method) in enumerate(methods.items()):
            t0 = time()
            Y = method.fit_transform(X)
            t1 = time()
            print("%s: %.2g sec" % (label, t1 - t0))
            ax = fig.add_subplot(2, 5, 2 + i + (i > 3))
            ax.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
            ax.set_title("%s (%.2g sec)" % (label, t1 - t0))
            ax.xaxis.set_major_formatter(NullFormatter())
            ax.yaxis.set_major_formatter(NullFormatter())
            ax.axis('tight')

        plt.show()
        pass
Example #25
    def test_modelarts_copy_data(self):
        """
    Usage:
        exp_name=wgan-pytorch0
        export root_obs=s3://bucket-cv-competition/ZhouPeng
        mkdir -p /cache/.keras/ && rm -rf $HOME/.keras && ln -s /cache/.keras $HOME/.keras
        export RESULTS_OBS=$root_obs/results/$exp_name
        python /home/work/user-job-dir/code/copy_tool.py \
          -s $root_obs/code/$exp_name \
          -d /cache/code/$exp_name -t copytree
        ln -s /cache/code/$exp_name /cache/code/template_lib
        cd /cache/code/$exp_name/

        export CUDA_VISIBLE_DEVICES=0
        export PORT=6006
        export TIME_STR=1
        export PYTHONPATH=../
        python -c "from utils import modelarts_utils; \
          modelarts_utils.TestingUnit().test_modelarts_copy_data()"
    :return:
    """
        import template_lib.utils as utils
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
        # func name
        outdir = os.path.join('results', sys._getframe().f_code.co_name)
        myargs = argparse.Namespace()

        def build_args():
            argv_str = f"""
            --config ./configs/config.yaml 
            --command test_command
            --resume False --resume_path None
            --resume_root None
            """
            parser = utils.args_parser.build_parser()
            if len(sys.argv) == 1:
                args = parser.parse_args(args=argv_str.split())
            else:
                args = parser.parse_args()
            args.CUDA_VISIBLE_DEVICES = os.environ['CUDA_VISIBLE_DEVICES']
            args = utils.config_utils.DotDict(vars(args))
            return args, argv_str

        args, argv_str = build_args()

        args.outdir = outdir
        args, myargs = utils.config.setup_args_and_myargs(args=args,
                                                          myargs=myargs,
                                                          start_tb=False)

        modelarts_sync_results(args, myargs, join=True)

        datapath_obs = 's3://bucket-cv-competition/ZhouPeng/keras/cifar10'
        datapath = '~/.keras/cifar10'
        modelarts_copy_data(datapath_obs=datapath_obs,
                            datapath=datapath,
                            overwrite=True)

        datapath_obs = 's3://bucket-cv-competition/ZhouPeng/keras/cifar10/cifar10_inception_moments.npz'
        datapath = '~/.keras/cifar10_inception_moments.npz'
        modelarts_copy_data(datapath_obs=datapath_obs,
                            datapath=datapath,
                            overwrite=False)

        modelarts_sync_results(args, myargs, join=True, end=True)
        input('End %s' % outdir)
        return
Example #26
    def test_show_video(self, debug=True):
        """
    Usage:

        export CUDA_VISIBLE_DEVICES=7
        export TIME_STR=1
        export PYTHONPATH=./
        python -c "from template_lib.proj.streamlit.tests.test_streamlit import Testing_Streamlit;\
          Testing_Streamlit().test_show_video(debug=False)" \
          --tl_opts port 8530 start_web True show_video.num_video 7

    :return:
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        from template_lib.v2.config_cfgnode.argparser import \
          (get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, start_cmd_run)

        tl_opts = ' '.join(sys.argv[sys.argv.index('--tl_opts') +
                                    1:]) if '--tl_opts' in sys.argv else ''
        print(f'tl_opts:\n {tl_opts}')

        command, outdir = get_command_and_outdir(
            self, func_name=sys._getframe().f_code.co_name, file=__file__)
        argv_str = f"""
                --tl_config_file template_lib/proj/streamlit/scripts/configs/Streamlit.yaml
                --tl_command {command}
                --tl_outdir {outdir}
                --tl_opts {tl_opts}
                """
        args, cfg = setup_outdir_and_yaml(argv_str, return_cfg=True)

        n_gpus = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))

        script = "template_lib/proj/streamlit/scripts/run_web.py"
        if debug:
            cmd_str = f"""
          python 
            {script}
            {get_append_cmd_str(args)}
            --tl_debug
            --tl_opts
              """
        else:
            if cfg.start_web:
                cmd_str_prefix = f"""
                {os.path.dirname(sys.executable)}/streamlit run --server.port {cfg.port} 
                {script}
                --
              """
            else:
                cmd_str_prefix = f"python {script}"
            cmd_str = f"""
          {cmd_str_prefix}
            {get_append_cmd_str(args)}
            --tl_opts {tl_opts}
        """
        start_cmd_run(cmd_str)
        pass
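With `--tl_opts port 8530 start_web True` on the command line (as in the usage string) and `start_web` enabled in the config, the non-debug branch assembles a command of roughly this shape (an illustration of the f-strings above, not captured output):

    <python-env-bin>/streamlit run --server.port 8530 \
      template_lib/proj/streamlit/scripts/run_web.py -- \
      --tl_config_file ... --tl_command ... --tl_outdir ... --tl_opts port 8530 start_web True

where the `--` separates streamlit's own flags from those forwarded to the script.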
Example #27
    def test_step_closure(self, debug=True):
        """
    Usage:
        python template_lib/modelarts/scripts/copy_tool.py \
          -s s3://bucket-7001/ZhouPeng/pypi/torch1_7_0 -d /cache/pypi -t copytree
        for filename in /cache/pypi/*.whl; do
            pip install $filename
        done
        proj_root=moco-exp
        python template_lib/modelarts/scripts/copy_tool.py \
          -s s3://bucket-7001/ZhouPeng/codes/$proj_root -d /cache/$proj_root -t copytree -b /cache/$proj_root/code.zip
        cd /cache/$proj_root
        pip install -r requirements.txt

        export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
        export TIME_STR=1
        export PYTHONPATH=./exp:./stylegan2-pytorch:./
        python 	-c "from exp.tests.test_styleganv2 import Testing_stylegan2;\
          Testing_stylegan2().test_train_ffhq_128()"

    :return:
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        from template_lib.v2.config_cfgnode.argparser import \
          (get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, start_cmd_run)

        tl_opts = ' '.join(sys.argv[sys.argv.index('--tl_opts') +
                                    1:]) if '--tl_opts' in sys.argv else ''
        print(f'tl_opts:\n {tl_opts}')

        command, outdir = get_command_and_outdir(
            self, func_name=sys._getframe().f_code.co_name, file=__file__)
        argv_str = f"""
                --tl_config_file none
                --tl_command none
                --tl_outdir {outdir}
                """
        args, cfg = setup_outdir_and_yaml(argv_str, return_cfg=True)

        import torch
        from torch import nn
        from torch.optim import SGD
        from torch.nn import CrossEntropyLoss

        class DummyModel(nn.Module):
            def __init__(self, class_num=10):
                super(DummyModel, self).__init__()
                self.base = nn.Sequential(
                    nn.Conv2d(3, 64, kernel_size=3, padding=1),
                    nn.ReLU(),
                    nn.Conv2d(64, 128, kernel_size=3, padding=1),
                    nn.ReLU(),
                )
                self.gap = nn.AdaptiveAvgPool2d(1)
                self.fc = nn.Linear(128, class_num)

            def forward(self, x):
                x = self.base(x)
                x = self.gap(x)
                x = x.view(x.shape[0], -1)
                x = self.fc(x)
                return x

        dummy_model = DummyModel().cuda()

        optimizer = SGD(dummy_model.parameters(),
                        lr=1e-2,
                        momentum=0.9,
                        weight_decay=1e-4)
        # define the loss
        loss_fn = CrossEntropyLoss()
        # build fake data
        batch_size = 2
        data = torch.randn(64, 3, 64,
                           128).cuda()  # fake inputs, shape 64 * 3 * 64 * 128
        data_label = torch.randint(0, 10, size=(64, ),
                                   dtype=torch.long).cuda()  # fake labels

        for batch_index in range(10):
            batch_data = data[batch_index *
                              batch_size:batch_index * batch_size + batch_size]
            batch_label = data_label[batch_index *
                                     batch_size:batch_index * batch_size +
                                     batch_size]

            def closure():
                optimizer.zero_grad()  # clear stale gradients
                output = dummy_model(batch_data)  # forward
                loss = loss_fn(output, batch_label)  # compute the loss
                loss.backward()  # backward
                print('No.{: 2d} loss: {:.6f}'.format(batch_index,
                                                      loss.item()))
                return loss

            optimizer.step(closure=closure)  # update the parameters

        pass
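For plain SGD the closure is optional sugar: `step(closure)` simply calls it once. It matters for optimizers such as L-BFGS, which re-evaluate the loss several times per step during their line search. A minimal sketch reusing `dummy_model` and the last `closure` (and batch) from above:

    from torch.optim import LBFGS

    lbfgs = LBFGS(dummy_model.parameters(), lr=0.1)
    # LBFGS may invoke the closure multiple times per .step(), so the closure
    # must recompute the loss and gradients from scratch on every call.
    lbfgs.step(closure)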
Example #28
    def test_sr_merge_original_image_and_patches_tensor(self, debug=True):
        """
    Usage:
        export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
        export TIME_STR=1
        export PYTHONPATH=./exp:./stylegan2-pytorch:./
        python 	-c "from exp.tests.test_styleganv2 import Testing_stylegan2;\
          Testing_stylegan2().test_train_ffhq_128()"

    :return:
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        from template_lib.v2.config_cfgnode.argparser import \
          (get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, start_cmd_run)

        tl_opts = ' '.join(sys.argv[sys.argv.index('--tl_opts') +
                                    1:]) if '--tl_opts' in sys.argv else ''
        print(f'tl_opts:\n {tl_opts}')

        command, outdir = get_command_and_outdir(
            self, func_name=sys._getframe().f_code.co_name, file=__file__)
        argv_str = f"""
                --tl_config_file none
                --tl_command none
                --tl_outdir {outdir}
                --tl_opts {tl_opts}
                """
        args, cfg = setup_outdir_and_yaml(argv_str, return_cfg=True)

        n_gpus = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
        import torchvision.transforms.functional as trans_f
        from PIL import Image
        import matplotlib.pyplot as plt

        img_path = "template_lib/datasets/images/zebra_GT_target_origin.png"
        # img_path = "results/Recons_SR_web_OmniINRGAN256/load_G_recons_sr_OmniInrGAN256/imgdir/zebra_GT_reconstruction64x.png"

        left = [640, 2300]
        upper = [960, 800]
        w = 400
        h = 160
        pad = 20
        size = 2048
        width = 20
        patch_width = 5
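        # Presumably (left[i], upper[i]) mark the top-left corners of two zoom-in
        # patches of size w x h on the resized image, with `width`/`patch_width`
        # controlling border thickness; this is inferred from the call below,
        # since sr_merge_original_image_and_patches_tensor is defined elsewhere.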

        image = Image.open(img_path)
        img_tensor = trans_f.to_tensor(image)
        img_tensor = trans_f.resize(img_tensor,
                                    size=size,
                                    interpolation=Image.NEAREST)

        out_image = sr_merge_original_image_and_patches_tensor(
            img_tensor=img_tensor,
            lefts=left,
            uppers=upper,
            w=w,
            h=h,
            pad=pad,
            width=width,
            patch_width=patch_width)

        out_image = trans_f.to_pil_image(out_image)
        fig, axes = plt.subplots(1, 1)
        axes.imshow(out_image)
        # axes[1].imshow(patch)
        fig.show()
        pass
Example #29
    def test_diff_lr(self, debug=True):
        """
    Usage:
        python template_lib/modelarts/scripts/copy_tool.py \
          -s s3://bucket-7001/ZhouPeng/pypi/torch1_7_0 -d /cache/pypi -t copytree
        for filename in /cache/pypi/*.whl; do
            pip install $filename
        done
        proj_root=moco-exp
        python template_lib/modelarts/scripts/copy_tool.py \
          -s s3://bucket-7001/ZhouPeng/codes/$proj_root -d /cache/$proj_root -t copytree -b /cache/$proj_root/code.zip
        cd /cache/$proj_root
        pip install -r requirements.txt

        export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
        export TIME_STR=1
        export PYTHONPATH=./exp:./stylegan2-pytorch:./
        python 	-c "from exp.tests.test_styleganv2 import Testing_stylegan2;\
          Testing_stylegan2().test_train_ffhq_128()"

    :return:
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        from template_lib.v2.config_cfgnode.argparser import \
          (get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, start_cmd_run)

        tl_opts = ' '.join(sys.argv[sys.argv.index('--tl_opts') +
                                    1:]) if '--tl_opts' in sys.argv else ''
        print(f'tl_opts:\n {tl_opts}')

        command, outdir = get_command_and_outdir(
            self, func_name=sys._getframe().f_code.co_name, file=__file__)
        argv_str = f"""
                --tl_config_file none
                --tl_command none
                --tl_outdir {outdir}
                """
        args, cfg = setup_outdir_and_yaml(argv_str, return_cfg=True)

        from torch.optim import SGD
        from torch import nn

        class DummyModel(nn.Module):
            def __init__(self, class_num=10):
                super(DummyModel, self).__init__()
                self.base = nn.Sequential(
                    nn.Conv2d(3, 64, kernel_size=3, padding=1),
                    nn.ReLU(),
                    nn.Conv2d(64, 128, kernel_size=3, padding=1),
                    nn.ReLU(),
                )
                self.gap = nn.AdaptiveAvgPool2d(1)
                self.fc = nn.Linear(128, class_num)

            def forward(self, x):
                x = self.base(x)
                x = self.gap(x)
                x = x.view(x.shape[0], -1)
                x = self.fc(x)
                return x

        model = DummyModel().cuda()

        optimizer = SGD(
            [
                {
                    'params': model.base.parameters()
                },
                {
                    'params': model.fc.parameters(),
                    'lr': 1e-3
                }  # a different learning rate for the fc parameters
            ],
            lr=1e-2,
            momentum=0.9)

        pass
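Each dict passed to SGD becomes one parameter group; settings missing from a group fall back to the constructor defaults, which can be verified directly:

    for group in optimizer.param_groups:
        # group 0 (model.base): lr = 1e-2, inherited from the default;
        # group 1 (model.fc):   lr = 1e-3, from its own dict; momentum is shared.
        print(len(group['params']), group['lr'], group['momentum'])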
Example #30
    def test_learning_methods_on_graphs(self):
        """
    export LD_LIBRARY_PATH=/usr/local/cuda-10.0/lib64:/usr/local/cudnn-10.0-v7.6.5.32
    proxychains python -c "from template_lib.examples.DGL.geometric.test_pytorch_geometric import TestingGeometric;\
      TestingGeometric().test_learning_methods_on_graphs()"

    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
        # func name
        assert sys._getframe().f_code.co_name.startswith('test_')
        command = sys._getframe().f_code.co_name[5:]
        class_name = self.__class__.__name__[7:] \
          if self.__class__.__name__.startswith('Testing') \
          else self.__class__.__name__
        outdir = f'results/{class_name}/{command}'

        from datetime import datetime
        TIME_STR = bool(int(os.getenv('TIME_STR', 0)))
        time_str = datetime.now().strftime("%Y%m%d-%H_%M_%S_%f")[:-3]
        outdir = outdir if not TIME_STR else (outdir + '_' + time_str)
        print(outdir)

        import collections, shutil
        shutil.rmtree(outdir, ignore_errors=True)
        os.makedirs(outdir, exist_ok=True)

        from torch_geometric.datasets import Planetoid

        dataset = Planetoid(root='datasets/cora', name='Cora')

        import torch
        import torch.nn.functional as F
        from torch_geometric.nn import GCNConv

        class Net(torch.nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv1 = GCNConv(dataset.num_node_features, 16)
                self.conv2 = GCNConv(16, dataset.num_classes)

            def forward(self, data):
                x, edge_index = data.x, data.edge_index

                x = self.conv1(x, edge_index)
                x = F.relu(x)
                x = F.dropout(x, training=self.training)
                x = self.conv2(x, edge_index)

                return F.log_softmax(x, dim=1)

        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        model = Net().to(device)
        data = dataset[0].to(device)
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=0.01,
                                     weight_decay=5e-4)

        model.train()
        for epoch in range(200):
            optimizer.zero_grad()
            out = model(data)
            loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
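            # nll_loss expects log-probabilities, which forward() supplies via log_softmax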
            loss.backward()
            optimizer.step()

        model.eval()
        _, pred = model(data).max(dim=1)
        correct = float(pred[data.test_mask].eq(
            data.y[data.test_mask]).sum().item())
        acc = correct / data.test_mask.sum().item()
        print('Accuracy: {:.4f}'.format(acc))
        pass