Example #1: MixedActCell.__init__
    def __init__(self, cfg, **kwargs):
        super(MixedActCell, self).__init__()

        in_channels = get_attr_kwargs(cfg, "in_channels", **kwargs)
        out_channels = get_attr_kwargs(cfg, "out_channels", **kwargs)
        ksize = get_attr_kwargs(cfg, "ksize", default=3, **kwargs)
        up_mode = get_attr_kwargs(cfg, "up_mode", **kwargs)
        num_skip_in = get_attr_kwargs(cfg, "num_skip_in", default=0, **kwargs)
        short_cut = get_attr_kwargs(cfg, "short_cut", default=False, **kwargs)
        norm = get_attr_kwargs(cfg, "norm", default=None, **kwargs)
        cfg_mix_layer = get_attr_kwargs(cfg, "cfg_mix_layer", **kwargs)
        cfg_ops = get_attr_kwargs(cfg, "cfg_ops", **kwargs)

        self.c1 = nn.Conv2d(in_channels,
                            out_channels,
                            ksize,
                            padding=ksize // 2)
        self.c2 = nn.Conv2d(out_channels,
                            out_channels,
                            ksize,
                            padding=ksize // 2)

        self.act1 = build_d2layer(cfg_mix_layer,
                                  cfg_ops=cfg_ops,
                                  num_parameters=in_channels,
                                  out_channels=in_channels)
        self.act2 = build_d2layer(cfg_mix_layer,
                                  cfg_ops=cfg_ops,
                                  num_parameters=out_channels,
                                  out_channels=out_channels)

        assert up_mode in UP_MODES
        self.up_mode = up_mode
        self.norm = norm
        if norm:
            assert norm in NORMS
            if norm == 'bn':
                self.n1 = nn.BatchNorm2d(in_channels)
                self.n2 = nn.BatchNorm2d(out_channels)
            elif norm == 'in':
                self.n1 = nn.InstanceNorm2d(in_channels)
                self.n2 = nn.InstanceNorm2d(out_channels)
            else:
                raise NotImplementedError(norm)

        # inner shortcut
        self.c_sc = None
        if short_cut:
            self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1)

        # cross scale skip
        self.skip_in_ops = None
        if num_skip_in:
            self.skip_in_ops = nn.ModuleList([
                nn.Conv2d(out_channels, out_channels, kernel_size=1)
                for _ in range(num_skip_in)
            ])
        pass
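
This constructor only builds the sub-modules; the forward wiring is not part of the snippet. As a hint at how such attributes (up_mode, act1/c1, act2/c2, the optional c_sc shortcut) are commonly composed in up-sampling GAN cells, here is a minimal self-contained sketch in plain PyTorch. It is an assumed wiring for illustration only, not MixedActCell's actual forward, and it leaves out the norm layers and the cross-scale skip_in_ops.

import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyUpCell(nn.Module):
    """Illustrative up-cell wiring; the real MixedActCell.forward is not shown above."""
    def __init__(self, in_channels=8, out_channels=8, up_mode='nearest', short_cut=True):
        super().__init__()
        self.up_mode = up_mode
        self.act1 = nn.ReLU()
        self.c1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        self.act2 = nn.ReLU()
        self.c2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1) if short_cut else None

    def forward(self, x):
        h = F.interpolate(x, scale_factor=2, mode=self.up_mode)   # upsample first
        out = self.c2(self.act2(self.c1(self.act1(h))))
        if self.c_sc is not None:
            out = out + self.c_sc(h)                              # inner 1x1 shortcut on the upsampled input
        return out

x = torch.randn(2, 8, 16, 16)
y = TinyUpCell()(x)
assert y.shape == (2, 8, 32, 32)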
Example #2: test_SNConv2d
    def test_SNConv2d(self):
        """
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
        # func name
        assert sys._getframe().f_code.co_name.startswith('test_')
        command = sys._getframe().f_code.co_name[5:]
        class_name = self.__class__.__name__[7:] \
          if self.__class__.__name__.startswith('Testing') \
          else self.__class__.__name__
        outdir = f'results/{class_name}/{command}'
        import yaml
        from template_lib.d2.layers import build_d2layer

        cfg_str = """
      SNConv2d_3x3:
        name: "SNConv2d"
        in_channels: "kwargs['in_channels']"
        out_channels: "kwargs['out_channels']"
        kernel_size: 3
        padding: 1
    """
        cfg = EasyDict(yaml.safe_load(cfg_str))
        op = build_d2layer(cfg.SNConv2d_3x3, in_channels=8, out_channels=8)

        op.cuda()
        x = torch.randn(2, 8, 32, 32).cuda()
        y = op(x)
        pass
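
The quoted "kwargs['in_channels']" / "kwargs['out_channels']" values appear to be placeholders that build_d2layer resolves from the keyword arguments passed at call time (in_channels=8, out_channels=8 here). Assuming SNConv2d is a spectrally normalized Conv2d, as its name and the nn.utils.spectral_norm calls in Example #9 suggest, the built op is roughly equivalent to the following plain-PyTorch construction (a sketch, not the library's implementation):

import torch
import torch.nn as nn

# assumption: SNConv2d ~= Conv2d wrapped with spectral normalization
sn_conv = nn.utils.spectral_norm(nn.Conv2d(8, 8, kernel_size=3, padding=1))
x = torch.randn(2, 8, 32, 32)
y = sn_conv(x)            # the forward pass also refreshes the power-iteration estimate
assert y.shape == x.shape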
Example #3: test_StyleV2Conv
    def test_StyleV2Conv(self):
        """
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
        # func name
        assert sys._getframe().f_code.co_name.startswith('test_')
        command = sys._getframe().f_code.co_name[5:]
        class_name = self.__class__.__name__[7:] \
          if self.__class__.__name__.startswith('Testing') \
          else self.__class__.__name__
        outdir = f'results/{class_name}/{command}'
        import yaml
        from template_lib.d2.layers import build_d2layer

        cfg_str = """
          name: "StyleV2Conv"
          update_cfg: true
    """
        cfg = EasyDict(yaml.safe_load(cfg_str))

        op = build_d2layer(cfg, in_channels=256, out_channels=256)
        op.cuda()
        out = op.test_case(in_channels=256, out_channels=256)

        import torchviz
        g = torchviz.make_dot(out)
        g.view()
        pass
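
A side note on the torchviz calls used in this and the following tests: make_dot returns a graphviz.Digraph, so on a headless machine the graph can be written to disk instead of opened with g.view(). A self-contained toy example using the standard torchviz/graphviz API (not tied to StyleV2Conv; 'toy_graph' is just an illustrative filename):

import torch
import torchviz

w = torch.randn(4, 4, requires_grad=True)
out = (w @ torch.randn(4, 1)).sum()
g = torchviz.make_dot(out, params={'w': w})          # graphviz.Digraph of the autograd graph
g.render('toy_graph', format='pdf', cleanup=True)    # writes toy_graph.pdf, no viewer needed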
Example #4: test_CondInstanceNorm2d
    def test_CondInstanceNorm2d(self):
        """
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
        # func name
        assert sys._getframe().f_code.co_name.startswith('test_')
        command = sys._getframe().f_code.co_name[5:]
        class_name = self.__class__.__name__[7:] \
          if self.__class__.__name__.startswith('Testing') \
          else self.__class__.__name__
        outdir = f'results/{class_name}/{command}'
        import yaml
        from template_lib.d2.layers import build_d2layer

        cfg_str = """
          name: "CondInstanceNorm2d"
          in_features: "kwargs['in_features']"
          out_features: "kwargs['out_features']"
          cfg_fc:
            name: "Linear"
            in_features: "kwargs['in_features']"
            out_features: "kwargs['out_features']"
    """
        cfg = EasyDict(yaml.safe_load(cfg_str))

        bs = 2
        in_features = out_features = 8

        op = build_d2layer(cfg,
                           in_features=in_features,
                           out_features=out_features)
        op.cuda()
        x = torch.randn(bs, in_features, 32, 32).cuda()
        y = torch.randn(bs, in_features).cuda()

        x = op(x, y)

        import torchviz
        g = torchviz.make_dot(x)
        g.view()
        pass
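
Here x is the feature map to normalize and y is a per-sample condition vector of size in_features. A minimal sketch of what a conditional InstanceNorm layer of this kind typically does (an assumption for illustration, not CondInstanceNorm2d's actual implementation): normalize per instance without affine parameters, then scale and shift with gamma/beta predicted from y by linear layers, which is what the cfg_fc sub-config hints at.

import torch
import torch.nn as nn

class TinyCondIN2d(nn.Module):
    """Illustrative conditional InstanceNorm: per-sample affine params come from y."""
    def __init__(self, num_features, cond_features):
        super().__init__()
        self.norm = nn.InstanceNorm2d(num_features, affine=False)
        self.gamma = nn.Linear(cond_features, num_features)
        self.beta = nn.Linear(cond_features, num_features)

    def forward(self, x, y):
        h = self.norm(x)
        g = self.gamma(y).unsqueeze(-1).unsqueeze(-1)   # (bs, C, 1, 1)
        b = self.beta(y).unsqueeze(-1).unsqueeze(-1)
        return (1 + g) * h + b

op = TinyCondIN2d(num_features=8, cond_features=8)
x = torch.randn(2, 8, 32, 32)
y = torch.randn(2, 8)
out = op(x, y)
assert out.shape == x.shape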
Example #5: test_DepthwiseSeparableConv2d
    def test_DepthwiseSeparableConv2d(self):
        """
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
        # func name
        assert sys._getframe().f_code.co_name.startswith('test_')
        command = sys._getframe().f_code.co_name[5:]
        class_name = self.__class__.__name__[7:] \
          if self.__class__.__name__.startswith('Testing') \
          else self.__class__.__name__
        outdir = f'results/{class_name}/{command}'
        import yaml
        from template_lib.d2.layers import build_d2layer

        cfg_str = """
        name: "DepthwiseSeparableConv2d"
        in_channels: 256
        out_channels: 256
        kernel_size: 7
        padding: 3
    """
        cfg = EasyDict(yaml.safe_load(cfg_str))
        op = build_d2layer(cfg)

        op.cuda()
        x = torch.randn(2, 256, 32, 32).cuda()
        y = op(x)

        import torchviz
        g = torchviz.make_dot(y)
        g.view()
        pass
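
Assuming DepthwiseSeparableConv2d follows the usual factorization (a depthwise convolution with groups=in_channels followed by a 1x1 pointwise convolution), the op built from the cfg above is roughly equivalent to this plain-PyTorch sketch:

import torch
import torch.nn as nn

dw_sep = nn.Sequential(
    nn.Conv2d(256, 256, kernel_size=7, padding=3, groups=256),  # depthwise: one filter per channel
    nn.Conv2d(256, 256, kernel_size=1),                         # pointwise: mixes channels
)
x = torch.randn(2, 256, 32, 32)
y = dw_sep(x)
assert y.shape == x.shape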
Example #6: test_MixedLayerCond
    def test_MixedLayerCond(self):
        """
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'PORT' not in os.environ:
            os.environ['PORT'] = '6006'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '1'
        # func name
        assert sys._getframe().f_code.co_name.startswith('test_')
        command = sys._getframe().f_code.co_name[5:]
        class_name = self.__class__.__name__[7:] \
          if self.__class__.__name__.startswith('Testing') \
          else self.__class__.__name__
        outdir = f'results/{class_name}/{command}'
        import yaml
        from template_lib.d2.layers import build_d2layer

        cfg_str = """
      layer:
        name: "MixedLayerCond"
        in_channels: "kwargs['in_channels']"
        out_channels: "kwargs['out_channels']"
        cfg_ops: "kwargs['cfg_ops']"
        cfg_bn:
          name: "BatchNorm2d"
          num_features: "kwargs['num_features']"
          affine: true
          track_running_stats: true
        cfg_act:
          name: "ReLU"
      cfg_ops:
        SNConv2d_3x3:
          name: "SNConv2d"
          in_channels: "kwargs['in_channels']"
          out_channels: "kwargs['out_channels']"
          kernel_size: 3
          padding: 1
        Conv2d_3x3:
          name: "Conv2d"
          in_channels: "kwargs['in_channels']"
          out_channels: "kwargs['out_channels']"
          kernel_size: 3
          padding: 1
    """
        cfg = EasyDict(yaml.safe_load(cfg_str))
        op = build_d2layer(cfg.layer,
                           in_channels=8,
                           out_channels=8,
                           cfg_ops=cfg.cfg_ops)
        num_classes = 2
        bs = num_classes
        num_ops = 2

        op.cuda()
        x = torch.randn(bs, 8, 32, 32).cuda()
        y = torch.arange(bs).cuda()
        sample_arc = torch.arange(num_ops).cuda()
        x = op(x, y, sample_arc)
        pass
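
The cfg above registers two candidate ops (SNConv2d_3x3 and Conv2d_3x3), and sample_arc tells the layer which candidate to run. The following is a much-reduced sketch of that mixed-op idea, for illustration only: it keeps a ModuleList of candidates and dispatches on a single architecture index, omitting the class conditioning (y, cfg_bn) and the per-sample arc handling of the real MixedLayerCond.

import torch
import torch.nn as nn

class TinyMixedLayer(nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.ops = nn.ModuleList([
            nn.utils.spectral_norm(nn.Conv2d(in_channels, out_channels, 3, padding=1)),  # ~ SNConv2d_3x3
            nn.Conv2d(in_channels, out_channels, 3, padding=1),                          # ~ Conv2d_3x3
        ])

    def forward(self, x, op_idx):
        return self.ops[int(op_idx)](x)   # run only the selected candidate op

layer = TinyMixedLayer(8, 8)
x = torch.randn(2, 8, 32, 32)
out = layer(x, op_idx=1)
assert out.shape == x.shape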
Example #7: conditional generator __init__
    def __init__(self, cfg, **kwargs):
        super().__init__()

        self.n_classes = get_attr_kwargs(cfg, 'n_classes', **kwargs)
        self.ch = get_attr_kwargs(cfg, 'ch', default=512, **kwargs)
        self.linear_ch = get_attr_kwargs(cfg,
                                         'linear_ch',
                                         default=128,
                                         **kwargs)
        self.bottom_width = get_attr_kwargs(cfg,
                                            'bottom_width',
                                            default=4,
                                            **kwargs)
        self.dim_z = get_attr_kwargs(cfg, 'dim_z', default=128, **kwargs)
        self.init_type = get_attr_kwargs(cfg,
                                         'init_type',
                                         default='xavier_uniform',
                                         **kwargs)
        self.embedding_dim = get_attr_kwargs(cfg,
                                             'embedding_dim',
                                             default=128,
                                             **kwargs)
        self.cfg_upsample = get_attr_kwargs(cfg, 'cfg_upsample', **kwargs)
        self.num_cells = get_attr_kwargs(cfg, 'num_cells', **kwargs)
        self.cfg_cell = get_attr_kwargs(cfg, 'cfg_cell', **kwargs)
        self.cfg_ops = get_attr_kwargs(cfg, 'cfg_ops', **kwargs)
        self.cfg_out_bn = get_attr_kwargs(cfg, 'cfg_out_bn', **kwargs)
        self.fixed_arc_file = get_attr_kwargs(cfg,
                                              'fixed_arc_file',
                                              default=None,
                                              **kwargs)
        self.fixed_epoch = get_attr_kwargs(cfg,
                                           'fixed_epoch',
                                           default=0,
                                           **kwargs)
        self.layer_op_idx = get_attr_kwargs(cfg,
                                            'layer_op_idx',
                                            default=None,
                                            **kwargs)

        self.device = torch.device(f'cuda:{comm.get_rank()}')

        if self.layer_op_idx is not None:
            # "[1 1 0 1 2 0 2 1 2 0 2 2 2 2 2 2 2 2 2 2 2 1 2 2 2 2 2 2 2 2]"
            self.layer_op_idx = json.loads(
                self.layer_op_idx.replace(' ', ', ').replace('][',
                                                             '], [').strip())
            assert len(self.layer_op_idx) % self.num_cells == 0
            num_edges_of_cell = len(self.layer_op_idx) // self.num_cells
        elif self.fixed_arc_file is not None:
            sample_arc = self._get_arc_from_file(
                fixed_arc_file=self.fixed_arc_file,
                fixed_epoch=self.fixed_epoch,
                nrows=1)
            self.layer_op_idx = sample_arc.reshape(-1)
            assert len(self.layer_op_idx) % self.num_cells == 0
            num_edges_of_cell = len(self.layer_op_idx) // self.num_cells

        self.num_slots = self.num_cells + 1
        self.z_chunk_size = (self.dim_z // self.num_slots)
        self.dim_z_input = self.dim_z
        self.cbn_in_features = self.embedding_dim + self.z_chunk_size
        # Prepare class embedding
        self.class_embedding = nn.Embedding(self.n_classes, self.embedding_dim)

        self.l1 = nn.Linear(self.dim_z_input,
                            (self.bottom_width**2) * self.linear_ch)
        self.conv1 = nn.Conv2d(in_channels=self.linear_ch,
                               out_channels=self.ch,
                               kernel_size=1,
                               stride=1,
                               padding=0)
        self.upsample = build_d2layer(self.cfg_upsample)

        self.cells = nn.ModuleList()
        for i in range(self.num_cells):
            if self.layer_op_idx is not None:
                cell = build_d2layer(
                    self.cfg_cell,
                    in_channels=self.ch,
                    cfg_ops=self.cfg_ops,
                    cell_op_idx=self.layer_op_idx[i *
                                                  num_edges_of_cell:(i + 1) *
                                                  num_edges_of_cell])
            else:
                cell = build_d2layer(self.cfg_cell,
                                     in_channels=self.ch,
                                     cfg_ops=self.cfg_ops)
            self.cells.append(cell)

        self.num_branches = len(self.cfg_ops)
        self.num_edges_of_cell = cell.num_edges
        self.num_layers = len(self.cells) * self.num_edges_of_cell

        out_bn = build_d2layer(self.cfg_out_bn, num_features=self.ch, **kwargs)
        self.to_rgb = nn.Sequential(out_bn, nn.ReLU(),
                                    nn.Conv2d(self.ch, 3, 3, 1, 1), nn.Tanh())

        weights_init_func = functools.partial(self.weights_init,
                                              init_type=self.init_type)
        self.apply(weights_init_func)
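
The layer_op_idx string is a space-separated architecture encoding like the one shown in the comment above; the replace / json.loads chain converts it into a flat list of op indices, which is then sliced into num_edges_of_cell entries per cell. A minimal reproduction of just that parsing step (the example string is shortened for illustration):

import json

s = "[1 1 0 1 2 0]"   # shortened example of the encoded architecture
layer_op_idx = json.loads(s.replace(' ', ', ').replace('][', '], [').strip())
print(layer_op_idx)   # [1, 1, 0, 1, 2, 0]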
Example #8: DenseGenerator_v2.__init__
    def __init__(self, cfg, **kwargs):
        super(DenseGenerator_v2, self).__init__()

        self.ch = get_attr_kwargs(cfg, 'ch', default=256, **kwargs)
        self.linear_ch = get_attr_kwargs(cfg,
                                         'linear_ch',
                                         default=128,
                                         **kwargs)
        self.bottom_width = get_attr_kwargs(cfg,
                                            'bottom_width',
                                            default=4,
                                            **kwargs)
        self.dim_z = get_attr_kwargs(cfg, 'dim_z', default=128, **kwargs)
        self.init_type = get_attr_kwargs(cfg,
                                         'init_type',
                                         default='xavier_uniform',
                                         **kwargs)
        self.cfg_upsample = get_attr_kwargs(cfg, 'cfg_upsample', **kwargs)
        self.num_cells = get_attr_kwargs(cfg, 'num_cells', **kwargs)
        self.cfg_cell = get_attr_kwargs(cfg, 'cfg_cell', **kwargs)
        self.layer_op_idx = get_attr_kwargs(cfg,
                                            'layer_op_idx',
                                            default=None,
                                            **kwargs)
        self.cfg_ops = get_attr_kwargs(cfg, 'cfg_ops', **kwargs)
        self.cfg_out_bn = get_attr_kwargs(cfg, 'cfg_out_bn', **kwargs)

        if self.layer_op_idx is not None:
            self.layer_op_idx = json.loads(
                self.layer_op_idx.replace(' ', ', ').replace('][',
                                                             '], [').strip())
            assert len(self.layer_op_idx) % self.num_cells == 0
            num_edges_of_cell = len(self.layer_op_idx) // self.num_cells

        self.device = torch.device(f'cuda:{comm.get_rank()}')

        self.l1 = nn.Linear(self.dim_z,
                            (self.bottom_width**2) * self.linear_ch)

        # self.act = nn.ReLU()
        self.conv1 = nn.Conv2d(in_channels=self.linear_ch,
                               out_channels=self.ch,
                               kernel_size=1,
                               stride=1,
                               padding=0)

        self.upsample = build_d2layer(self.cfg_upsample)

        self.cells = nn.ModuleList()
        for i in range(self.num_cells):
            if self.layer_op_idx is not None:
                cell = build_d2layer(
                    self.cfg_cell,
                    in_channels=self.ch,
                    cfg_ops=self.cfg_ops,
                    cell_op_idx=self.layer_op_idx[i *
                                                  num_edges_of_cell:(i + 1) *
                                                  num_edges_of_cell])
            else:
                cell = build_d2layer(self.cfg_cell,
                                     in_channels=self.ch,
                                     cfg_ops=self.cfg_ops)
            self.cells.append(cell)

        self.num_branches = len(self.cfg_ops)
        self.num_edges_of_cell = cell.num_edges
        self.num_layers = len(self.cells) * self.num_edges_of_cell

        out_bn = build_d2layer(self.cfg_out_bn, num_features=self.ch, **kwargs)
        self.to_rgb = nn.Sequential(out_bn, nn.ReLU(),
                                    nn.Conv2d(self.ch, 3, 3, 1, 1), nn.Tanh())

        weights_init_func = functools.partial(self.weights_init,
                                              init_type=self.init_type)
        self.apply(weights_init_func)
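
The forward pass is not part of this snippet, but l1 and conv1 form the usual project-reshape-lift generator stem: a linear layer maps z to a bottom_width x bottom_width feature map with linear_ch channels, and a 1x1 convolution lifts it to ch channels before the cells. Below is a self-contained sketch with the default sizes above; the reshape layout is an assumption, not taken from DenseGenerator_v2.

import torch
import torch.nn as nn

dim_z, linear_ch, ch, bottom_width = 128, 128, 256, 4   # defaults from the constructor above
l1 = nn.Linear(dim_z, bottom_width ** 2 * linear_ch)
conv1 = nn.Conv2d(linear_ch, ch, kernel_size=1)

z = torch.randn(2, dim_z)
h = l1(z).view(-1, linear_ch, bottom_width, bottom_width)   # assumed channel-first reshape
h = conv1(h)
assert h.shape == (2, ch, bottom_width, bottom_width)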
Example #9: discriminator __init__
    def __init__(self, cfg, **kwargs):
        super().__init__()

        cfg = self.update_cfg(cfg)

        self.ch = get_attr_kwargs(cfg, 'ch', default=512, **kwargs)
        self.d_spectral_norm = get_attr_kwargs(cfg,
                                               'd_spectral_norm',
                                               default=True,
                                               **kwargs)
        self.init_type = get_attr_kwargs(cfg,
                                         'init_type',
                                         default='xavier_uniform',
                                         **kwargs)
        self.cfg_downsample = get_attr_kwargs(cfg, 'cfg_downsample', **kwargs)
        self.num_cells = get_attr_kwargs(cfg, 'num_cells', **kwargs)
        self.cfg_cell = get_attr_kwargs(cfg, 'cfg_cell', **kwargs)
        self.cfg_ops = get_attr_kwargs(cfg, 'cfg_ops', **kwargs)
        self.fixed_arc_file = get_attr_kwargs(cfg,
                                              'fixed_arc_file',
                                              default=None,
                                              **kwargs)
        self.fixed_epoch = get_attr_kwargs(cfg,
                                           'fixed_epoch',
                                           default=0,
                                           **kwargs)
        self.layer_op_idx = get_attr_kwargs(cfg,
                                            'layer_op_idx',
                                            default=None,
                                            **kwargs)

        self.device = torch.device(f'cuda:{comm.get_rank()}')

        if self.layer_op_idx is not None:
            # "[1 1 0 1 2 0 2 1 2 0 2 2 2 2 2 2 2 2 2 2 2 1 2 2 2 2 2 2 2 2]"
            self.layer_op_idx = json.loads(
                self.layer_op_idx.replace(' ', ', ').replace('][',
                                                             '], [').strip())
            assert len(self.layer_op_idx) % self.num_cells == 0
            num_edges_of_cell = len(self.layer_op_idx) // self.num_cells
        elif self.fixed_arc_file is not None:
            sample_arc = self._get_arc_from_file(
                fixed_arc_file=self.fixed_arc_file,
                fixed_epoch=self.fixed_epoch,
                nrows=1)
            self.layer_op_idx = sample_arc.reshape(-1)
            assert len(self.layer_op_idx) % self.num_cells == 0
            num_edges_of_cell = len(self.layer_op_idx) // self.num_cells

        self.conv1 = nn.Conv2d(in_channels=3,
                               out_channels=self.ch,
                               kernel_size=1)
        if self.d_spectral_norm:
            self.conv1 = nn.utils.spectral_norm(self.conv1)
        self.downsample = build_d2layer(self.cfg_downsample)

        self.cells = nn.ModuleList()
        for i in range(self.num_cells):
            if self.layer_op_idx is not None:
                cell = build_d2layer(
                    self.cfg_cell,
                    in_channels=self.ch,
                    cfg_ops=self.cfg_ops,
                    cell_op_idx=self.layer_op_idx[i *
                                                  num_edges_of_cell:(i + 1) *
                                                  num_edges_of_cell])
            else:
                cell = build_d2layer(self.cfg_cell,
                                     in_channels=self.ch,
                                     cfg_ops=self.cfg_ops)
            self.cells.append(cell)

        self.num_branches = len(self.cfg_ops)
        self.num_edges_of_cell = cell.num_edges
        self.num_layers = len(self.cells) * self.num_edges_of_cell

        self.fc = nn.Linear(self.ch, 1, bias=False)
        if self.d_spectral_norm:
            self.fc = nn.utils.spectral_norm(self.fc)

        weights_init_func = functools.partial(self.weights_init,
                                              init_type=self.init_type)
        self.apply(weights_init_func)
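
When d_spectral_norm is set, both conv1 and the final fc are wrapped with nn.utils.spectral_norm, which reparameterizes the weight so that its largest singular value stays close to 1, estimated by power iteration on every forward pass. A tiny stand-alone illustration of the wrapper (standard PyTorch API; 512 matches the default ch above):

import torch
import torch.nn as nn

fc = nn.utils.spectral_norm(nn.Linear(512, 1, bias=False))
x = torch.randn(4, 512)
out = fc(x)                        # each forward refreshes the power-iteration estimate
assert out.shape == (4, 1)
print(hasattr(fc, 'weight_orig'))  # True: the raw parameter is stored as weight_orig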