Code example #1
0
    def __init__(self, device=None, learning_rate=1e-3, act=F.relu, n_cls=10):
        """Set up encoder/decoder models, losses, and Adam optimizers.

        Args:
            device: GPU device id, or None for CPU.
            learning_rate: Adam learning rate shared by both optimizers.
            act: activation function passed to the models.
            n_cls: number of target classes.
        """
        # Settings
        self.device = device
        self.act = act
        self.learning_rate = learning_rate
        self.n_cls = n_cls

        # Losses
        self.recon_loss = ReconstructionLoss()
        self.er_loss = EntropyRegularizationLoss()

        # Model
        from recon.cifar10.cnn_model_007 import Encoder, Decoder
        self.encoder = Encoder(device, act)
        self.decoder = Decoder(device, act)
        # Fix: the original `... if self.device else None` abused a
        # conditional expression as a statement and skipped the GPU
        # transfer for device id 0 (falsy).
        if self.device is not None:
            self.encoder.to_gpu(device)
            self.decoder.to_gpu(device)

        # Optimizer
        self.optimizer_enc = optimizers.Adam(learning_rate)
        self.optimizer_enc.setup(self.encoder)
        self.optimizer_enc.use_cleargrads()
        self.optimizer_dec = optimizers.Adam(learning_rate)
        self.optimizer_dec.setup(self.decoder)
        self.optimizer_dec.use_cleargrads()
Code example #2
0
    def __init__(self, device=None, learning_rate=1e-3, act=F.relu, n_cls=10):
        """Set up encoder/MLP models, losses, and Adam optimizers.

        Args:
            device: GPU device id, or None for CPU.
            learning_rate: Adam learning rate shared by both optimizers.
            act: activation function passed to the models.
            n_cls: number of target classes.
        """
        # Settings
        self.device = device
        self.act = act
        self.learning_rate = learning_rate
        self.n_cls = n_cls

        # Losses
        self.md_loss = MeanDistanceLoss()
        self.er_loss = EntropyRegularizationLoss()
        self.d_loss = DistanceLoss()

        # Model
        from recon.cifar10.cnn_model_001 import Encoder, MLP, Decoder
        self.encoder = Encoder(device, act)
        self.mlp = MLP(device, act)
        # Fix: the original `... if self.device else None` abused a
        # conditional expression as a statement and skipped the GPU
        # transfer for device id 0 (falsy).
        if self.device is not None:
            self.encoder.to_gpu(device)
            self.mlp.to_gpu(device)

        # Optimizer
        self.optimizer_enc = optimizers.Adam(learning_rate)
        self.optimizer_enc.setup(self.encoder)
        self.optimizer_enc.use_cleargrads()
        self.optimizer_mlp = optimizers.Adam(learning_rate)
        self.optimizer_mlp.setup(self.mlp)
        self.optimizer_mlp.use_cleargrads()
Code example #3
0
File: experiments.py  Project: kzky/works
class Experiment004(Experiment003):
    """Enc-MLP-Dec

    - MeanDistanceLoss
    - ResNet
    """
    def __init__(self, device=None, learning_rate=1e-3, act=F.relu, n_cls=10):
        """Set up encoder/MLP models, losses, and Adam optimizers.

        Args:
            device: GPU device id, or None for CPU.
            learning_rate: Adam learning rate shared by both optimizers.
            act: activation function passed to the models.
            n_cls: number of target classes.
        """
        # Settings
        self.device = device
        self.act = act
        self.learning_rate = learning_rate
        self.n_cls = n_cls

        # Losses
        self.md_loss = MeanDistanceLoss()
        self.er_loss = EntropyRegularizationLoss()
        self.d_loss = DistanceLoss()

        # Model
        from recon.cifar10.cnn_model_001 import Encoder, MLP, Decoder
        self.encoder = Encoder(device, act)
        self.mlp = MLP(device, act)
        # Fix: the original `... if self.device else None` abused a
        # conditional expression as a statement and skipped the GPU
        # transfer for device id 0 (falsy).
        if self.device is not None:
            self.encoder.to_gpu(device)
            self.mlp.to_gpu(device)

        # Optimizer
        self.optimizer_enc = optimizers.Adam(learning_rate)
        self.optimizer_enc.setup(self.encoder)
        self.optimizer_enc.use_cleargrads()
        self.optimizer_mlp = optimizers.Adam(learning_rate)
        self.optimizer_mlp.setup(self.mlp)
        self.optimizer_mlp.use_cleargrads()

    def train(self, x_l, y, x_u):
        """One step: supervised pass on (x_l, y), then unlabeled pass on x_u."""
        self._train(x_l, (x_l, y), y)
        self._train(x_u, (x_l, y))

    def _train(self, x, xy, y_0=None):
        """Compute ER (+CE when labeled) + MD losses and update both models.

        Args:
            x: input batch to encode/classify.
            xy: (x, y) labeled pair; unpacked but currently unused here.
            y_0: labels for the CE term, or None for unlabeled batches.
        """
        x_, y_ = xy  # kept for interface parity; not used below

        # Encoder/classifier forward pass
        h = self.encoder(x)
        y_pred = self.mlp(h)

        loss = 0
        loss += self.er_loss(y_pred)  # ER loss
        if y_0 is not None:
            loss += F.softmax_cross_entropy(y_pred, y_0)  # CE loss

        loss += self.md_loss(self.encoder.hiddens[-1])  # MD loss

        self.cleargrads()
        loss.backward()
        self.optimizer_enc.update()
        self.optimizer_mlp.update()

    def cleargrads(self):
        """Clear gradients of all trained models."""
        self.encoder.cleargrads()
        self.mlp.cleargrads()
Code example #4
0
class Experiment004(Experiment003):
    """Enc-MLP-Dec

    - MeanDistanceLoss
    - ResNet
    """
    def __init__(self, device=None, learning_rate=1e-3, act=F.relu, n_cls=10):
        """Set up encoder/MLP models, losses, and Adam optimizers.

        Args:
            device: GPU device id, or None for CPU.
            learning_rate: Adam learning rate shared by both optimizers.
            act: activation function passed to the models.
            n_cls: number of target classes.
        """
        # Settings
        self.device = device
        self.act = act
        self.learning_rate = learning_rate
        self.n_cls = n_cls

        # Losses
        self.md_loss = MeanDistanceLoss()
        self.er_loss = EntropyRegularizationLoss()
        self.d_loss = DistanceLoss()

        # Model
        from recon.cifar10.cnn_model_001 import Encoder, MLP, Decoder
        self.encoder = Encoder(device, act)
        self.mlp = MLP(device, act)
        # Fix: the original `... if self.device else None` abused a
        # conditional expression as a statement and skipped the GPU
        # transfer for device id 0 (falsy).
        if self.device is not None:
            self.encoder.to_gpu(device)
            self.mlp.to_gpu(device)

        # Optimizer
        self.optimizer_enc = optimizers.Adam(learning_rate)
        self.optimizer_enc.setup(self.encoder)
        self.optimizer_enc.use_cleargrads()
        self.optimizer_mlp = optimizers.Adam(learning_rate)
        self.optimizer_mlp.setup(self.mlp)
        self.optimizer_mlp.use_cleargrads()

    def train(self, x_l, y, x_u):
        """One step: supervised pass on (x_l, y), then unlabeled pass on x_u."""
        self._train(x_l, (x_l, y), y)
        self._train(x_u, (x_l, y))

    def _train(self, x, xy, y_0=None):
        """Compute ER (+CE when labeled) + MD losses and update both models.

        Args:
            x: input batch to encode/classify.
            xy: (x, y) labeled pair; unpacked but currently unused here.
            y_0: labels for the CE term, or None for unlabeled batches.
        """
        x_, y_ = xy  # kept for interface parity; not used below

        # Encoder/classifier forward pass
        h = self.encoder(x)
        y_pred = self.mlp(h)

        loss = 0
        loss += self.er_loss(y_pred)  # ER loss
        if y_0 is not None:
            loss += F.softmax_cross_entropy(y_pred, y_0)  # CE loss

        loss += self.md_loss(self.encoder.hiddens[-1])  # MD loss

        self.cleargrads()
        loss.backward()
        self.optimizer_enc.update()
        self.optimizer_mlp.update()

    def cleargrads(self):
        """Clear gradients of all trained models."""
        self.encoder.cleargrads()
        self.mlp.cleargrads()
Code example #5
0
class Experiment002(Experiment000):
    """Enc-MLP-Dec

    - Encoder contains linear function
    - ResNet
    - U-Net
    """
    def __init__(self, device=None, learning_rate=1e-3, act=F.relu, n_cls=10):
        """Set up encoder/MLP/decoder models, losses, and Adam optimizers.

        Args:
            device: GPU device id, or None for CPU.
            learning_rate: Adam learning rate shared by all optimizers.
            act: activation function passed to the models.
            n_cls: number of target classes.
        """
        # Settings
        self.device = device
        self.act = act
        self.learning_rate = learning_rate
        self.n_cls = n_cls

        # Losses
        self.recon_loss = ReconstructionLoss()
        self.er_loss = EntropyRegularizationLoss()

        # Model
        from recon.cifar10.cnn_model_002 import Encoder, MLP, Decoder
        self.encoder = Encoder(device, act)
        self.mlp = MLP(device, act)
        self.decoder = Decoder(device, act)
        # Fix: the original `... if self.device else None` abused a
        # conditional expression as a statement and skipped the GPU
        # transfer for device id 0 (falsy).
        if self.device is not None:
            self.encoder.to_gpu(device)
            self.mlp.to_gpu(device)
            self.decoder.to_gpu(device)

        # Optimizer
        self.optimizer_enc = optimizers.Adam(learning_rate)
        self.optimizer_enc.setup(self.encoder)
        self.optimizer_enc.use_cleargrads()
        self.optimizer_mlp = optimizers.Adam(learning_rate)
        self.optimizer_mlp.setup(self.mlp)
        self.optimizer_mlp.use_cleargrads()
        self.optimizer_dec = optimizers.Adam(learning_rate)
        self.optimizer_dec.setup(self.decoder)
        self.optimizer_dec.use_cleargrads()

    def _train(self, x, xy, y_0=None):
        """ER (+CE when labeled) + reconstruction losses; update all models.

        Args:
            x: input batch.
            xy: (x, y) labeled pair; unpacked but currently unused here.
            y_0: labels for the CE term, or None for unlabeled batches.
        """
        x_, y_ = xy  # kept for interface parity; not used below

        # Encoder/Decoder
        h = self.encoder(x)
        y_pred = self.mlp(h)

        loss = 0
        loss += self.er_loss(y_pred)  # ER loss
        if y_0 is not None:
            loss += F.softmax_cross_entropy(y_pred, y_0)  # CE loss

        # Decoder also consumes encoder hiddens — presumably the U-Net
        # skip connections mentioned in the class docstring; verify.
        x_rec = self.decoder(h, self.encoder.hiddens)
        loss += self.recon_loss(x, x_rec)

        self.cleargrads()
        loss.backward()
        self.optimizer_enc.update()
        self.optimizer_dec.update()
        self.optimizer_mlp.update()
Code example #6
0
File: experiments.py  Project: kzky/works
class Experiment002(Experiment000):
    """Enc-MLP-Dec

    - Encoder contains linear function
    - ResNet
    - U-Net
    """
    def __init__(self, device=None, learning_rate=1e-3, act=F.relu, n_cls=10):
        """Set up encoder/MLP/decoder models, losses, and Adam optimizers.

        Args:
            device: GPU device id, or None for CPU.
            learning_rate: Adam learning rate shared by all optimizers.
            act: activation function passed to the models.
            n_cls: number of target classes.
        """
        # Settings
        self.device = device
        self.act = act
        self.learning_rate = learning_rate
        self.n_cls = n_cls

        # Losses
        self.recon_loss = ReconstructionLoss()
        self.er_loss = EntropyRegularizationLoss()

        # Model
        from recon.cifar10.cnn_model_002 import Encoder, MLP, Decoder
        self.encoder = Encoder(device, act)
        self.mlp = MLP(device, act)
        self.decoder = Decoder(device, act)
        # Fix: the original `... if self.device else None` abused a
        # conditional expression as a statement and skipped the GPU
        # transfer for device id 0 (falsy).
        if self.device is not None:
            self.encoder.to_gpu(device)
            self.mlp.to_gpu(device)
            self.decoder.to_gpu(device)

        # Optimizer
        self.optimizer_enc = optimizers.Adam(learning_rate)
        self.optimizer_enc.setup(self.encoder)
        self.optimizer_enc.use_cleargrads()
        self.optimizer_mlp = optimizers.Adam(learning_rate)
        self.optimizer_mlp.setup(self.mlp)
        self.optimizer_mlp.use_cleargrads()
        self.optimizer_dec = optimizers.Adam(learning_rate)
        self.optimizer_dec.setup(self.decoder)
        self.optimizer_dec.use_cleargrads()

    def _train(self, x, xy, y_0=None):
        """ER (+CE when labeled) + reconstruction losses; update all models.

        Args:
            x: input batch.
            xy: (x, y) labeled pair; unpacked but currently unused here.
            y_0: labels for the CE term, or None for unlabeled batches.
        """
        x_, y_ = xy  # kept for interface parity; not used below

        # Encoder/Decoder
        h = self.encoder(x)
        y_pred = self.mlp(h)

        loss = 0
        loss += self.er_loss(y_pred)  # ER loss
        if y_0 is not None:
            loss += F.softmax_cross_entropy(y_pred, y_0)  # CE loss

        # Decoder also consumes encoder hiddens — presumably the U-Net
        # skip connections mentioned in the class docstring; verify.
        x_rec = self.decoder(h, self.encoder.hiddens)
        loss += self.recon_loss(x, x_rec)

        self.cleargrads()
        loss.backward()
        self.optimizer_enc.update()
        self.optimizer_dec.update()
        self.optimizer_mlp.update()
Code example #7
0
class Experiment009(Experiment008):
    """Enc-MLP-Dec

    - ConvPool-CNN-C (Springenberg et al., 2014, Salimans&Kingma (2016))
    - Encoder contains classifier
    """
    def __init__(self, device=None, learning_rate=1e-3, act=F.relu, n_cls=10):
        """Set up encoder/decoder models, losses, and Adam optimizers.

        Args:
            device: GPU device id, or None for CPU.
            learning_rate: Adam learning rate shared by both optimizers.
            act: activation function passed to the models.
            n_cls: number of target classes.
        """
        # Settings
        self.device = device
        self.act = act
        self.learning_rate = learning_rate
        self.n_cls = n_cls

        # Losses
        self.recon_loss = ReconstructionLoss()
        self.er_loss = EntropyRegularizationLoss()

        # Model
        from recon.cifar10.cnn_model_007 import Encoder, Decoder
        self.encoder = Encoder(device, act)
        self.decoder = Decoder(device, act)
        # Fix: the original `... if self.device else None` abused a
        # conditional expression as a statement and skipped the GPU
        # transfer for device id 0 (falsy).
        if self.device is not None:
            self.encoder.to_gpu(device)
            self.decoder.to_gpu(device)

        # Optimizer
        self.optimizer_enc = optimizers.Adam(learning_rate)
        self.optimizer_enc.setup(self.encoder)
        self.optimizer_enc.use_cleargrads()
        self.optimizer_dec = optimizers.Adam(learning_rate)
        self.optimizer_dec.setup(self.decoder)
        self.optimizer_dec.use_cleargrads()

    def _train(self, x, xy, y_0=None):
        """ER (+CE when labeled) + reconstruction losses; update both models.

        The encoder output is used directly as the class prediction (the
        encoder "contains classifier" per the class docstring), and the
        decoder reconstructs the input from that prediction.

        Args:
            x: input batch.
            xy: (x, y) labeled pair; unpacked but currently unused here.
            y_0: labels for the CE term, or None for unlabeled batches.
        """
        x_, y_ = xy  # kept for interface parity; not used below

        # Encoder doubles as the classifier
        y_pred = self.encoder(x)

        loss = 0
        loss += self.er_loss(y_pred)  # ER loss
        if y_0 is not None:
            loss += F.softmax_cross_entropy(y_pred, y_0)  # CE loss

        x_rec = self.decoder(y_pred)
        loss += self.recon_loss(x, x_rec)

        self.cleargrads()
        loss.backward()
        self.optimizer_enc.update()
        self.optimizer_dec.update()

    def cleargrads(self):
        """Clear gradients of all trained models."""
        self.encoder.cleargrads()
        self.decoder.cleargrads()
Code example #8
0
File: experiments.py  Project: kzky/works
class Experiment009(Experiment008):
    """Enc-MLP-Dec

    - ConvPool-CNN-C (Springenberg et al., 2014, Salimans&Kingma (2016))
    - Encoder contains classifier
    """
    def __init__(self, device=None, learning_rate=1e-3, act=F.relu, n_cls=10):
        """Set up encoder/decoder models, losses, and Adam optimizers.

        Args:
            device: GPU device id, or None for CPU.
            learning_rate: Adam learning rate shared by both optimizers.
            act: activation function passed to the models.
            n_cls: number of target classes.
        """
        # Settings
        self.device = device
        self.act = act
        self.learning_rate = learning_rate
        self.n_cls = n_cls

        # Losses
        self.recon_loss = ReconstructionLoss()
        self.er_loss = EntropyRegularizationLoss()

        # Model
        from recon.cifar10.cnn_model_007 import Encoder, Decoder
        self.encoder = Encoder(device, act)
        self.decoder = Decoder(device, act)
        # Fix: the original `... if self.device else None` abused a
        # conditional expression as a statement and skipped the GPU
        # transfer for device id 0 (falsy).
        if self.device is not None:
            self.encoder.to_gpu(device)
            self.decoder.to_gpu(device)

        # Optimizer
        self.optimizer_enc = optimizers.Adam(learning_rate)
        self.optimizer_enc.setup(self.encoder)
        self.optimizer_enc.use_cleargrads()
        self.optimizer_dec = optimizers.Adam(learning_rate)
        self.optimizer_dec.setup(self.decoder)
        self.optimizer_dec.use_cleargrads()

    def _train(self, x, xy, y_0=None):
        """ER (+CE when labeled) + reconstruction losses; update both models.

        The encoder output is used directly as the class prediction (the
        encoder "contains classifier" per the class docstring), and the
        decoder reconstructs the input from that prediction.

        Args:
            x: input batch.
            xy: (x, y) labeled pair; unpacked but currently unused here.
            y_0: labels for the CE term, or None for unlabeled batches.
        """
        x_, y_ = xy  # kept for interface parity; not used below

        # Encoder doubles as the classifier
        y_pred = self.encoder(x)

        loss = 0
        loss += self.er_loss(y_pred)  # ER loss
        if y_0 is not None:
            loss += F.softmax_cross_entropy(y_pred, y_0)  # CE loss

        x_rec = self.decoder(y_pred)
        loss += self.recon_loss(x, x_rec)

        self.cleargrads()
        loss.backward()
        self.optimizer_enc.update()
        self.optimizer_dec.update()

    def cleargrads(self):
        """Clear gradients of all trained models."""
        self.encoder.cleargrads()
        self.decoder.cleargrads()
Code example #9
0
File: experiments.py  Project: kzky/works
    def __init__(self, device=None, learning_rate=1e-3, act=F.relu, n_cls=10):
        """Set up encoder/decoder models, losses, and Adam optimizers.

        Args:
            device: GPU device id, or None for CPU.
            learning_rate: Adam learning rate shared by both optimizers.
            act: activation function passed to the models.
            n_cls: number of target classes.
        """
        # Settings
        self.device = device
        self.act = act
        self.learning_rate = learning_rate
        self.n_cls = n_cls

        # Losses
        self.recon_loss = ReconstructionLoss()
        self.er_loss = EntropyRegularizationLoss()

        # Model
        from recon.cifar10.cnn_model_007 import Encoder, Decoder
        self.encoder = Encoder(device, act)
        self.decoder = Decoder(device, act)
        # Fix: the original `... if self.device else None` abused a
        # conditional expression as a statement and skipped the GPU
        # transfer for device id 0 (falsy).
        if self.device is not None:
            self.encoder.to_gpu(device)
            self.decoder.to_gpu(device)

        # Optimizer
        self.optimizer_enc = optimizers.Adam(learning_rate)
        self.optimizer_enc.setup(self.encoder)
        self.optimizer_enc.use_cleargrads()
        self.optimizer_dec = optimizers.Adam(learning_rate)
        self.optimizer_dec.setup(self.decoder)
        self.optimizer_dec.use_cleargrads()
Code example #10
0
File: experiments.py  Project: kzky/works
    def __init__(self, device=None, learning_rate=1e-3, act=F.relu, n_cls=10):
        """Set up encoder/MLP models, losses, and Adam optimizers.

        Args:
            device: GPU device id, or None for CPU.
            learning_rate: Adam learning rate shared by both optimizers.
            act: activation function passed to the models.
            n_cls: number of target classes.
        """
        # Settings
        self.device = device
        self.act = act
        self.learning_rate = learning_rate
        self.n_cls = n_cls

        # Losses
        self.md_loss = MeanDistanceLoss()
        self.er_loss = EntropyRegularizationLoss()
        self.d_loss = DistanceLoss()

        # Model
        from recon.cifar10.cnn_model_001 import Encoder, MLP, Decoder
        self.encoder = Encoder(device, act)
        self.mlp = MLP(device, act)
        # Fix: the original `... if self.device else None` abused a
        # conditional expression as a statement and skipped the GPU
        # transfer for device id 0 (falsy).
        if self.device is not None:
            self.encoder.to_gpu(device)
            self.mlp.to_gpu(device)

        # Optimizer
        self.optimizer_enc = optimizers.Adam(learning_rate)
        self.optimizer_enc.setup(self.encoder)
        self.optimizer_enc.use_cleargrads()
        self.optimizer_mlp = optimizers.Adam(learning_rate)
        self.optimizer_mlp.setup(self.mlp)
        self.optimizer_mlp.use_cleargrads()
Code example #11
0
class Experiment006(Experiment005):
    """Enc-MLP-Dec

    - Encoder contains linear function
    - BN of classification is different from BN of encoding
    - Mean-only BN
    """
    def __init__(self, device=None, learning_rate=1e-3, act=F.relu, n_cls=10):
        """Set up encoder/MLP/decoder models, losses, and Adam optimizers.

        Args:
            device: GPU device id, or None for CPU.
            learning_rate: Adam learning rate shared by all optimizers.
            act: activation function passed to the models.
            n_cls: number of target classes.
        """
        # Settings
        self.device = device
        self.act = act
        self.learning_rate = learning_rate
        self.n_cls = n_cls

        # Losses
        self.recon_loss = ReconstructionLoss()
        self.er_loss = EntropyRegularizationLoss()

        # Model
        from recon.cifar10.cnn_model_004 import Encoder, MLP, Decoder
        self.encoder = Encoder(device, act)
        self.mlp = MLP(device, act)
        self.decoder = Decoder(device, act)
        # Fix: the original `... if self.device else None` abused a
        # conditional expression as a statement and skipped the GPU
        # transfer for device id 0 (falsy).
        if self.device is not None:
            self.encoder.to_gpu(device)
            self.mlp.to_gpu(device)
            self.decoder.to_gpu(device)

        # Optimizer
        self.optimizer_enc = optimizers.Adam(learning_rate)
        self.optimizer_enc.setup(self.encoder)
        self.optimizer_enc.use_cleargrads()
        self.optimizer_mlp = optimizers.Adam(learning_rate)
        self.optimizer_mlp.setup(self.mlp)
        self.optimizer_mlp.use_cleargrads()
        self.optimizer_dec = optimizers.Adam(learning_rate)
        self.optimizer_dec.setup(self.decoder)
        self.optimizer_dec.use_cleargrads()
Code example #12
0
File: experiments.py  Project: kzky/works
class Experiment006(Experiment005):
    """Enc-MLP-Dec

    - Encoder contains linear function
    - BN of classification is different from BN of encoding
    - Mean-only BN
    """
    def __init__(self, device=None, learning_rate=1e-3, act=F.relu, n_cls=10):
        """Set up encoder/MLP/decoder models, losses, and Adam optimizers.

        Args:
            device: GPU device id, or None for CPU.
            learning_rate: Adam learning rate shared by all optimizers.
            act: activation function passed to the models.
            n_cls: number of target classes.
        """
        # Settings
        self.device = device
        self.act = act
        self.learning_rate = learning_rate
        self.n_cls = n_cls

        # Losses
        self.recon_loss = ReconstructionLoss()
        self.er_loss = EntropyRegularizationLoss()

        # Model
        from recon.cifar10.cnn_model_004 import Encoder, MLP, Decoder
        self.encoder = Encoder(device, act)
        self.mlp = MLP(device, act)
        self.decoder = Decoder(device, act)
        # Fix: the original `... if self.device else None` abused a
        # conditional expression as a statement and skipped the GPU
        # transfer for device id 0 (falsy).
        if self.device is not None:
            self.encoder.to_gpu(device)
            self.mlp.to_gpu(device)
            self.decoder.to_gpu(device)

        # Optimizer
        self.optimizer_enc = optimizers.Adam(learning_rate)
        self.optimizer_enc.setup(self.encoder)
        self.optimizer_enc.use_cleargrads()
        self.optimizer_mlp = optimizers.Adam(learning_rate)
        self.optimizer_mlp.setup(self.mlp)
        self.optimizer_mlp.use_cleargrads()
        self.optimizer_dec = optimizers.Adam(learning_rate)
        self.optimizer_dec.setup(self.decoder)
        self.optimizer_dec.use_cleargrads()
Code example #13
0
class Experiment007(Experiment000):
    """Enc-MLP-Dec

    - Encoder contains linear function
    - ResNet
    - BN of classification is different from BN of encoding
    """
    def __init__(self, device=None, learning_rate=1e-3, act=F.relu, n_cls=10):
        """Set up encoder/MLP/decoder models, losses, and Adam optimizers.

        Args:
            device: GPU device id, or None for CPU.
            learning_rate: Adam learning rate shared by all optimizers.
            act: activation function passed to the models.
            n_cls: number of target classes.
        """
        # Settings
        self.device = device
        self.act = act
        self.learning_rate = learning_rate
        self.n_cls = n_cls

        # Losses
        self.recon_loss = ReconstructionLoss()
        self.er_loss = EntropyRegularizationLoss()

        # Model
        from recon.cifar10.cnn_model_005 import Encoder, MLP, Decoder
        self.encoder = Encoder(device, act)
        self.mlp = MLP(device, act)
        self.decoder = Decoder(device, act)
        # Fix: the original `... if self.device else None` abused a
        # conditional expression as a statement and skipped the GPU
        # transfer for device id 0 (falsy).
        if self.device is not None:
            self.encoder.to_gpu(device)
            self.mlp.to_gpu(device)
            self.decoder.to_gpu(device)

        # Optimizer
        self.optimizer_enc = optimizers.Adam(learning_rate)
        self.optimizer_enc.setup(self.encoder)
        self.optimizer_enc.use_cleargrads()
        self.optimizer_mlp = optimizers.Adam(learning_rate)
        self.optimizer_mlp.setup(self.mlp)
        self.optimizer_mlp.use_cleargrads()
        self.optimizer_dec = optimizers.Adam(learning_rate)
        self.optimizer_dec.setup(self.decoder)
        self.optimizer_dec.use_cleargrads()

    def train(self, x_l, y, x_u):
        """One step: supervised pass on (x_l, y), then unlabeled pass on x_u."""
        self._train(x_l, (x_l, y), y)
        self._train(x_u, (x_l, y))

    def _train(self, x, xy, y_0=None):
        """ER (+CE when labeled) + layer-wise reconstruction losses.

        Runs two encoder passes: `enc=False` feeds the classifier MLP,
        `enc=True` feeds the decoder (separate BN statistics per the
        class docstring). The RC term sums the input reconstruction loss
        and per-layer losses pairing encoder hiddens with the reversed
        decoder hiddens.

        Args:
            x: input batch.
            xy: (x, y) labeled pair; unpacked but currently unused here.
            y_0: labels for the CE term, or None for unlabeled batches.
        """
        x_, y_ = xy  # kept for interface parity; not used below

        # Classification pass
        h = self.encoder(x, enc=False)
        y_pred = self.mlp(h)

        loss = 0
        loss += self.er_loss(y_pred)  # ER loss
        if y_0 is not None:
            loss += F.softmax_cross_entropy(y_pred, y_0)  # CE loss

        # Encoding/reconstruction pass
        h = self.encoder(x, enc=True)
        x_rec = self.decoder(h)
        # NOTE(review): `reduce` is a builtin only on Python 2; on
        # Python 3 it must be imported from functools — confirm the
        # file-level imports cover this.
        loss += self.recon_loss(x, x_rec) \
                + reduce(lambda u, v: u + v,
                         [self.recon_loss(u, v) \
                          for u, v in zip(self.encoder.hiddens,
                                          self.decoder.hiddens[::-1])])  # RC loss
        self.cleargrads()
        loss.backward()
        self.optimizer_enc.update()
        self.optimizer_dec.update()
        self.optimizer_mlp.update()

    def test(self, x, y):
        """Return classification accuracy on (x, y) in test mode."""
        h = self.encoder(x, enc=False, test=True)
        y_pred = self.mlp(h)
        acc = F.accuracy(y_pred, y)
        return acc
Code example #14
0
File: experiments.py  Project: kzky/works
class Experiment007(Experiment000):
    """Enc-MLP-Dec

    - Encoder contains linear function
    - ResNet
    - BN of classification is different from BN of encoding
    """
    def __init__(self, device=None, learning_rate=1e-3, act=F.relu, n_cls=10):
        """Set up encoder/MLP/decoder models, losses, and Adam optimizers.

        Args:
            device: GPU device id, or None for CPU.
            learning_rate: Adam learning rate shared by all optimizers.
            act: activation function passed to the models.
            n_cls: number of target classes.
        """
        # Settings
        self.device = device
        self.act = act
        self.learning_rate = learning_rate
        self.n_cls = n_cls

        # Losses
        self.recon_loss = ReconstructionLoss()
        self.er_loss = EntropyRegularizationLoss()

        # Model
        from recon.cifar10.cnn_model_005 import Encoder, MLP, Decoder
        self.encoder = Encoder(device, act)
        self.mlp = MLP(device, act)
        self.decoder = Decoder(device, act)
        # Fix: the original `... if self.device else None` abused a
        # conditional expression as a statement and skipped the GPU
        # transfer for device id 0 (falsy).
        if self.device is not None:
            self.encoder.to_gpu(device)
            self.mlp.to_gpu(device)
            self.decoder.to_gpu(device)

        # Optimizer
        self.optimizer_enc = optimizers.Adam(learning_rate)
        self.optimizer_enc.setup(self.encoder)
        self.optimizer_enc.use_cleargrads()
        self.optimizer_mlp = optimizers.Adam(learning_rate)
        self.optimizer_mlp.setup(self.mlp)
        self.optimizer_mlp.use_cleargrads()
        self.optimizer_dec = optimizers.Adam(learning_rate)
        self.optimizer_dec.setup(self.decoder)
        self.optimizer_dec.use_cleargrads()

    def train(self, x_l, y, x_u):
        """One step: supervised pass on (x_l, y), then unlabeled pass on x_u."""
        self._train(x_l, (x_l, y), y)
        self._train(x_u, (x_l, y))

    def _train(self, x, xy, y_0=None):
        """ER (+CE when labeled) + layer-wise reconstruction losses.

        Runs two encoder passes: `enc=False` feeds the classifier MLP,
        `enc=True` feeds the decoder (separate BN statistics per the
        class docstring). The RC term sums the input reconstruction loss
        and per-layer losses pairing encoder hiddens with the reversed
        decoder hiddens.

        Args:
            x: input batch.
            xy: (x, y) labeled pair; unpacked but currently unused here.
            y_0: labels for the CE term, or None for unlabeled batches.
        """
        x_, y_ = xy  # kept for interface parity; not used below

        # Classification pass
        h = self.encoder(x, enc=False)
        y_pred = self.mlp(h)

        loss = 0
        loss += self.er_loss(y_pred)  # ER loss
        if y_0 is not None:
            loss += F.softmax_cross_entropy(y_pred, y_0)  # CE loss

        # Encoding/reconstruction pass
        h = self.encoder(x, enc=True)
        x_rec = self.decoder(h)
        # NOTE(review): `reduce` is a builtin only on Python 2; on
        # Python 3 it must be imported from functools — confirm the
        # file-level imports cover this.
        loss += self.recon_loss(x, x_rec) \
                + reduce(lambda u, v: u + v,
                         [self.recon_loss(u, v) \
                          for u, v in zip(self.encoder.hiddens,
                                          self.decoder.hiddens[::-1])])  # RC loss
        self.cleargrads()
        loss.backward()
        self.optimizer_enc.update()
        self.optimizer_dec.update()
        self.optimizer_mlp.update()

    def test(self, x, y):
        """Return classification accuracy on (x, y) in test mode."""
        h = self.encoder(x, enc=False, test=True)
        y_pred = self.mlp(h)
        acc = F.accuracy(y_pred, y)
        return acc