Example #1
    def __init__(self, features, channels, lr, betas, eps, device='cpu'):
        super().__init__()
        self.step = 0
        self.alpha = 1.0
        self.features = features
        self.channels = channels
        self.device = device
        self.lr, self.betas, self.eps = lr, betas, eps

        in_features = features * 2 ** 5
        self.from_rgb = ll.Conv2d(channels, in_features, 1, stride=1, padding=0)
        self.prev_from_rgb = None

        self.blocks = nn.ModuleList()
        self.final_block = nn.Sequential(
            ll.ConvBlock(in_features, in_features, 3, stride=1, padding=1),
            ll.ConvBlock(in_features, in_features, 3, stride=1, padding=1),
            ll.MinibatchStddev(),
            ll.ConvBlock(in_features+1, in_features, 3, stride=1, padding=1),
            ll.ConvBlock(in_features, in_features, 4, stride=1, padding=0),
            ll.Conv2d(in_features, 1, 1, stride=1, padding=0),
            nn.Flatten(),
        )

        self.optim = optim.Adam(self.parameters(), lr=self.lr, betas=self.betas, eps=self.eps)
        self.to(self.device)
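A minimal sketch of how the `alpha` / `prev_from_rgb` pair from Example #1 is typically used for progressive-growing fade-in. This is an assumption about intent rather than code from the source, and it presumes `import torch.nn.functional as F` and that each entry of `self.blocks` preserves spatial size (downsampling is done explicitly here).

    def forward(self, x):
        # Hypothetical fade-in forward pass: blend the freshly added
        # high-resolution path with the previous input head while alpha < 1.
        out = self.from_rgb(x)
        for i, block in enumerate(self.blocks):
            out = F.avg_pool2d(block(out), 2)
            if i == 0 and self.prev_from_rgb is not None and self.alpha < 1.0:
                skip = self.prev_from_rgb(F.avg_pool2d(x, 2))
                out = self.alpha * out + (1.0 - self.alpha) * skip
        return self.final_block(out)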
Example #2
    def __init__(self, normalize, stochastic, device):
        super(ComplexConv, self).__init__()
        self.device = device
        self.stochastic = stochastic

        if self.stochastic:
            args = [device, normalize]

            self.conv1 = layers.Conv2d(3, 64, 3, *args)
            self.conv2 = layers.Conv2d(64, 128, 3, *args)
            self.conv3 = layers.Conv2d(128, 256, 3, *args)
            self.pool = nn.AvgPool2d(2, 2)
            self.fc1 = layers.Linear(64 * 4 * 4, 128, *args)
            self.fc2 = layers.Linear(128, 256, *args)
        else:
            self.conv1 = nn.Conv2d(3, 64, 3)
            self.conv2 = nn.Conv2d(64, 128, 3)
            self.conv3 = nn.Conv2d(128, 256, 3)
            self.pool = nn.MaxPool2d(2, 2)
            self.fc1 = nn.Linear(64 * 4 * 4, 128)
            self.fc2 = nn.Linear(128, 256)

        self.classifier = nn.Linear(256, 10, bias=False)
        self.classifier.weight.requires_grad = False

        torch.nn.init.orthogonal_(self.classifier.weight)
Example #3
    def __init__(self, num_inputs, action_space, model_train=True):
        self.model_train = model_train
        self.conv1 = layers.Conv2d(num_inputs,
                                   32,
                                   3,
                                   stride=2,
                                   padding=1,
                                   train=model_train)
        self.conv2 = layers.Conv2d(32,
                                   32,
                                   3,
                                   stride=2,
                                   padding=1,
                                   train=model_train)
        self.conv3 = layers.Conv2d(32,
                                   32,
                                   3,
                                   stride=2,
                                   padding=1,
                                   train=model_train)
        self.conv4 = layers.Conv2d(32,
                                   32,
                                   3,
                                   stride=2,
                                   padding=1,
                                   train=model_train)

        self.lstm = layers.LSTMCell(32 * 3 * 3, 256, train=model_train)

        num_outputs = action_space.n
        self.critic_linear = layers.Linear(256, 1, train=model_train)
        self.actor_linear = layers.Linear(256, num_outputs, train=model_train)

        # initial parameters
        self.conv1.init_weight(random=True)
        self.conv1.init_bias(random=False)
        self.conv2.init_weight(random=True)
        self.conv2.init_bias(random=False)
        self.conv3.init_weight(random=True)
        self.conv3.init_bias(random=False)
        self.conv4.init_weight(random=True)
        self.conv4.init_bias(random=False)

        self.critic_linear.init_weight(random=True)
        self.critic_linear.init_bias(random=False)
        self.actor_linear.init_weight(random=True)
        self.actor_linear.init_bias(random=False)

        self.lstm.init_weight(random=True)
        self.lstm.init_bias(random=False)

        # grad
        self.y1 = []
        self.y2 = []
        self.y3 = []
        self.y4 = []
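The 32 * 3 * 3 input size of the LSTM cell implies a particular observation resolution: four 3x3 convolutions with stride 2 and padding 1 reduce the common 42x42 A3C frame to 3x3. The 42x42 input size is an assumption; a quick check of the arithmetic:

    # Spatial size after each stride-2, padding-1, 3x3 convolution.
    h = 42                              # assumed input resolution (common A3C choice)
    for _ in range(4):
        h = (h + 2 * 1 - 3) // 2 + 1    # 42 -> 21 -> 11 -> 6 -> 3
    assert 32 * h * h == 32 * 3 * 3     # hence the LSTM input size of 288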
Example #4
    def __init__(self, batch_norm=True, batch_norm_alpha=0.1):
        super(Deep4NetWs, self).__init__()
        self.batch_norm = batch_norm
        self.batch_norm_alpha = batch_norm_alpha

        self.convnet = nn.Sequential(
            L.Conv2d(1, 25, kernel_size=(1, 10), stride=1),
            L.Conv2d(25,
                     25,
                     kernel_size=(62, 1),
                     stride=1,
                     bias=not self.batch_norm),
            L.BatchNorm2d(25),
            nn.ELU(),
            nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 2)),
            nn.Dropout(p=0.5),
            L.Conv2d(25,
                     50,
                     kernel_size=(1, 10),
                     stride=1,
                     bias=not self.batch_norm),
            L.BatchNorm2d(50),
            nn.ELU(),
            nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 2)),
            nn.Dropout(p=0.5),
            L.Conv2d(50,
                     100,
                     kernel_size=(1, 10),
                     stride=1,
                     bias=not self.batch_norm),
            L.BatchNorm2d(100),
            nn.ELU(),
            nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 2)),
            nn.Dropout(p=0.5),
            L.Conv2d(100, 100, kernel_size=(1, 10), stride=1),
            L.BatchNorm2d(100),
            nn.ELU(),
            nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 3)),
        )

        self.convnet.eval()

        out = self.convnet(
            np_to_var(np.ones((1, 1, 62, 400), dtype=np.float32)))
        n_out_time = out.cpu().data.numpy().shape[3]
        self.final_conv_length = n_out_time

        self.num_hidden = self.final_conv_length * 100
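The all-ones dummy forward pass is simply a way to infer the temporal output length without deriving it by hand; `num_hidden` is then the flattened feature count that a classification head would consume. A hedged sketch of such a head (`n_classes` and `self.clf` are hypothetical, not part of the example):

        n_classes = 4  # hypothetical number of target classes
        self.clf = nn.Sequential(
            nn.Flatten(),
            nn.Linear(self.num_hidden, n_classes),
        )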
Example #5
    def __init__(self, zdim, features, channels, lr, betas, eps, device='cpu'):
        super().__init__()
        self.step = 0
        self.alpha = 1.0
        self.device = device
        self.features = features
        self.channels = channels
        self.zdim = zdim
        self.lr, self.betas, self.eps = lr, betas, eps

        in_features = features * 2 ** (8 - max(3, self.step))
        out_features = features * 2 ** (8 - max(3, self.step + 1))        

        self.to_rgb = ll.Conv2d(out_features, self.channels, 1, stride=1, padding=0)
        self.prev_to_rgb = None
        self.blocks = nn.ModuleList()

        self.input_block = nn.Sequential(
            ll.ConvBlock(self.zdim, in_features, 4, stride=1, padding=3),
            ll.ConvBlock(in_features, in_features, 3, stride=1, padding=1),
            ll.ConvBlock(in_features, out_features, 3, stride=1, padding=1),
        )

        self.optim = optim.Adam(self.parameters(), lr=self.lr, betas=self.betas, eps=self.eps)
        self.to(self.device)
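The width formula `features * 2 ** (8 - max(3, step))` keeps the channel count constant at the coarse resolutions and halves it once `step` grows past 3. A small illustration (`features = 16` is an assumed value):

    features = 16  # illustrative value only
    widths = [features * 2 ** (8 - max(3, step)) for step in range(6)]
    # widths == [512, 512, 512, 512, 256, 128]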
Example #6
    def __init__(self, args, device='cuda'):
        super().__init__()
        self.args = args
        self.device = device
        self.img_channels = 3
        self.depths = [args.zdim, 256, 256, 256, 128, 128]
        self.didx = 0
        self.alpha = 1.

        # init G
        self.G = nn.ModuleList()
        blk = nn.ModuleList()
        blk.append(ll.Conv2d(self.depths[0], self.depths[0], 4, padding=3)) # to 4x4
        blk.append(ll.Conv2d(self.depths[0], self.depths[0], 3, padding=1))
        self.G.append(blk)
        self.toRGB = nn.ModuleList()
        self.toRGB.append(ll.Conv2d(self.depths[0], self.img_channels, 1, lrelu=False, pnorm=False)) # toRGB

        # init D
        self.fromRGB = nn.ModuleList()
        self.fromRGB.append(ll.Conv2d(self.img_channels, self.depths[0], 1)) # fromRGB
        self.D = nn.ModuleList()
        blk = nn.ModuleList()
        blk.append(ll.MinibatchStddev())
        blk.append(ll.Conv2d(self.depths[0]+1, self.depths[0], 3, padding=1))
        blk.append(ll.Conv2d(self.depths[0], self.depths[0], 4, stride=4)) # to 1x1
        blk.append(ll.Flatten())
        blk.append(ll.Linear(self.depths[0], 1))
        self.D.append(blk)

        self.doubling = nn.Upsample(scale_factor=2)
        self.halving = nn.AvgPool2d(2, 2)
        self.set_optimizer()
        self.criterion = losses.GANLoss(loss_type=args.loss_type, device=device)
        self.loss_type = args.loss_type
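The first generator convolution turns the zdim-channel latent, viewed as a 1x1 feature map, into a 4x4 map, which is what the "# to 4x4" comment refers to: with kernel size 4, padding 3 and stride 1 the output side is (1 + 2*3 - 4) + 1 = 4. A quick check of that arithmetic:

    h, k, p, s = 1, 4, 3, 1                  # input side, kernel, padding, stride
    assert (h + 2 * p - k) // s + 1 == 4     # 1x1 latent map -> 4x4 feature map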
Example #7
    def add_scale(self, increase_idx=True):
        if increase_idx:
            self.didx += 1
        blk = nn.ModuleList()
        blk.append(nn.Upsample(scale_factor=2))
        blk.append(
            ll.Conv2d(self.depths[self.didx-1], self.depths[self.didx], 3, padding=1)
        )
        blk.append(
            ll.Conv2d(self.depths[self.didx], self.depths[self.didx], 3, padding=1)
        )
        self.G.append(blk)
        self.toRGB.append(ll.Conv2d(self.depths[self.didx], self.img_channels, 1, lrelu=False, pnorm=False)) # toRGB

        self.fromRGB.append(ll.Conv2d(self.img_channels, self.depths[self.didx], 1)) # fromRGB
        blk = nn.ModuleList()
        blk.append(
            ll.Conv2d(self.depths[self.didx], self.depths[self.didx], 3, padding=1)
        )
        blk.append(
            ll.Conv2d(self.depths[self.didx], self.depths[self.didx-1], 3, padding=1)
        )
        blk.append(
            nn.AvgPool2d(2, stride=2)
        )
        self.D.append(blk)
        self.to(self.device)
        self.set_optimizer()
        self.set_alpha(0.)
Example #8
    def __init__(self, normalize, stochastic, device):
        super(LeNet5, self).__init__()
        self.stochastic = stochastic
        if stochastic:
            args = [device, normalize]

            # from linked paper top of page 4 and section 2.2
            module_list = [
                layers.Conv2d(1, 6, 5, *args),
                nn.AvgPool2d(2),
                layers.Conv2d(6, 16, 5, *args),
                nn.AvgPool2d(2),
                layers.Conv2d(16, 120, 5, *args),
                layers.Linear(120, 84, *args)
            ]
            self.linear_layer = nn.Linear(84, 10, bias=False)
            torch.nn.init.orthogonal_(self.linear_layer.weight)
            self.linear_layer.weight.requires_grad = False

        else:
            module_list = [
                nn.Conv2d(1, 6, 5),
                nn.Tanh(),
                nn.AvgPool2d(2),
                nn.Tanh(),
                nn.Conv2d(6, 16, 5),
                nn.Tanh(),
                nn.AvgPool2d(2),
                nn.Tanh(),
                nn.Conv2d(16, 120, 5),
                nn.Tanh(),
                nn.Linear(120, 84),
            ]
            self.linear_layer = nn.Linear(84, 10, bias=False)

        self.layers = nn.ModuleList(module_list)
Example #9
    def __init__(self, normalize, stochastic, device):
        super(SimpleConv, self).__init__()
        self.device = device
        self.stochastic = stochastic

        if self.stochastic:
            args = [device, normalize]

            self.conv1 = layers.Conv2d(3, 6, 5, *args)
            self.conv2 = layers.Conv2d(6, 16, 5, *args)
            self.fc1 = layers.Linear(16 * 5 * 5, 120, *args)
            self.fc2 = layers.Linear(120, 84, *args)
        else:
            self.conv1 = nn.Conv2d(3, 6, 5)
            self.conv2 = nn.Conv2d(6, 16, 5)
            self.fc1 = nn.Linear(16 * 5 * 5, 120)
            self.fc2 = nn.Linear(120, 84)

        self.classifier = nn.Linear(84, 10, bias=False)

        if self.stochastic:
            self.classifier.weight.requires_grad = False

        torch.nn.init.orthogonal_(self.classifier.weight)
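The 16 * 5 * 5 input size of fc1 matches the usual 32x32 CIFAR-10 pipeline if the forward pass applies 2x2 pooling after each convolution; both the input resolution and the pooling are assumptions, since neither appears in this __init__.

    # 32x32 input: 5x5 conv -> 28x28, 2x2 pool -> 14x14, 5x5 conv -> 10x10, 2x2 pool -> 5x5
    # flattened features: 16 * 5 * 5 = 400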
Example #10
    def add_scale(self):
        self.step += 1

        in_features = self.features * 2 ** (8 - max(3, self.step + 1))
        out_features = self.features * 2 ** (8 - max(3, self.step)) 

        self.prev_from_rgb = self.from_rgb
        self.from_rgb = ll.Conv2d(self.channels, in_features, 1, stride=1, padding=0)
        
        new_block = nn.Sequential(
            ll.ConvBlock(in_features, in_features, 3, stride=1, padding=1),
            ll.ConvBlock(in_features, out_features, 3, stride=1, padding=1),
        )
        self.blocks.insert(0, new_block)

        self.optim = optim.Adam(self.parameters(), lr=self.lr, betas=self.betas, eps=self.eps)

        self.to(self.device)
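A hypothetical usage sketch of the growing step (the class name and constructor arguments are assumptions, not from the example): each call prepends a new highest-resolution block, swaps in a fresh from_rgb head while keeping the old one for fade-in, and rebuilds the optimizer because the parameter set changed.

    # d = Discriminator(features=16, channels=3, lr=1e-3, betas=(0.0, 0.99), eps=1e-8)
    # d.add_scale()    # grow from 4x4 to 8x8 inputs
    # d.alpha = 0.0    # start the fade-in, then ramp alpha towards 1.0 during training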
Example #11
    def build(self, X):
        """
        Build the graph of the network:
        ----------
        Args:
            X: Tensor, [1, height, width, 3]
        Returns:
            logits: Tensor, predicted annotated image, flattened to
                    [1 * height * width, num_classes]
        """

        dropout_prob = tf.where(True, 0.2, 1.0)

        # Left Side
        down_1_conv_1 = layers.Conv2d(X, [3, 3], 64, 'down_1_conv_1')
        down_1_conv_2 = layers.Conv2d(down_1_conv_1, [3, 3], 64,
                                      'down_1_conv_2')
        down_1_pool = layers.Maxpool(down_1_conv_2, [2, 2], 'down_1_pool')

        down_2_conv_1 = layers.Conv2d(down_1_pool, [3, 3], 128,
                                      'down_2_conv_1')
        down_2_conv_2 = layers.Conv2d(down_2_conv_1, [3, 3], 128,
                                      'down_2_conv_2')
        down_2_pool = layers.Maxpool(down_2_conv_2, [2, 2], 'down_2_pool')

        down_3_conv_1 = layers.Conv2d(down_2_pool, [3, 3], 256,
                                      'down_3_conv_1')
        down_3_conv_2 = layers.Conv2d(down_3_conv_1, [3, 3], 256,
                                      'down_3_conv_2')
        down_3_pool = layers.Maxpool(down_3_conv_2, [2, 2], 'down_3_pool')
        down_3_drop = layers.Dropout(down_3_pool, dropout_prob, 'down_3_drop')

        down_4_conv_1 = layers.Conv2d(down_3_drop, [3, 3], 512,
                                      'down_4_conv_1')
        down_4_conv_2 = layers.Conv2d(down_4_conv_1, [3, 3], 512,
                                      'down_4_conv_2')
        down_4_pool = layers.Maxpool(down_4_conv_2, [2, 2], 'down_4_pool')
        down_4_drop = layers.Dropout(down_4_pool, dropout_prob, 'down_4_drop')

        down_5_conv_1 = layers.Conv2d(down_4_drop, [3, 3], 1024,
                                      'down_5_conv_1')
        down_5_conv_2 = layers.Conv2d(down_5_conv_1, [3, 3], 1024,
                                      'down_5_conv_2')
        down_5_drop = layers.Dropout(down_5_conv_2, dropout_prob,
                                     'down_5_drop')

        # Right Side
        up_6_deconv = layers.Deconv2d(down_5_drop, 2, 'up_6_deconv')
        up_6_concat = layers.Concat(up_6_deconv, down_4_conv_2, 'up_6_concat')
        up_6_conv_1 = layers.Conv2d(up_6_concat, [3, 3], 512, 'up_6_conv_1')
        up_6_conv_2 = layers.Conv2d(up_6_conv_1, [3, 3], 512, 'up_6_conv_2')
        up_6_drop = layers.Dropout(up_6_conv_2, dropout_prob, 'up_6_drop')

        up_7_deconv = layers.Deconv2d(up_6_drop, 2, 'up_7_deconv')
        up_7_concat = layers.Concat(up_7_deconv, down_3_conv_2, 'up_7_concat')
        up_7_conv_1 = layers.Conv2d(up_7_concat, [3, 3], 256, 'up_7_conv_1')
        up_7_conv_2 = layers.Conv2d(up_7_conv_1, [3, 3], 256, 'up_7_conv_2')
        up_7_drop = layers.Dropout(up_7_conv_2, dropout_prob, 'up_7_drop')

        up_8_deconv = layers.Deconv2d(up_7_drop, 2, 'up_8_deconv')
        up_8_concat = layers.Concat(up_8_deconv, down_2_conv_2, 'up_8_concat')
        up_8_conv_1 = layers.Conv2d(up_8_concat, [3, 3], 128, 'up_8_conv_1')
        up_8_conv_2 = layers.Conv2d(up_8_conv_1, [3, 3], 128, 'up_8_conv_2')

        up_9_deconv = layers.Deconv2d(up_8_conv_2, 2, 'up_9_deconv')
        up_9_concat = layers.Concat(up_9_deconv, down_1_conv_2, 'up_9_concat')
        up_9_conv_1 = layers.Conv2d(up_9_concat, [3, 3], 64, 'up_9_conv_1')
        up_9_conv_2 = layers.Conv2d(up_9_conv_1, [3, 3], 64, 'up_9_conv_2')

        score = layers.Conv2d(up_9_conv_2, [1, 1], 1, 'score')
        logits = tf.reshape(score, (-1, 1))

        return logits
Example #12
    def __init__(
        self,
        bottom_up: M.Module,
        in_features: List[str],
        out_channels: int = 256,
        norm: str = None,
        top_block: M.Module = None,
        strides: List[int] = [8, 16, 32],
        channels: List[int] = [512, 1024, 2048],
    ):
        """
        Args:
            bottom_up (M.Module): module representing the bottom-up sub-network.
                It generates multi-scale feature maps formatted as a
                dict, e.g. {'res3': res3_feature, 'res4': res4_feature}.
            in_features (list[str]): list of input feature maps keys coming
                from the `bottom_up` which will be used in FPN.
                e.g. ['res3', 'res4', 'res5']
            out_channels (int): number of channels used in the output
                feature maps.
            norm (str): the normalization type.
            top_block (M.Module or None): the module built upon FPN layers.
        """
        super(FPN, self).__init__()

        in_strides = strides
        in_channels = channels
        norm = layers.get_norm(norm)

        use_bias = norm is None
        self.lateral_convs = list()
        self.output_convs = list()

        for idx, in_channels in enumerate(in_channels):
            lateral_norm = None if norm is None else norm(out_channels)
            output_norm = None if norm is None else norm(out_channels)

            lateral_conv = layers.Conv2d(
                in_channels,
                out_channels,
                kernel_size=1,
                bias=use_bias,
                norm=lateral_norm,
            )
            output_conv = layers.Conv2d(
                out_channels,
                out_channels,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=use_bias,
                norm=output_norm,
            )
            M.init.msra_normal_(lateral_conv.weight, mode="fan_in")
            M.init.msra_normal_(output_conv.weight, mode="fan_in")

            if use_bias:
                M.init.fill_(lateral_conv.bias, 0)
                M.init.fill_(output_conv.bias, 0)

            stage = int(math.log2(in_strides[idx]))

            setattr(self, "fpn_lateral{}".format(stage), lateral_conv)
            setattr(self, "fpn_output{}".format(stage), output_conv)
            self.lateral_convs.insert(0, lateral_conv)
            self.output_convs.insert(0, output_conv)

        self.top_block = top_block
        self.in_features = in_features
        self.bottom_up = bottom_up

        # Following common practice, FPN features are named "p<stage>",
        # like ["p2", "p3", ..., "p6"]
        self._out_feature_strides = {
            "p{}".format(int(math.log2(s))): s
            for s in in_strides
        }

        # top block output feature maps.
        if self.top_block is not None:
            for s in range(stage, stage + self.top_block.num_levels):
                self._out_feature_strides["p{}".format(s + 1)] = 2**(s + 1)

        self._out_features = list(sorted(self._out_feature_strides.keys()))
        self._out_feature_channels = {
            k: out_channels
            for k in self._out_features
        }
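A hypothetical construction sketch based on the docstring above; the backbone builder and its output feature names are assumptions, not part of this example:

    # backbone = build_resnet50_backbone()   # must expose 'res3', 'res4', 'res5'
    # fpn = FPN(
    #     bottom_up=backbone,
    #     in_features=["res3", "res4", "res5"],
    #     out_channels=256,
    #     norm=None,              # bias is enabled on the convs when norm is None
    #     strides=[8, 16, 32],
    #     channels=[512, 1024, 2048],
    # )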
Example #13
    def __init__(
            self,
            dims=None,
            hyperparams=None,
            channels=None,
            r_seed=None,
            dropout_p=None,
    ):
        super(AutoregressiveVAE, self).__init__()

        self.dims = {
            "batch": 10,
            "alphabet": 21,
            "length": 256,
            "embedding_size": 1
        }
        if dims is not None:
            self.dims.update(dims)
        self.dims.setdefault('input', self.dims['alphabet'])

        self.hyperparams = {
            "encoder": {
                "channels": 48,
                "nonlinearity": "elu",
                "num_dilation_blocks": 3,
                "num_layers": 9,
                "dilation_schedule": None,
                "transformer": False,
                "inverse_temperature": False,
                "embedding_nnet_nonlinearity": "elu",
                "embedding_nnet_size": 200,
                "latent_size": 30,
                "dropout_p": 0.1,
                "dropout_type": "2D",
                "config": "updated",
            },
            "decoder": {
                "channels": 48,
                "nonlinearity": "elu",
                "num_dilation_blocks": 3,
                "num_layers": 9,
                "dilation_schedule": None,
                "transformer": False,
                "inverse_temperature": False,
                "positional_embedding": True,
                "skip_connections": False,  # TODO test effect of skip connections
                "pos_emb_max_len": 400,
                "pos_emb_step": 5,
                "config": "updated",
                "dropout_type": "2D",
                "dropout_p": 0.5,
            },
            "sampler_hyperparams": {
                'warm_up': 10000,
                'annealing_type': 'linear',
                'anneal_kl': True,
                'anneal_noise': True
            },
            "embedding_hyperparams": {
                'warm_up': 10000,
                'annealing_type': 'piecewise_linear',
                'anneal_kl': True,
                'anneal_noise': True
            },
            "random_seed": 42,
            "optimization": {
                "l2_regularization": True,
                "bayesian": True,
                "l2_lambda": 1.,
                "bayesian_logits": False,
                "mle_logits": False,
            }
        }
        if hyperparams is not None:
            recursive_update(self.hyperparams, hyperparams)
        if channels is not None:
            self.hyperparams['encoder']['channels'] = channels
        if dropout_p is not None:
            self.hyperparams['decoder']['dropout_p'] = dropout_p
        if r_seed is not None:
            self.hyperparams['random_seed'] = r_seed

        # initialize encoder modules
        enc_params = self.hyperparams['encoder']
        nonlin = nonlinearity(enc_params['nonlinearity'])

        self.encoder = nn.ModuleDict()
        self.encoder.start_conv = layers.Conv2d(
            self.dims['input'],
            enc_params['channels'],
            kernel_width=(1, 1),
            activation=nonlin,
        )

        self.encoder.dilation_blocks = nn.ModuleList()
        for block in range(enc_params['num_dilation_blocks']):
            self.encoder.dilation_blocks.append(layers.ConvNet1D(
                channels=enc_params['channels'],
                layers=enc_params['num_layers'],
                dropout_p=enc_params['dropout_p'],
                dropout_type=enc_params['dropout_type'],
                causal=False,
                config=enc_params['config'],
                dilation_schedule=enc_params['dilation_schedule'],
                transpose=False,
                nonlinearity=nonlin,
            ))

        self.encoder.emb_mu_one = nn.Linear(enc_params['channels'], enc_params['embedding_nnet_size'])
        self.encoder.emb_log_sigma_one = nn.Linear(enc_params['channels'], enc_params['embedding_nnet_size'])
        self.encoder.emb_mu_out = nn.Linear(enc_params['embedding_nnet_size'], enc_params['latent_size'])
        self.encoder.emb_log_sigma_out = nn.Linear(enc_params['embedding_nnet_size'], enc_params['latent_size'])
        # TODO try adding flow

        # initialize decoder modules
        dec_params = self.hyperparams['decoder']
        nonlin = nonlinearity(dec_params['nonlinearity'])

        if dec_params['positional_embedding']:
            max_len = dec_params['pos_emb_max_len']
            step = dec_params['pos_emb_step']
            rbf_locations = torch.arange(step, max_len+1, step, dtype=torch.float32)
            rbf_locations = rbf_locations.view(1, dec_params['pos_emb_max_len'] // dec_params['pos_emb_step'], 1, 1)
            self.register_buffer('rbf_locations', rbf_locations)
        else:
            self.register_buffer('rbf_locations', None)

        self.decoder = nn.ModuleDict()
        self.decoder.start_conv = layers.Conv2d(
            (
                self.dims['input'] +
                (
                    dec_params['pos_emb_max_len'] // dec_params['pos_emb_step']
                    if dec_params['positional_embedding'] else 0
                ) +
                enc_params['latent_size']
            ),
            dec_params['channels'],
            kernel_width=(1, 1),
            activation=nonlin,
        )

        self.decoder.dilation_blocks = nn.ModuleList()
        for block in range(dec_params['num_dilation_blocks']):
            self.decoder.dilation_blocks.append(layers.ConvNet1D(
                channels=dec_params['channels'],
                layers=dec_params['num_layers'],
                add_input_channels=enc_params['channels'] if dec_params['skip_connections'] else 0,
                add_input_layer='all' if dec_params['skip_connections'] else None,
                dropout_p=dec_params['dropout_p'],
                dropout_type=dec_params['dropout_type'],
                causal=True,
                config=dec_params['config'],
                dilation_schedule=dec_params['dilation_schedule'],
                transpose=False,
                nonlinearity=nonlin,
            ))

        self.decoder.end_conv = layers.Conv2d(
            dec_params['channels'],
            self.dims['alphabet'],
            kernel_width=(1, 1),
            g_init=0.1,
            activation=None,
        )

        self.step = 0
        self.forward_state = {'kl_embedding': None}
        self.image_summaries = {}
        self._enable_gradient = 'ed'
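With the default hyperparameters above, the decoder's start_conv input width works out to alphabet plus positional-embedding channels plus latent size; a quick check using the defaults defined in this __init__:

        # 21 (input/alphabet) + 400 // 5 (RBF positional channels) + 30 (latent) = 131
        assert 21 + 400 // 5 + 30 == 131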
Example #14
    def __init__(
            self,
            dims=None,
            hyperparams=None,
            channels=None,
            r_seed=None,
            dropout_p=None,
    ):
        super(Autoregressive, self).__init__()

        self.dims = {
            "batch": 10,
            "alphabet": 21,
            "length": 256,
            "embedding_size": 1
        }
        if dims is not None:
            self.dims.update(dims)
        self.dims.setdefault('input', self.dims['alphabet'])

        self.hyperparams = {
            # For purely dilated conv network
            "encoder": {
                "channels": 48,
                "nonlinearity": "elu",
                "num_dilation_blocks": 6,
                "num_layers": 9,
                "dilation_schedule": None,
                "transformer": False,  # TODO transformer
                "inverse_temperature": False,
                "dropout_loc": "inter",  # options = "final", "inter", "gaussian"
                "dropout_p": 0.5,  # probability of zeroing out value, not the keep probability
                "dropout_type": "independent",
                "config": "original",  # options = "original", "updated", "standard"
            },
            "sampler_hyperparams": {
                'warm_up': 1,
                'annealing_type': 'linear',
                'anneal_kl': True,
                'anneal_noise': True
            },
            "embedding_hyperparams": {
                'warm_up': 1,
                'annealing_type': 'linear',
                'anneal_kl': True,
                'anneal_noise': False
            },
            "random_seed": 42,
            "optimization": {
                "l2_regularization": True,
                "bayesian": False,  # TODO implement bayesian
                "l2_lambda": 1.,
                "bayesian_logits": False,
                "mle_logits": False,
            }
        }
        if hyperparams is not None:
            recursive_update(self.hyperparams, hyperparams)
        if channels is not None:
            self.hyperparams['encoder']['channels'] = channels
        if dropout_p is not None:
            self.hyperparams['encoder']['dropout_p'] = dropout_p
        if r_seed is not None:
            self.hyperparams['random_seed'] = r_seed

        # initialize encoder modules
        enc_params = self.hyperparams['encoder']
        nonlin = nonlinearity(enc_params['nonlinearity'])

        self.start_conv = layers.Conv2d(
            self.dims['input'],
            enc_params['channels'],
            kernel_width=(1, 1),
            activation=None,
        )

        self.dilation_blocks = nn.ModuleList()
        for block in range(enc_params['num_dilation_blocks']):
            self.dilation_blocks.append(layers.ConvNet1D(
                channels=enc_params['channels'],
                layers=enc_params['num_layers'],
                dropout_p=enc_params['dropout_p'],
                dropout_type=enc_params['dropout_type'],
                causal=True,
                config=enc_params['config'],
                dilation_schedule=enc_params['dilation_schedule'],
                transpose=False,
                nonlinearity=nonlin,
            ))

        if enc_params['dropout_loc'] == "final":
            self.final_dropout = nn.Dropout(max(enc_params['dropout_p']-0.3, 0.))
        else:
            self.register_parameter('final_dropout', None)

        self.end_conv = layers.Conv2d(
            enc_params['channels'],
            self.dims['alphabet'],
            kernel_width=(1, 1),
            g_init=0.1,
            activation=None,
        )

        self.step = 0
        self.image_summaries = {}

        self.generating = False
        self.generating_reset = True