Example #1
    def initLayers(self, params):
        bitwidths, kernel_sizes, nClasses = params
        bitwidths = bitwidths.copy()

        layersPlanes = self.initLayersPlanes()

        # init previous layer
        prevLayer = None

        self.maxpool = nn.MaxPool2d(
            kernel_size=3, stride=2,
            padding=1) if self.dataset == 'imagenet' else lambda x: x
        # create the list of layers from layersPlanes
        # supports bitwidths as a list of ints, i.e. the same bitwidths for all layers
        # supports bitwidths as a list of lists, i.e. specific bitwidths for each layer
        layers = ModuleList()
        for i, (layerType, in_planes, out_planes,
                input_size) in enumerate(layersPlanes):
            # build layer
            kernel_sizes_tmp = kernel_sizes
            if layerType == self.createMixedLayer and self.dataset == 'imagenet':
                kernel_sizes_tmp = [7]
                l = layerType(bitwidths, in_planes, out_planes,
                              kernel_sizes_tmp, 2, input_size, prevLayer)
            else:
                l = layerType(bitwidths, in_planes, out_planes,
                              kernel_sizes_tmp, 1, input_size, prevLayer)
            # add layer to layers list
            layers.append(l)
            # remove layer-specific bitwidths, in case different layers use different bitwidths
            # if isinstance(bitwidths[0], list):
            #     nMixedOpLayers = 1 if isinstance(l, MixedFilter) \
            #         else sum(1 for _, m in l._modules.items() if isinstance(m, MixedFilter))
            #     del bitwidths[:nMixedOpLayers]
            # # update previous layer
            # prevLayer = l.outputLayer()

        self.avgpool = AvgPool2d(7 if self.dataset == 'imagenet' else 4)
        # self.fc = MixedLinear(bitwidths, 64, 10)
        self.fc = Linear(512, nClasses).cuda()

        return layers
Example #2
class DBN(Module):
    def __init__(self):
        super(DBN, self).__init__()
        self._layers = ModuleList()

    def append(self, *models):
        for model in models:
            if (len(self._layers) > 0
                    and model.visible_units != self._layers[-1].hidden_units):
                raise Exception("Bad dimensions")

            self._layers.append(model)

    def sample_h_given_v(self, visible, k=None):
        x = visible
        if k is None:
            k = len(self._layers)
        for i in range(0, k):
            module = self._layers[i]
            x = module.sample_h_given_v(x)
        return x

    def sample_v_given_h(self, hidden, k=-1):
        x = hidden
        for i in range(len(self._layers)-1, k, -1):
            module = self._layers[i]
            x = module.sample_v_given_h(x)
        return x

    def reconstruct(self, visible, k=1):
        v_sample = visible
        for _ in range(k):
            h_sample = self.sample_h_given_v(v_sample)
            v_sample = self.sample_v_given_h(h_sample)
        return v_sample

    def nr_layers(self):
        return len(self._layers)

    @property
    def layers(self):
        return self._layers
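A minimal usage sketch for the DBN above (assuming Module and ModuleList are imported from torch.nn); TinyRBM is a hypothetical stand-in for whatever RBM layer the original code stacks, written only to satisfy the visible_units/hidden_units and sampling interface:

import torch
from torch.nn import Module, Parameter

class TinyRBM(Module):
    """Hypothetical Bernoulli RBM stub exposing the interface DBN expects."""
    def __init__(self, visible_units, hidden_units):
        super().__init__()
        self.visible_units = visible_units
        self.hidden_units = hidden_units
        self.W = Parameter(torch.randn(visible_units, hidden_units) * 0.01)
        self.b_h = Parameter(torch.zeros(hidden_units))
        self.b_v = Parameter(torch.zeros(visible_units))

    def sample_h_given_v(self, v):
        return torch.bernoulli(torch.sigmoid(v @ self.W + self.b_h))

    def sample_v_given_h(self, h):
        return torch.bernoulli(torch.sigmoid(h @ self.W.t() + self.b_v))

dbn = DBN()
dbn.append(TinyRBM(784, 256), TinyRBM(256, 64))  # dimensions checked at append time
v = torch.bernoulli(torch.rand(8, 784))
recon = dbn.reconstruct(v, k=1)                  # one up-and-down pass through the stack
print(recon.shape)                               # torch.Size([8, 784])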
Example #3
class modrnn(torch.nn.Module):  # resnet
    def __init__(self, input_size, hidden_size, num_layers, l):
        super(modrnn, self).__init__()
        self.rnnlist = ModuleList()
        for i in range(l):
            self.rnnlist.append(
                period(input_size=input_size, hidden_size=hidden_size,
                       num_layers=num_layers))
        self.l = l
        self.fc = Sequential(BatchNorm1d(hidden_size * 2 * l),
                             Linear(hidden_size * 2 * l, l), ReLU(),
                             BatchNorm1d(l), Linear(l, 1), Sigmoid())

    def forward(self, x):  # x.size() = (batch, len, channel=4)
        if self.l == 1:
            y = self.rnnlist[0](x)
        else:
            y = self.rnnlist[0](x[:, 0, :, :])
        for j in range(1, self.l):
            y = torch.cat((y, self.rnnlist[j](x[:, j, :, :])), dim=-1)
        z = self.fc(y)

        return z  # y.size() = (batch, hidden_size * 2 * self.l)
Example #4
class GCN(TorchKeras):
    def __init__(self,
                 in_channels,
                 out_channels,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=True):

        super().__init__()

        self.layers = ModuleList()

        inc = in_channels
        for hid, act in zip(hids, acts):
            layer = GraphConv(inc,
                              hid,
                              activation=get_activation(act),
                              bias=use_bias)
            self.layers.append(layer)
            inc = hid
        # output layer
        self.layers.append(GraphConv(inc, out_channels))

        self.dropout = Dropout(p=dropout)
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(self.parameters(),
                                          lr=lr,
                                          weight_decay=weight_decay),
                     metrics=[Accuracy()])

    def forward(self, x, g):

        for i, layer in enumerate(self.layers):
            if i != 0:
                x = self.dropout(x)
            x = layer(g, x)

        return x
Example #5
    def __init__(self,
                 in_channels,
                 out_channels,
                 hids=[16],
                 acts=['relu'],
                 tperc=0.45,
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=False):

        super().__init__()

        layers = ModuleList()
        paras = []

        # use ModuleList to create layers with different size
        inc = in_channels
        for hid, act in zip(hids, acts):
            layer = TrimmedConvolution(inc,
                                       hid,
                                       activation=act,
                                       use_bias=use_bias,
                                       tperc=tperc)
            layers.append(layer)
            paras.append(
                dict(params=layer.parameters(), weight_decay=weight_decay))
            inc = hid

        layer = TrimmedConvolution(inc,
                                   out_channels,
                                   use_bias=use_bias,
                                   tperc=tperc)
        layers.append(layer)
        # do not use weight_decay in the final layer
        paras.append(dict(params=layer.parameters(), weight_decay=0.))
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(paras, lr=lr),
                     metrics=[Accuracy()])
        self.dropout = Dropout(dropout)
        self.layers = layers
Example #6
        def initBlocks(self, params, countFlopsFlag):
            widthRatioList, nClasses, input_size, partition = params

            blocksPlanes = self.initBlocksPlanes()

            # init parameters
            kernel_size = 7
            stride = 2

            # create list of blocks from blocksPlanes
            blocks = ModuleList()
            # output size is divided by 2 due to maxpool after 1st conv layer
            prevLayer = Input(3, int(input_size / 2))

            for i, (blockType, out_planes) in enumerate(blocksPlanes):
                # increase number of out_planes
                out_planes *= 4
                # copy width ratio list
                layerWidthRatioList = widthRatioList.copy()
                # add partition ratio if exists
                if partition:
                    layerWidthRatioList += [partition[i]]
                # build layer
                l = blockType(layerWidthRatioList, out_planes, kernel_size,
                              stride, prevLayer, countFlopsFlag)
                # update kernel size
                kernel_size = 3
                # update stride
                stride = 1
                # add layer to blocks list
                blocks.append(l)
                # update previous layer
                prevLayer = l.outputLayer()

            self.maxpool = MaxPool2d(kernel_size=kernel_size,
                                     stride=2,
                                     padding=1)
            self.avgpool = AvgPool2d(7)
            self.fc = Linear(1024, nClasses).cuda()

            return blocks
Example #7
class NetRelu(Module):
    def __init__(self, layers):
        super(NetRelu, self).__init__()
        self.act = ReLU()
        self.layers = layers

        self.fcs = ModuleList()

        for i in range(len(self.layers) - 1):
            #print(self.layers[i], self.layers[i+1])
            layer = SelfLinear(self.layers[i], self.layers[i + 1])
            init.kaiming_normal_(layer.weight)
            #init.xavier_normal_(layer.weight)
            self.fcs.append(layer)

    def forward(self, X):
        for fc in self.fcs[:-1]:
            X = fc(X)
            X = self.act(X)
        X = self.fcs[-1](X)
        return X
Example #8
class Sequence(Module):
    def __init__(self, *layers):
        super().__init__()
        self.layers = ModuleList()
        for layer in layers:
            assert isinstance(layer, Module)
            self.layers.append(layer)

    def forward_inner(self, x, x_loss):
        for layer in self.layers:
            x, x_loss = layer(x, x_loss)
        return x, x_loss

    def summary_inner(self, num_percentiles):
        xx = []
        for layer in self.layers:
            x = layer.summary(num_percentiles)
            xx.append(x)
        return {
            'layers': xx,
        }
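A minimal sketch of the (x, x_loss) protocol that Sequence expects from its layers (assuming Module and ModuleList come from torch.nn); ScaleWithPenalty is a hypothetical layer written only to satisfy that interface:

import torch
from torch.nn import Module

class ScaleWithPenalty(Module):
    """Hypothetical layer matching the (x, x_loss) contract used by Sequence."""
    def forward(self, x, x_loss):
        return 2 * x, x_loss + x.abs().mean()

    def summary(self, num_percentiles):
        return {'type': 'ScaleWithPenalty'}

seq = Sequence(ScaleWithPenalty(), ScaleWithPenalty())
x, x_loss = seq.forward_inner(torch.randn(4, 3), torch.tensor(0.))
print(x_loss)                                # auxiliary loss accumulated layer by layer
print(seq.summary_inner(num_percentiles=5))  # one summary dict per registered layer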
Example #9
class ConvolutionStack(nn.Module):
    def __init__(self,in_chans,final_relu=True,padding=1):
        super(ConvolutionStack, self).__init__()
        self.convs = ModuleList()
        self.batchnorms = ModuleList()
        self.in_chans = in_chans
        self.final_relu = final_relu
        self.padding = padding

    def append(self,out_chans,filter_size,stride):
        if len(self.convs)==0:
            self.convs.append(nn.Conv2d(self.in_chans, out_chans, filter_size, stride=stride, padding=self.padding))
        else:
            self.convs.append(nn.Conv2d(self.convs[-1].out_channels, out_chans, filter_size, stride=stride, padding=self.padding))
        self.batchnorms.append(nn.BatchNorm2d(out_chans))

    def get_output_dims(self):
        return self.output_dims

    def forward(self, x):
        self.output_dims = []

        for i,c in enumerate(self.convs):
            # lrelu = nn.LeakyReLU(0.2,inplace=True)
            # x = lrelu(c(x))
            x = c(x)
            x = self.batchnorms[i](x)
            if i<len(self.convs)-1 or self.final_relu:
                x = F.relu(x)
            self.output_dims.append(x.size())
        return x
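A minimal usage sketch, assuming the imports the class relies on (torch, torch.nn as nn, torch.nn.functional as F, and ModuleList from torch.nn) are in scope:

import torch

stack = ConvolutionStack(in_chans=3)
stack.append(out_chans=16, filter_size=3, stride=1)  # each append registers a conv + batchnorm
stack.append(out_chans=32, filter_size=3, stride=2)
y = stack(torch.randn(1, 3, 64, 64))
print(y.shape)                  # torch.Size([1, 32, 32, 32])
print(stack.get_output_dims())  # per-layer output sizes recorded during forward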
Example #10
class Net(Module):
    def __init__(self, input_size, hidden_sizes, output_size, lr):
        super(Net, self).__init__()
        max_potential = 3.0
        self.layers = ModuleList()

        linear = Linear(input_size, hidden_sizes[0])
        self.layers.append(linear)

        for i in range(len(hidden_sizes)):

            layer = Integrating(hidden_sizes[i], hidden_sizes[i],
                                max_potential)
            self.layers.append(layer)

        # self.layers.append(Integrating(hidden_sizes[-1], output_size, max_potential, self.layers[-1]))
        self.layers.append(Linear(hidden_sizes[-1], output_size))
        self.loss = CrossEntropyLoss()

        self.optimizer = torch.optim.Adam(self.layers.parameters(), lr=lr)

    def forward(self, x):
        for layer in self.layers:
            x = layer(x)
        return x

    def reset_potentials(self):
        for layer in self.layers:
            if type(layer).__name__ == "Integrating":
                layer.reset_potentials()
Example #11
class CNV_hardware(Module):
    def __init__(self,
                 num_classes=10,
                 weight_bit_width=None,
                 act_bit_width=None,
                 in_bit_width=None,
                 in_ch=3,
                 device="cpu"):
        super(CNV_hardware, self).__init__()
        self.device = device

        weight_quant_type = commons.get_quant_type(weight_bit_width)
        act_quant_type = commons.get_quant_type(act_bit_width)
        in_quant_type = commons.get_quant_type(in_bit_width)
        stats_op = commons.get_stats_op(weight_quant_type)

        self.linear_features = ModuleList()

        # fully connected layers
        self.linear_features.append(
            commons.get_act_quant(in_bit_width, in_quant_type))

        for in_features, out_features in INTERMEDIATE_FC_FEATURES:
            self.linear_features.append(
                commons.get_quant_linear(
                    in_features=in_features,
                    out_features=out_features,
                    per_out_ch_scaling=INTERMEDIATE_FC_PER_OUT_CH_SCALING,
                    bit_width=weight_bit_width,
                    quant_type=weight_quant_type,
                    stats_op=stats_op))
            self.linear_features.append(BatchNorm1d(out_features))
            self.linear_features.append(
                commons.get_act_quant(act_bit_width, act_quant_type))

        # last layer
        self.fc = commons.get_quant_linear(
            in_features=LAST_FC_IN_FEATURES,
            out_features=num_classes,
            per_out_ch_scaling=LAST_FC_PER_OUT_CH_SCALING,
            bit_width=weight_bit_width,
            quant_type=weight_quant_type,
            stats_op=stats_op)

    def forward(self, x):
        for mod in self.linear_features:
            x = mod(x)
        out = self.fc(x)

        return out
Example #12
    def __init__(self, in_channel, out_channel, bn, dr_p, no_tail=False):
        super(LinearBlock, self).__init__()

        mylist = ModuleList()
        mylist.append(Linear(in_channel, out_channel))
        if no_tail == False:
            if bn == 1:
                mylist.append(BatchNorm1d(out_channel))

            mylist.append(ReLU())
            if dr_p > 0:
                mylist.append(Dropout(dr_p))

        self.block = Sequential(*mylist)
Example #13
class TransposedConvolutionStack(nn.Module):
    def __init__(self, in_chans, final_relu=True, padding=1):
        super(TransposedConvolutionStack, self).__init__()
        self.convs = ModuleList()
        self.batchnorms = ModuleList()
        self.in_chans = in_chans
        self.output_dims = []
        self.final_relu = final_relu
        self.padding = padding

    def append(self, out_chans, filter_size, stride):
        if len(self.convs) == 0:
            self.convs.append(
                nn.ConvTranspose2d(self.in_chans,
                                   out_chans,
                                   filter_size,
                                   stride=stride,
                                   padding=self.padding))
        else:
            self.convs.append(
                nn.ConvTranspose2d(self.convs[-1].out_channels,
                                   out_chans,
                                   filter_size,
                                   stride=stride,
                                   padding=self.padding))
        self.batchnorms.append(nn.BatchNorm2d(out_chans))

    def forward(self, x, output_dims=[]):
        # print self.convs
        for i, c in enumerate(self.convs):
            x = c(x, output_size=output_dims[i]) if output_dims else c(x)
            x = self.batchnorms[i](x)
            if i < len(self.convs) - 1 or self.final_relu:
                x = F.relu(x)
        return x
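A minimal sketch pairing this decoder with the ConvolutionStack from Example #9: the encoder's recorded output sizes are fed back through output_dims so each ConvTranspose2d recovers the exact spatial dimensions (assumes both classes and their imports are in scope):

import torch

enc = ConvolutionStack(in_chans=3)
enc.append(16, 3, 2)
enc.append(32, 3, 2)
dec = TransposedConvolutionStack(in_chans=32)
dec.append(16, 3, 2)
dec.append(3, 3, 2)

x = torch.randn(1, 3, 64, 64)
z = enc(x)                                   # shape (1, 32, 16, 16)
dims = [enc.get_output_dims()[0], x.size()]  # target size for each transposed conv
x_rec = dec(z, output_dims=dims)
print(x_rec.shape)                           # torch.Size([1, 3, 64, 64])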
Example #14
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[],
                 activations=[],
                 dropout=0.5,
                 weight_decay=5e-5,
                 lr=0.2,
                 use_bias=False):
        super().__init__()

        if len(hiddens) != len(activations):
            raise RuntimeError(
                f"Arguments 'hiddens' and 'activations' should have the same length."
                " Or you can set both of them to `[]`.")

        layers = ModuleList()
        acts = []
        paras = []
        inc = in_channels
        for hidden, activation in zip(hiddens, activations):
            layer = Linear(inc, hidden, bias=use_bias)
            paras.append(
                dict(params=layer.parameters(), weight_decay=weight_decay))
            layers.append(layer)
            inc = hidden
            acts.append(get_activation(activation))

        layer = Linear(inc, out_channels, bias=use_bias)
        layers.append(layer)
        paras.append(dict(params=layer.parameters(),
                          weight_decay=weight_decay))

        self.layers = layers
        self.acts = acts
        self.dropout = Dropout(dropout)
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(paras, lr=lr),
                     metrics=[Accuracy()])
Example #15
class GraphEmbedding(Module):
    """Embed features to initialize node and edge representations in a graph."""

    _logger = getLogger(__name__)

    def __init__(self, dimensions: List[int],
                 vocabularies: List[Vocabulary]) -> None:
        """Construct a graph embedding layer."""
        super().__init__()
        self.dimensions = dimensions
        self.vocabularies = vocabularies
        self.embeddings = ModuleList()
        self._configured = False

    def forward(self, graph: DGLGraph,
                features: List[Any]) -> DGLGraph:  # type: ignore
        """Embed features to initialize node representations of the graph."""
        if not self._configured:
            self._configure(features)
        graph.ndata["x"] = cat(
            tensors=[
                emb(*feature) if isinstance(feature, tuple) else emb(feature)
                for emb, feature in zip(self.embeddings,
                                        features)  # type: ignore
            ],
            dim=1,
        )
        return graph

    def _configure(self, example_features: List[Any]) -> None:
        for dimension, vocabulary, feature in zip(self.dimensions,
                                                  self.vocabularies,
                                                  example_features):
            embedding_class = EmbeddingBag if isinstance(feature,
                                                         tuple) else Embedding
            self.embeddings.append(
                embedding_class(num_embeddings=len(vocabulary),
                                embedding_dim=dimension))
            self._configured = True
Example #16
class ConditionalDecoder(NeuralNetwork):
    def __init__(self, ll_scaling=1.0, dim_z=latent_dim):
        super(ConditionalDecoder, self).__init__()
        self.dim_z = dim_z
        ngf = 32
        self.init = genUpsample(self.dim_z, ngf * 16, 1, 0)
        self.embedding = Sequential(
            Linear(labels_dim, self.dim_z),
            BatchNorm1d(self.dim_z, momentum=momentum),
            LeakyReLU(negative_slope=negative_slope),
        )
        self.dense_init = Sequential(
            Linear(self.dim_z * 2, self.dim_z),
            BatchNorm1d(self.dim_z, momentum=momentum),
            LeakyReLU(negative_slope=negative_slope),
        )
        self.m_modules = ModuleList()  # to 4x4
        self.c_modules = ModuleList()
        for i in range(4):
            self.m_modules.append(
                genUpsample2(ngf * 2**(4 - i), ngf * 2**(3 - i), 3))
            self.c_modules.append(
                Sequential(
                    Conv2d(ngf * 2**(3 - i), colors_dim, 3, 1, 1, bias=False),
                    Tanh()))
        self.set_optimizer(optimizer,
                           lr=learning_rate * ll_scaling,
                           betas=betas)

    def forward(self, latent, labels, step=3):
        y = self.embedding(labels)
        out = cat((latent, y), dim=1)
        out = self.dense_init(out)
        out = out.unsqueeze(2).unsqueeze(3)
        out = self.init(out)
        for i in range(step):
            out = self.m_modules[i](out)
        out = self.c_modules[step](self.m_modules[step](out))
        return out
Example #17
class GCN(TorchKerasModel):

    def __init__(self, in_channels, out_channels, 
                 hiddens=[16],
                 activations=['relu'],
                 dropout=0.5,
                 l2_norm=5e-4, 
                 lr=0.01, use_bias=False):

        super().__init__()

        self.layers = ModuleList()
        paras = []

        # use ModuleList to create layers with different size
        inc = in_channels
        for hidden, activation in zip(hiddens, activations):
            layer = GraphConvolution(inc, hidden, activation=activation, use_bias=use_bias)
            self.layers.append(layer)
            paras.append(dict(params=layer.parameters(), weight_decay=l2_norm))
            inc = hidden

        layer = GraphConvolution(inc, out_channels, use_bias=use_bias)
        self.layers.append(layer)
        # do not use weight_decay in the final layer
        paras.append(dict(params=layer.parameters(), weight_decay=0.))
        
        self.dropout = Dropout(dropout)
        self.optimizer = optim.Adam(paras, lr=lr)
        self.loss_fn = torch.nn.CrossEntropyLoss()

    def forward(self, inputs):
        x, adj, idx = inputs

        for layer in self.layers:
            x = self.dropout(x)
            x = layer([x, adj])
            
        return x[idx]
Example #18
class Net(torch.nn.Module):
    def __init__(self):
        super().__init__()

        self.node_emb = Embedding(21, 75)
        self.edge_emb = Embedding(4, 50)

        aggregators = ['mean', 'min', 'max', 'std']
        scalers = ['identity', 'amplification', 'attenuation']

        self.convs = ModuleList()
        self.batch_norms = ModuleList()
        for _ in range(4):
            conv = PNAConv(in_channels=75,
                           out_channels=75,
                           aggregators=aggregators,
                           scalers=scalers,
                           deg=deg,
                           edge_dim=50,
                           towers=5,
                           pre_layers=1,
                           post_layers=1,
                           divide_input=False)
            self.convs.append(conv)
            self.batch_norms.append(BatchNorm(75))

        self.mlp = Sequential(Linear(75, 50), ReLU(), Linear(50, 25), ReLU(),
                              Linear(25, 1))

    def forward(self, x, edge_index, edge_attr, batch):
        x = self.node_emb(x.squeeze())
        edge_attr = self.edge_emb(edge_attr)

        for conv, batch_norm in zip(self.convs, self.batch_norms):
            x = F.relu(batch_norm(conv(x, edge_index, edge_attr)))

        x = global_add_pool(x, batch)
        return self.mlp(x)
Example #19
class SVGD_simple(BNN_SVGD):
    def __init__(self,
                 x_dim,
                 y_dim,
                 num_networks=16,
                 network_structure=[32],
                 ll_sigma=1,
                 p_sigma=1,
                 rbf_sigma=1,
                 step_size=0.01):
        """

        :param x_dim: dimension of input
        :param y_dim: dimension of output
        :param num_networks: number of neural networks (or particles)
        :param network_structure: hidden layer structure
        :param ll_sigma: standard deviation of likelihood term
        :param p_sigma:  standard deviation of prior term
        :param rbf_sigma: rbf length scale
        """
        super(SVGD_simple, self).__init__(ll_sigma, p_sigma, rbf_sigma)

        self.num_nn = num_networks
        self.nn_arch = network_structure
        self.nns = ModuleList()
        self.x_dim = x_dim
        self.y_dim = y_dim

        # Initialize all the neural networks, note that we use SingleWeightNeuralNet for experimentation
        # purpose only
        self.bias = False
        for _ in range(num_networks):
            zi = SingleWeightNeuralNet(x_dim, y_dim)
            self.nns.append(zi)

        self.step_size = step_size
        self.svgd_optimizer = optim.SGD(self.parameters(), lr=self.step_size)
        self.prior_factor = 1
Example #20
class NN(Module):
    def __init__(self, num_layers, num_nodes, activation):
        super(NN, self).__init__()

        # Specify list of layer sizes
        sizes = [3] + [num_nodes] * num_layers + [1]
        in_sizes, out_sizes = sizes[:-1], sizes[1:]

        # Construct linear layers
        self.linears = ModuleList()
        for n_in, n_out in zip(in_sizes, out_sizes):
            self.linears.append(Linear(n_in, n_out))

        # Specify activation function
        self.activation = activation

    def forward(self, x):

        for l in self.linears[:-1]:
            x = self.activation(l(x))
        x = self.linears[-1](x)

        return x
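A minimal usage sketch, assuming Module, ModuleList, and Linear are imported from torch.nn; note the input width is fixed to 3 by sizes = [3] + ...:

import torch

net = NN(num_layers=2, num_nodes=16, activation=torch.tanh)
y = net(torch.randn(5, 3))  # 5 samples, 3 input features
print(y.shape)              # torch.Size([5, 1])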
Example #21
class _Transformer(nn.Sequential):
    def __init__(
        self,
        img_size,
        patch_size,
        in_feats,
        embed_size,
        num_heads,
        mlp_dim,
        num_layers,
        out_layers,
        Conv,
        Norm,
    ):
        super().__init__()
        self.out_layers = out_layers
        self.num_layers = num_layers
        self.embed = _Embedding(img_size, patch_size, in_feats, embed_size, Conv)
        self.layers = ModuleList([])

        for _ in range(0, num_layers):
            layer = _TransformerLayer(
                img_size, embed_size, num_heads, mlp_dim, Conv, Norm
            )
            self.layers.append(layer)

    def forward(self, x):
        out = []
        x = self.embed(x)

        for i in range(0, self.num_layers):
            x = self.layers[i](x)
            if i in self.out_layers:
                out.append(x)

        return out
Example #22
class mLSTMCellStacked(Module):

    def __init__(self,
            input_size,
            hidden_size,
            num_layers):
        super(mLSTMCellStacked, self).__init__()
        self._input_size = input_size
        self._hidden_size = hidden_size
        self._num_layers = num_layers
        self._layers = ModuleList([mLSTMCell(input_size, hidden_size)])
        for i in range(num_layers - 1):
            self._layers.append(mLSTMCell(hidden_size, hidden_size))

    def forward(self, input, state=None):
        if state is None:
            state = [None] * self._num_layers

        next_state = []
        for i in range(self._num_layers):
            h, c = self._layers[i].forward(input, state[i])
            input = h
            next_state.append((h, c))
        return next_state
Example #23
class Net(torch.nn.Module):
    def __init__(self,
                 in_channels,
                 hidden_channels,
                 out_channels,
                 num_layers,
                 alpha,
                 dropout=0.):

        super(Net, self).__init__()

        self.lin1 = Linear(in_channels, hidden_channels)

        self.convs = ModuleList()
        self.batch_norms = ModuleList()
        for _ in range(num_layers):
            self.convs.append(GCN2Conv(hidden_channels, alpha, cached=True))
            self.batch_norms.append(BatchNorm(hidden_channels))

        self.lin2 = Linear(hidden_channels, out_channels)

        self.dropout = dropout

    def forward(self, x, adj_t):
        x = F.dropout(x, self.dropout, training=self.training)
        x = x_0 = F.relu(self.lin1(x))

        for conv, batch_norm in zip(self.convs, self.batch_norms):
            x = F.dropout(x, self.dropout, training=self.training)
            x = batch_norm(x)
            x = F.relu(conv(x, x_0, adj_t)) + x

        x = F.dropout(x, self.dropout, training=self.training)
        x = self.lin2(x)

        return x.log_softmax(dim=-1)
Example #24
class DeepProcessor(Module):
    def __init__(self):
        super().__init__()

        hdim2 = 32
        height = width = 32
        pos_emb_init = 0.01
        self.pos_emb = Parameter(torch.Tensor(hdim2, height, width // 2))
        torch.nn.init.normal_(self.pos_emb, mean=0., std=pos_emb_init)
        self.conv = WnConv2d(in_channels=6,
                             out_channels=32,
                             kernel_size=3,
                             padding=1)

        self.gatedconvs = ModuleList([])
        self.norm1 = ModuleList([])
        self.gatedattns = ModuleList([])
        self.norm2 = ModuleList([])
        for _ in range(dequant_blocks):
            self.gatedconvs.append(
                GatedConv_Imagenet32(in_channels=hdim2,
                                     aux_channels=0,
                                     gate_nin=False,
                                     pdrop=pdrop))
            self.norm1.append(ImgLayerNorm(hdim2))
            self.gatedattns.append(
                GatedAttention_Imagenet32(in_channels=hdim2,
                                          heads=attn_heads,
                                          pdrop=pdrop))
            self.norm2.append(ImgLayerNorm(hdim2))

    def forward(self, x):
        processed_context = self.conv(x)
        for i in range(len(self.gatedconvs)):
            processed_context = self.gatedconvs[i](processed_context, aux=None)
            processed_context = self.norm1[i](processed_context)
            processed_context = self.gatedattns[i](processed_context,
                                                   pos_emb=self.pos_emb)
            processed_context = self.norm2[i](processed_context)
        return processed_context
Example #25
class ThickConv2d(Module):
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True):
        super(ThickConv2d, self).__init__()
        self.out_channels_list = ModuleList([])
        for i in range(out_channels):
            self.out_channels_list.append(
                ConvUnit(in_channels, mid_channels, kernel_size, stride,
                         padding, dilation, groups, bias))

    def forward(self, input):
        results = []
        for unit in self.out_channels_list:
            results.append(unit(input))
        out = torch.cat(results, 1)
        return out
Example #26
class _Interactions(Module):
    """Auto attention."""
    def __init__(
        self,
        node_hidden_channels=64,
        num_edge_gaussians=None,
        num_node_interaction_channels=64,
        n_conv=2,
        **kwargs,
    ):
        super(_Interactions, self).__init__()

        _ = num_edge_gaussians

        self.lin0 = Linear(node_hidden_channels, num_node_interaction_channels)
        self.conv = ModuleList()

        for _ in range(n_conv):
            nn = GATConvNew(
                in_channels=num_node_interaction_channels,
                out_channels=num_node_interaction_channels,
                add_self_loops=False,
                bias=True,
            )
            self.conv.append(nn)

        self.n_conv = n_conv

    def forward(self, x, edge_index, edge_weight, edge_attr, **kwargs):

        out = F.softplus(self.lin0(x))
        for convi in self.conv:
            out = out + convi(x=out, edge_index=edge_index)
            # out = F.relu(convi(x=out, edge_index=edge_index))

        return out
Example #27
class MLP(torch.nn.Module):
    def __init__(
        self,
        in_channels: int,
        hidden_channels: int,
        out_channels: int,
        num_layers: int,
        dropout: float = 0.0,
        batch_norm: bool = True,
        relu_last: bool = False,
    ):
        super(MLP, self).__init__()

        self.lins = ModuleList()
        self.lins.append(Linear(in_channels, hidden_channels))
        for _ in range(num_layers - 2):
            self.lins.append(Linear(hidden_channels, hidden_channels))
        self.lins.append(Linear(hidden_channels, out_channels))

        self.batch_norms = ModuleList()
        for _ in range(num_layers - 1):
            norm = BatchNorm1d(hidden_channels) if batch_norm else Identity()
            self.batch_norms.append(norm)

        self.dropout = dropout
        self.relu_last = relu_last

    def reset_parameters(self):
        for lin in self.lins:
            lin.reset_parameters()
        for batch_norm in self.batch_norms:
            batch_norm.reset_parameters()

    def forward(self, x):
        for lin, batch_norm in zip(self.lins[:-1], self.batch_norms):
            x = lin(x)
            if self.relu_last:
                x = batch_norm(x).relu_()
            else:
                x = batch_norm(x.relu_())
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.lins[-1](x)
        return x
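A minimal usage sketch (assuming torch, torch.nn.functional as F, and ModuleList, Linear, BatchNorm1d, Identity from torch.nn are imported as the class requires); with num_layers=3 this builds three Linear layers and two batch norms:

import torch

mlp = MLP(in_channels=8, hidden_channels=32, out_channels=4,
          num_layers=3, dropout=0.2)
out = mlp(torch.randn(10, 8))
print(out.shape)  # torch.Size([10, 4])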
Example #28
class MultiOutputLayer(Module):
    layers: ModuleList

    def __init__(self, input_size: int, metadata: Metadata,
                 categorical_activation: Module) -> None:
        super(MultiOutputLayer, self).__init__()

        self.layers = ModuleList()

        # accumulate binary or numerical variables into "blocks"
        current_block = None

        for variable_metadata in metadata.get_by_independent_variable():
            # first check if a block needs to be created
            if current_block is not None and not current_block.matches_type(
                    variable_metadata):
                # create the block
                self.layers.append(current_block.build(input_size))
                # empty the block
                current_block = None

            # if it is a binary or numerical variable
            if variable_metadata.is_binary() or variable_metadata.is_numerical(
            ):
                # create a block
                if current_block is None:
                    current_block = BlockBuilder(variable_metadata)
                # or add to the existing block
                else:
                    current_block.add(variable_metadata)

            # if it is a categorical variable
            elif variable_metadata.is_categorical():
                # create the categorical layer
                self.layers.append(
                    Sequential(
                        Linear(input_size, variable_metadata.get_size()),
                        categorical_activation))

            # if it is another type
            else:
                raise Exception(
                    "Unexpected variable type '{}' for variable '{}'.".format(
                        variable_metadata.get_type(),
                        variable_metadata.get_name()))

        # if there is still accumulated data for a block
        if current_block is not None:
            # create the last block
            self.layers.append(current_block.build(input_size))

    def forward(self, inputs: Tensor) -> Tensor:
        return torch.cat([layer(inputs) for layer in self.layers], dim=1)
Example #29
    def __init__(self, in_channel, out_channel, cnn_ker, cnn_stride,
                 pool_stride, pool_ker, bn):
        super(CnnModule, self).__init__()
        mylist = ModuleList()
        mylist.append(Conv1d(in_channel, out_channel,
                             kernel_size=cnn_ker,
                             stride=cnn_stride, padding=0,
                             bias=False))
        if bn == 1:
            mylist.append(BatchNorm1d(out_channel))
        if pool_ker > 0:
            mylist.append(MaxPool1d(kernel_size=pool_ker, stride=pool_stride))
        self.mylist = Sequential(*mylist)
Example #30
    def __init__(self, writer, num_hidden_layers=1):
        super(CosineNet, self).__init__()
        input_features = 1
        hidden_output_features = 10
        final_output_features = 1
        self.writer = writer
        layers = ModuleList()
        for i in range(num_hidden_layers):
            if i == 0:
                layers.append(
                    torch.nn.Linear(input_features, hidden_output_features))
            else:
                layers.append(
                    torch.nn.Linear(hidden_output_features,
                                    hidden_output_features))
            layers.append(torch.nn.ReLU())
        final_layer = torch.nn.Linear(hidden_output_features,
                                      final_output_features)
        layers.append(final_layer)
        self.model = torch.nn.Sequential(*layers)
        self.loss_func = torch.nn.MSELoss()