Example No. 1
    def __init__(self,
                 len_sequence,
                 embedding_size,
                 channels,
                 device,
                 readout_layers=2,
                 layers=3,
                 dropout=0.,
                 kernel_size=3,
                 alphabet_size=4,
                 non_linearity=True):
        super(CNNEncoder, self).__init__()

        # token embedding followed by a stack of length-preserving 1D convolutions
        self.embedding = nn.Embedding(alphabet_size + 1, channels)
        self.conv = torch.nn.Sequential()
        for l in range(layers):
            self.conv.add_module(
                'conv_' + str(l + 1),
                nn.Conv1d(in_channels=channels,
                          out_channels=channels,
                          kernel_size=kernel_size,
                          padding=kernel_size // 2))
            if non_linearity:
                self.conv.add_module('relu_' + str(l + 1), nn.ReLU())

        # flatten the convolved sequence and project it to the embedding size
        flat_size = channels * len_sequence
        self.readout = MLP(in_size=flat_size,
                           hidden_size=embedding_size,
                           out_size=embedding_size,
                           layers=readout_layers,
                           mid_activation='relu',
                           dropout=dropout,
                           device=device)
        self.to(device)
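A forward pass is not part of this snippet; the following is only a sketch of how the pieces above would typically be wired, assuming integer-encoded sequences of shape (batch, len_sequence):

    def forward(self, sequence):
        # sequence: (batch, len_sequence) integer tokens in [0, alphabet_size]
        x = self.embedding(sequence)    # (batch, len_sequence, channels)
        x = x.transpose(1, 2)           # Conv1d expects (batch, channels, length)
        x = self.conv(x)                # length preserved by kernel_size // 2 padding (odd kernels)
        x = x.flatten(start_dim=1)      # (batch, channels * len_sequence), matching flat_size
        return self.readout(x)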
Example No. 2
    def __init__(self, len_sequence, embedding_size, layers, hidden_size, device, dropout=0., alphabet_size=4):
        super(MLPEncoder, self).__init__()

        self.device = device
        self.mlp = MLP(in_size=len_sequence * alphabet_size, hidden_size=hidden_size, out_size=embedding_size,
                       layers=layers, mid_activation='relu', dropout=dropout, device=device)
        self.to(device)
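A hypothetical instantiation, only to make the expected input size concrete (the MLP helper above receives in_size=len_sequence * alphabet_size, i.e. flattened one-hot sequences):

encoder = MLPEncoder(len_sequence=128, embedding_size=64, layers=2,
                     hidden_size=256, device='cpu')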
Example No. 3
    def __init__(self,
                 len_sequence,
                 embedding_size,
                 channels,
                 device,
                 readout_layers=2,
                 layers=3,
                 dropout=0.,
                 kernel_size=3,
                 alphabet_size=4,
                 pooling='avg',
                 non_linearity=True,
                 batch_norm=False,
                 stride=1):
        super(CNN, self).__init__()
        assert pooling in ('none', 'avg', 'max'), "Wrong pooling type"

        self.layers = layers
        self.kernel_size = kernel_size
        self.embedding = nn.Linear(alphabet_size, channels)

        # construct convolutional layers
        self.conv = torch.nn.Sequential()
        for l in range(self.layers):
            self.conv.add_module(
                'conv_' + str(l + 1),
                nn.Conv1d(in_channels=channels,
                          out_channels=channels,
                          kernel_size=kernel_size,
                          padding=kernel_size // 2,
                          stride=stride))
            len_sequence = (len_sequence - 1) // stride + 1
            if batch_norm:
                self.conv.add_module('batchnorm_' + str(l + 1),
                                     nn.BatchNorm1d(num_features=channels))
            if non_linearity:
                self.conv.add_module('relu_' + str(l + 1), nn.ReLU())

            if pooling == 'avg':
                self.conv.add_module(pooling + '_pool_' + str(l + 1),
                                     nn.AvgPool1d(2))
                len_sequence //= 2
            elif pooling == 'max':
                self.conv.add_module(pooling + '_pool_' + str(l + 1),
                                     nn.MaxPool1d(2))
                len_sequence //= 2

        # construct readout
        print(len_sequence)  # sequence length remaining after striding and pooling
        flat_size = channels * len_sequence
        self.readout = MLP(in_size=flat_size,
                           hidden_size=embedding_size,
                           out_size=embedding_size,
                           layers=readout_layers,
                           mid_activation='relu',
                           dropout=dropout,
                           device=device)
        self.to(device)
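The printed value follows from the length bookkeeping inside the loop; a small check with illustrative numbers (len_sequence=128, layers=3, stride=2, pooling='avg'):

L = 128
for _ in range(3):            # layers=3
    L = (L - 1) // 2 + 1      # convolution with stride=2 and kernel_size // 2 padding
    L //= 2                   # 2-wide pooling
print(L)                      # 2, so flat_size = channels * 2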
Example No. 4
    def __init__(self, len_sequence, embedding_size, hidden_size, recurrent_layers, device, readout_layers, alphabet_size=4,
                 dropout=0.0):
        super(GRU, self).__init__()

        self.len_sequence = len_sequence
        self.sequence_encoder = nn.GRU(input_size=alphabet_size, hidden_size=hidden_size, num_layers=recurrent_layers,
                                       dropout=dropout)
        self.readout = MLP(in_size=hidden_size, hidden_size=hidden_size, out_size=embedding_size,
                           layers=readout_layers, mid_activation='relu', dropout=dropout, device=device)
        self.to(device)
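No forward pass is included here either; a plausible sketch, assuming one-hot inputs of shape (batch, len_sequence, alphabet_size) and that the embedding is read from the final hidden state:

    def forward(self, sequence):
        x = sequence.transpose(0, 1)          # nn.GRU defaults to (seq_len, batch, input_size)
        _, h_n = self.sequence_encoder(x)     # h_n: (recurrent_layers, batch, hidden_size)
        return self.readout(h_n[-1])          # project the last layer's final state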
Example No. 5
    def __init__(self,
                 layers,
                 channels,
                 kernel_size,
                 readout_layers=2,
                 non_linearity=True,
                 pooling='avg',
                 dropout=0.0,
                 batch_norm=False,
                 rank=2,
                 temperature=0.05,
                 init_size=1e-3,
                 max_scale=1. - 1e-3,
                 alphabet_size=4,
                 sequence_length=128,
                 device='cpu'):
        super(HypHCCNN, self).__init__(temperature=temperature,
                                       init_size=init_size,
                                       max_scale=max_scale)

        self.alphabet_size = alphabet_size
        self.device = device
        self.embedding = nn.Linear(alphabet_size, channels)
        self.conv = torch.nn.Sequential()
        for l in range(layers):
            self.conv.add_module(
                'conv_' + str(l + 1),
                nn.Conv1d(in_channels=channels,
                          out_channels=channels,
                          kernel_size=kernel_size,
                          padding=kernel_size // 2))
            if batch_norm:
                self.conv.add_module('batchnorm_' + str(l + 1),
                                     nn.BatchNorm1d(num_features=channels))
            if non_linearity:
                self.conv.add_module('relu_' + str(l + 1), nn.ReLU())

            if pooling == 'avg':
                self.conv.add_module(pooling + '_pool_' + str(l + 1),
                                     nn.AvgPool1d(2))
            elif pooling == 'max':
                self.conv.add_module(pooling + '_pool_' + str(l + 1),
                                     nn.MaxPool1d(2))

        if pooling == 'none':
            flat_size = channels * sequence_length
        else:
            flat_size = channels * (sequence_length // 2**layers)
        self.readout = MLP(in_size=flat_size,
                           hidden_size=rank,
                           out_size=rank,
                           layers=readout_layers,
                           mid_activation='relu',
                           dropout=dropout,
                           device=device)
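The flat_size computation implicitly assumes sequence_length is divisible by 2**layers when pooling is enabled; with illustrative numbers:

# sequence_length=128, layers=3, pooling='avg'
# flat_size = channels * (128 // 2**3) = channels * 16
# (each AvgPool1d(2) or MaxPool1d(2) halves the length, provided it stays even)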
Example No. 6
    def __init__(self,
                 len_sequence,
                 segment_size,
                 embedding_size,
                 hidden_size,
                 trans_layers,
                 readout_layers,
                 device,
                 alphabet_size=4,
                 dropout=0.0,
                 heads=1,
                 layer_norm=False,
                 mask='empty'):
        super(Transformer, self).__init__()

        self.segment_size = segment_size

        if mask == "empty":
            self.mask_sequence = generate_empty_mask(len_sequence //
                                                     segment_size).to(device)
        elif mask == "no_prev":
            self.mask_sequence = generate_square_previous_mask(
                len_sequence // segment_size).to(device)
        elif mask[:5] == "local":
            self.mask_sequence = generate_local_mask(
                len_sequence // segment_size, k=int(mask[5:])).to(device)

        self.sequence_trans = TransformerEncoderModel(
            ntoken=alphabet_size * segment_size,
            nout=hidden_size,
            ninp=hidden_size,
            nhead=heads,
            nhid=hidden_size,
            nlayers=trans_layers,
            dropout=dropout,
            layer_norm=layer_norm,
            max_len=len_sequence // segment_size)

        self.readout = MLP(in_size=len_sequence // segment_size * hidden_size,
                           hidden_size=embedding_size,
                           out_size=embedding_size,
                           layers=readout_layers,
                           dropout=dropout,
                           device=device)

        self.to(device)
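A hypothetical instantiation, only to illustrate the mask argument: a string such as 'local8' takes the "local" branch above and parses the window size via int(mask[5:]):

model = Transformer(len_sequence=1024, segment_size=8, embedding_size=128,
                    hidden_size=64, trans_layers=2, readout_layers=1,
                    device='cpu', heads=4, mask='local8')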
Example No. 7
    def __init__(self,
                 len_sequence,
                 embedding_size,
                 layers,
                 hidden_size,
                 device,
                 batch_norm=True,
                 dropout=0.,
                 alphabet_size=4):
        super(MLPEncoder, self).__init__()

        self.encoder = MLP(in_size=alphabet_size * len_sequence,
                           hidden_size=hidden_size,
                           out_size=embedding_size,
                           layers=layers,
                           mid_activation='relu',
                           dropout=dropout,
                           device=device,
                           mid_b_norm=batch_norm)
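A minimal forward sketch for this variant, assuming one-hot inputs of shape (batch, len_sequence, alphabet_size):

    def forward(self, sequence):
        # flatten to (batch, alphabet_size * len_sequence), matching the MLP's in_size
        return self.encoder(sequence.flatten(start_dim=1))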