Example #1
 def create(self) -> Module:
     return ReLU(self.inplace)
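The factory above simply wraps torch.nn.ReLU, forwarding the inplace flag. A minimal standalone sketch of what that flag changes (not code from the source project):

import torch
from torch.nn import ReLU

x = torch.tensor([-1.0, 0.0, 2.0])
print(ReLU()(x))       # tensor([0., 0., 2.]); x itself is left unchanged

ReLU(inplace=True)(x)  # clamps x in place, saving one allocation
print(x)               # tensor([0., 0., 2.])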
Example #2
 def __init__(self, i_dim, h_dim, e_dim, times):
     super(BaselineRegressNet, self).__init__()
     self.lin0 = Sequential(Linear(i_dim, h_dim), ReLU())
     self.conv_layer = ConvLayer(h_dim, e_dim, times)
     self.lin1 = Sequential(Linear(h_dim, h_dim), ReLU(), Linear(h_dim, 1))
Example #3
 def __init__(self, h_dim, e_dim, times=3):
     super(ConvLayer, self).__init__()
     nn = Sequential(Linear(e_dim, h_dim), ReLU(), Linear(h_dim, h_dim * h_dim))
     self.conv = NNConv(h_dim, h_dim, nn, aggr='mean')
     self.gru = GRU(h_dim, h_dim)
     self.times = times
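The snippet defines the layers but not the forward pass. A plausible, self-contained sketch of how an NNConv + GRU block like this is typically applied (following the common PyTorch Geometric MPNN pattern; the loop, shapes, and toy graph are assumptions, not code from the source project):

import torch
import torch.nn.functional as F
from torch.nn import GRU, Linear, ReLU, Sequential
from torch_geometric.nn import NNConv

h_dim, e_dim, times = 16, 4, 3
edge_nn = Sequential(Linear(e_dim, h_dim), ReLU(), Linear(h_dim, h_dim * h_dim))
conv = NNConv(h_dim, h_dim, edge_nn, aggr='mean')
gru = GRU(h_dim, h_dim)

x = torch.randn(10, h_dim)                      # 10 nodes with h_dim features
edge_index = torch.randint(0, 10, (2, 40))      # 40 random directed edges
edge_attr = torch.randn(40, e_dim)              # one e_dim-dimensional feature per edge

h = x.unsqueeze(0)                              # GRU hidden state: (1, num_nodes, h_dim)
for _ in range(times):
    m = F.relu(conv(x, edge_index, edge_attr))  # one message-passing step
    x, h = gru(m.unsqueeze(0), h)               # GRU update of the node states
    x = x.squeeze(0)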
Example #4
File: model.py  Project: basiralab/DGN
 def __init__(self, MODEL_PARAMS):
     super(DGN, self).__init__()
     self.model_params = MODEL_PARAMS
     
     nn = Sequential(Linear(self.model_params["Linear1"]["in"], self.model_params["Linear1"]["out"]), ReLU())
     self.conv1 = NNConv(self.model_params["conv1"]["in"], self.model_params["conv1"]["out"], nn, aggr='mean')
     
     nn = Sequential(Linear(self.model_params["Linear2"]["in"], self.model_params["Linear2"]["out"]), ReLU())
     self.conv2 = NNConv(self.model_params["conv2"]["in"], self.model_params["conv2"]["out"], nn, aggr='mean')
     
     nn = Sequential(Linear(self.model_params["Linear3"]["in"], self.model_params["Linear3"]["out"]), ReLU())
     self.conv3 = NNConv(self.model_params["conv3"]["in"], self.model_params["conv3"]["out"], nn, aggr='mean')
Example #5
    def __init__(self, dim):
        super(NetGIN, self).__init__()

        num_features = 445

        nn1_1 = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim))
        nn1_2 = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim))
        self.conv1_1 = GINConv(nn1_1, train_eps=True)
        self.conv1_2 = GINConv(nn1_2, train_eps=True)
        self.bn1 = torch.nn.BatchNorm1d(dim)
        self.mlp_1 = Sequential(Linear(2 * dim, dim), ReLU(), Linear(dim, dim))

        nn2_1 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        nn2_2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv2_1 = GINConv(nn2_1, train_eps=True)
        self.conv2_2 = GINConv(nn2_2, train_eps=True)
        self.bn2 = torch.nn.BatchNorm1d(dim)
        self.mlp_2 = Sequential(Linear(2 * dim, dim), ReLU(), Linear(dim, dim))

        nn3_1 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        nn3_2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv3_1 = GINConv(nn3_1, train_eps=True)
        self.conv3_2 = GINConv(nn3_2, train_eps=True)
        self.bn3 = torch.nn.BatchNorm1d(dim)
        self.mlp_3 = Sequential(Linear(2 * dim, dim), ReLU(), Linear(dim, dim))

        nn4_1 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        nn4_2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv4_1 = GINConv(nn4_1, train_eps=True)
        self.conv4_2 = GINConv(nn4_2, train_eps=True)
        self.bn4 = torch.nn.BatchNorm1d(dim)
        self.mlp_4 = Sequential(Linear(2 * dim, dim), ReLU(), Linear(dim, dim))

        self.fc1 = Linear(4 * dim, dim)
        self.fc2 = Linear(dim, dim)
        self.fc3 = Linear(dim, dim)
        self.fc4 = Linear(dim, 1)
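Each GINConv above wraps a small two-layer MLP and keeps GIN's epsilon trainable via train_eps=True. A minimal standalone usage sketch of a single GINConv followed by graph pooling (the feature sizes and toy graph are illustrative, not from the source project):

import torch
from torch.nn import Linear, ReLU, Sequential
from torch_geometric.nn import GINConv, global_add_pool

dim = 32
mlp = Sequential(Linear(8, dim), ReLU(), Linear(dim, dim))
conv = GINConv(mlp, train_eps=True)             # train_eps=True makes epsilon a learnable parameter

x = torch.randn(6, 8)                           # 6 nodes, 8 features each
edge_index = torch.tensor([[0, 1, 2, 3, 4, 5],
                           [1, 2, 3, 4, 5, 0]]) # a 6-node directed cycle
batch = torch.zeros(6, dtype=torch.long)        # all nodes belong to graph 0

h = conv(x, edge_index)                         # node embeddings: (6, dim)
g = global_add_pool(h, batch)                   # graph embedding: (1, dim)
print(h.shape, g.shape)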
Example #6
 def __init__(self):
     super(XENON_GCNN, self).__init__()
     self.lin = Sequential(Linear(FINAL_OUT * 127, 16), ReLU(),
                           Linear(16, 2))
     self.conv1 = GCNConv(3, 16)
     self.conv2 = GCNConv(16, FINAL_OUT)
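For comparison with the GIN examples, here is a minimal standalone GCNConv forward pass matching the 3 -> 16 first layer above (the node count and random edges are assumptions for illustration):

import torch
from torch_geometric.nn import GCNConv

conv1 = GCNConv(3, 16)
x = torch.randn(127, 3)                       # e.g. 127 nodes with 3 features each
edge_index = torch.randint(0, 127, (2, 300))  # random connectivity, just for shapes
h = conv1(x, edge_index)
print(h.shape)                                # torch.Size([127, 16])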
Example #7
    def __init__(self, num_classes, is_test=False, config=None, num_lstm=5):
        """Compose a SSD model using the given components.
		"""
        super(ResNetLSTM3, self).__init__()

        # alpha = 1
        # alpha_base = alpha
        # alpha_ssd = 0.5 * alpha
        # alpha_lstm = 0.25 * alpha

        resnet = resnet101(pretrained=True)
        all_modules = list(resnet.children())
        modules = all_modules[:-4]
        self.base_net = nn.Sequential(*modules)

        modules = all_modules[6:7]
        self.conv_final = nn.Sequential(*modules)

        self.num_classes = num_classes
        self.is_test = is_test
        self.config = config

        # lstm_layers = [BottleNeckLSTM(1024, 256),
        # 			   BottleNeckLSTM(256, 64),
        # 			   BottleNeckLSTM(64, 16),
        # 			   ConvLSTMCell(16, 16),
        # 			   ConvLSTMCell(16, 16)]

        lstm_layers = [
            BottleNeckLSTM(1024, 1024),
            BottleNeckLSTM(512, 512),
            BottleNeckLSTM(256, 256),
            ConvLSTMCell(256, 256),
            ConvLSTMCell(256, 256)
        ]

        self.lstm_layers = nn.ModuleList(
            [lstm_layers[i] for i in range(num_lstm)])

        self.extras = ModuleList([
            Sequential(
                Conv2d(in_channels=1024, out_channels=256, kernel_size=1),
                ReLU(),
                Conv2d(in_channels=256,
                       out_channels=512,
                       kernel_size=3,
                       stride=2,
                       padding=1), ReLU()),
            Sequential(
                Conv2d(in_channels=512, out_channels=128, kernel_size=1),
                ReLU(),
                Conv2d(in_channels=128,
                       out_channels=256,
                       kernel_size=3,
                       stride=2,
                       padding=1), ReLU()),
            Sequential(
                Conv2d(in_channels=256, out_channels=128, kernel_size=1),
                ReLU(), Conv2d(in_channels=128,
                               out_channels=256,
                               kernel_size=3), ReLU()),
            Sequential(
                Conv2d(in_channels=256, out_channels=128, kernel_size=1),
                ReLU(), Conv2d(in_channels=128,
                               out_channels=256,
                               kernel_size=3), ReLU())
        ])

        self.regression_headers = ModuleList([
            Conv2d(in_channels=512,
                   out_channels=4 * 4,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=1024,
                   out_channels=6 * 4,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=512,
                   out_channels=6 * 4,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=256,
                   out_channels=6 * 4,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=256,
                   out_channels=4 * 4,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=256,
                   out_channels=4 * 4,
                   kernel_size=3,
                   padding=1),  # TODO: change to kernel_size=1, padding=0?
        ])

        self.classification_headers = ModuleList([
            Conv2d(in_channels=512,
                   out_channels=4 * num_classes,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=1024,
                   out_channels=6 * num_classes,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=512,
                   out_channels=6 * num_classes,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=256,
                   out_channels=6 * num_classes,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=256,
                   out_channels=4 * num_classes,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=256,
                   out_channels=4 * num_classes,
                   kernel_size=3,
                   padding=1),  # TODO: change to kernel_size=1, padding=0?
        ])

        self.device = torch.device(
            f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
        if is_test:
            self.config = config
            self.priors = config.priors.to(self.device)
Example #8
def test_run_funcs_model_forward():
    funcs = ModuleRunFuncs()
    assert funcs.model_forward.__name__ == tensors_module_forward.__name__
    out = funcs.model_forward(torch.randn(8, 4), Sequential(Linear(4, 8), ReLU()))
    assert out.shape[0] == 8
    assert out.shape[1] == 8
Example #9
def test_run_funcs_model_backward():
    funcs = ModuleRunFuncs()
    assert funcs.model_backward.__name__ == def_model_backward.__name__
    losses = {DEFAULT_LOSS_KEY: default_calcs_for_backwards()}
    module = Sequential(Linear(8, 8), ReLU())
    funcs.model_backward(losses, module)
Example #10
File: edtworesnet.py  Project: WatChMaL/CNN
# PyTorch imports
from torch import cat
from torch.nn import Module, Sequential, Linear, Conv2d, ConvTranspose2d, BatchNorm2d, ReLU
from torch.nn.init import kaiming_normal_, constant_
# -

# WatChMaL imports
from models import resnetblocks

# Global variables
__all__ = [
    'etworesnet18', 'etworesnet34', 'etworesnet50', 'etworesnet101',
    'etworesnet152', 'dtworesnet18', 'dtworesnet34', 'dtworesnet50',
    'dtworesnet101', 'dtworesnet152'
]
_RELU = ReLU()

# -------------------------------
# Encoder architecture layers
# -------------------------------


class EtworesNet(Module):
    def __init__(self,
                 block,
                 layers,
                 num_input_channels,
                 num_latent_dims,
                 zero_init_residual=False):

        super().__init__()
Example #11
def test_run_results(name, loss_tensors, batch_size, expected_mean, expected_std):
    results = ModuleRunResults()

    for loss in loss_tensors:
        results.append({name: loss}, batch_size)

    mean = results.result_mean(name)
    std = results.result_std(name)

    assert len(results.result(name)) == len(loss_tensors)
    assert (mean - expected_mean).abs() < 0.0001
    assert (std - expected_std).abs() < 0.0001


TEST_MODULE = Sequential(
    Linear(8, 16), ReLU(), Linear(16, 32), ReLU(), Linear(32, 1), ReLU()
)


class DatasetImpl(Dataset):
    def __init__(self, length: int):
        self._length = length
        self._x_feats = [torch.randn(8) for _ in range(length)]
        self._y_labs = [torch.randn(1) for _ in range(length)]

    def __getitem__(self, index: int):
        return self._x_feats[index], self._y_labs[index]

    def __len__(self) -> int:
        return self._length
Example #12
    def __init__(self,
                 n_output=1,
                 num_features_xd=78,
                 num_features_xt=25,
                 n_filters=32,
                 embed_dim=128,
                 output_dim=128,
                 dropout=0.2):

        super(GINConvNetClassification, self).__init__()
        dim = 32
        self.n_output = n_output
        # variables to store gradient
        self.grads = {}
        self.grad_x = 0
        self.grad_embedded_xt = 0
        self.grad_conv_xt = 0
        self.grad_conv = 0
        self.conv = 0
        self.conv_xt = 0

        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()

        # convolution layers for drug
        nn1 = Sequential(Linear(num_features_xd, dim), ReLU(),
                         Linear(dim, dim))
        self.conv1 = GINConv(nn1)
        self.bn1 = torch.nn.BatchNorm1d(dim)

        nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv2 = GINConv(nn2)
        self.bn2 = torch.nn.BatchNorm1d(dim)

        nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv3 = GINConv(nn3)
        self.bn3 = torch.nn.BatchNorm1d(dim)

        nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv4 = GINConv(nn4)
        self.bn4 = torch.nn.BatchNorm1d(dim)

        nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv5 = GINConv(nn5)
        self.bn5 = torch.nn.BatchNorm1d(dim)
        #linear layer for drug
        self.fc1_xd = Linear(dim, output_dim)

        # embed layer for protein
        self.embedding_xt = nn.Embedding(num_features_xt + 1, embed_dim)
        self.embedding_xt.weight.requires_grad = False
        # 1D convolution on protein sequence
        self.conv_xt_1 = nn.Conv1d(in_channels=1000,
                                   out_channels=n_filters,
                                   kernel_size=8)
        # linear layer for protein
        self.fc1_xt = nn.Linear(32 * 121, output_dim)

        # combined layers
        self.fc1 = nn.Linear(256, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.classifyout = nn.Linear(
            256, 2 * self.n_output)  # n_output = 2 for classification task
Example #13
        self.prefix = prefix

    def __len__(self):
        return len(os.listdir(self.datadir))

    def __getitem__(self, idx):
        if type(idx) is slice:
            res = [
                self.__getitem__(i) for i in range(
                    *list(filter(None, [idx.start, idx.stop, idx.step])))
            ]
            return res
        fname = os.path.join(self.datadir,
                             self.prefix + '_' + f"{idx:06}" + '.xyz')
        dat = process(fname)
        nx_graph = rdkit_process(dat)
        return from_networkx(nx_graph)


qm9 = QM9Dataset('xyzfiles', 'dsgdb9nsd')
nn = Seq(Lin(4, 32), ReLU(), Lin(32, 1))

conv = GINEConv(nn, train_eps=True, edge_dim=1)
for i in range(1, 11):
    mol = qm9[i]
    print(mol.bond_type)
    processed = conv(mol.coord.float(), mol.edge_index,
                     mol.bond_type.float().view(-1, 1))

    print(processed)
Example #14
def create_Mb_Tiny_RFB_fd(num_classes, is_test=False, device="cuda"):
    base_net = Mb_Tiny_RFB(2)
    base_net_model = base_net.model  # disable dropout layer

    source_layer_indexes = [8, 11, 13]
    extras = ModuleList([
        Sequential(
            Conv2d(in_channels=base_net.base_channel * 16,
                   out_channels=base_net.base_channel * 4,
                   kernel_size=1), ReLU(),
            SeperableConv2d(in_channels=base_net.base_channel * 4,
                            out_channels=base_net.base_channel * 16,
                            kernel_size=3,
                            stride=2,
                            padding=1), ReLU())
    ])

    regression_headers = ModuleList([
        SeperableConv2d(in_channels=base_net.base_channel * 4,
                        out_channels=3 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=base_net.base_channel * 8,
                        out_channels=2 * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=base_net.base_channel * 16,
                        out_channels=2 * 4,
                        kernel_size=3,
                        padding=1),
        Conv2d(in_channels=base_net.base_channel * 16,
               out_channels=3 * 4,
               kernel_size=3,
               padding=1)
    ])

    classification_headers = ModuleList([
        SeperableConv2d(in_channels=base_net.base_channel * 4,
                        out_channels=3 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=base_net.base_channel * 8,
                        out_channels=2 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=base_net.base_channel * 16,
                        out_channels=2 * num_classes,
                        kernel_size=3,
                        padding=1),
        Conv2d(in_channels=base_net.base_channel * 16,
               out_channels=3 * num_classes,
               kernel_size=3,
               padding=1)
    ])

    return SSD(num_classes,
               base_net_model,
               source_layer_indexes,
               extras,
               classification_headers,
               regression_headers,
               is_test=is_test,
               config=config,
               device=device)
Example #15
File: ctgan.py  Project: shuvoworld/CTGAN
 def __init__(self, i, o):
     super(Residual, self).__init__()
     self.fc = Linear(i, o)
     self.bn = BatchNorm1d(o)
     self.relu = ReLU()
Example #16
def test_def_model_backward():
    losses = {DEFAULT_LOSS_KEY: default_calcs_for_backwards()}
    module = Sequential(Linear(8, 8), ReLU())
    def_model_backward(losses, module)
Example #17
 def __init__(self):
     super(Stage_x, self).__init__()
     self.conv1_L1 = Conv2d(in_channels=185,
                            out_channels=128,
                            kernel_size=7,
                            stride=1,
                            padding=3)
     self.conv2_L1 = Conv2d(in_channels=128,
                            out_channels=128,
                            kernel_size=7,
                            stride=1,
                            padding=3)
     self.conv3_L1 = Conv2d(in_channels=128,
                            out_channels=128,
                            kernel_size=7,
                            stride=1,
                            padding=3)
     self.conv4_L1 = Conv2d(in_channels=128,
                            out_channels=128,
                            kernel_size=7,
                            stride=1,
                            padding=3)
     self.conv5_L1 = Conv2d(in_channels=128,
                            out_channels=128,
                            kernel_size=7,
                            stride=1,
                            padding=3)
     self.conv6_L1 = Conv2d(in_channels=128,
                            out_channels=128,
                            kernel_size=1,
                            stride=1,
                            padding=0)
     self.conv7_L1 = Conv2d(in_channels=128,
                            out_channels=38,
                            kernel_size=1,
                            stride=1,
                            padding=0)
     self.conv1_L2 = Conv2d(in_channels=185,
                            out_channels=128,
                            kernel_size=7,
                            stride=1,
                            padding=3)
     self.conv2_L2 = Conv2d(in_channels=128,
                            out_channels=128,
                            kernel_size=7,
                            stride=1,
                            padding=3)
     self.conv3_L2 = Conv2d(in_channels=128,
                            out_channels=128,
                            kernel_size=7,
                            stride=1,
                            padding=3)
     self.conv4_L2 = Conv2d(in_channels=128,
                            out_channels=128,
                            kernel_size=7,
                            stride=1,
                            padding=3)
     self.conv5_L2 = Conv2d(in_channels=128,
                            out_channels=128,
                            kernel_size=7,
                            stride=1,
                            padding=3)
     self.conv6_L2 = Conv2d(in_channels=128,
                            out_channels=128,
                            kernel_size=1,
                            stride=1,
                            padding=0)
     self.conv7_L2 = Conv2d(in_channels=128,
                            out_channels=19,
                            kernel_size=1,
                            stride=1,
                            padding=0)
     self.relu = ReLU()
Example #18
    def __init__(self, n_features, n_embeddings, n_units):
        super(Net, self).__init__()
        self.n_features = n_features
        self.n_embeddings = n_embeddings
        self.n_units = n_units

        self.encoder = ModuleDict({
            'gru':
            GRU(self.n_features,
                self.n_units,
                3,
                dropout=0.1,
                bidirectional=True,
                batch_first=True),
            'linear':
            Linear(2 * self.n_units, self.n_embeddings)
        })

        self.decoder = ModuleDict({
            'gru':
            GRU(self.n_embeddings,
                self.n_units,
                3,
                dropout=0.1,
                bidirectional=True,
                batch_first=True),
            'linear':
            Linear(2 * self.n_units, self.n_features)
        })

        self.decoder1 = ModuleDict({
            'gru':
            GRU(16,
                self.n_units,
                3,
                dropout=0.1,
                bidirectional=True,
                batch_first=True),
            'linear':
            Linear(2 * self.n_units, 16)
        })

        self.decoder2 = ModuleDict({
            'gru':
            GRU(10,
                self.n_units,
                3,
                dropout=0.1,
                bidirectional=True,
                batch_first=True),
            'linear':
            Linear(2 * self.n_units, 10)
        })

        self.decoder3 = ModuleDict({
            'gru':
            GRU(self.n_embeddings,
                self.n_units,
                3,
                dropout=0.1,
                bidirectional=True,
                batch_first=True),
            'linear':
            Linear(2 * self.n_units, 1)
        })

        self.decoder4 = ModuleDict({
            'gru':
            GRU(self.n_embeddings,
                self.n_units,
                3,
                dropout=0.1,
                bidirectional=True,
                batch_first=True),
            'linear':
            Linear(2 * self.n_units, 1)
        })

        self.relu = ReLU()
        self.sigmoid = Sigmoid()
Example #19
 def __init__(self,
              in_channels,
              channels,
              kernel_size,
              stride=(1, 1),
              padding=(0, 0),
              dilation=(1, 1),
              groups=1,
              bias=True,
              radix=2,
              reduction_factor=4,
              rectify=False,
              rectify_avg=False,
              norm=None,
              dropblock_prob=0.0,
              **kwargs):
     super(SplAtConv2d, self).__init__()
     padding = _pair(padding)
     self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
     self.rectify_avg = rectify_avg
     inter_channels = max(in_channels * radix // reduction_factor, 32)
     self.radix = radix
     self.cardinality = groups
     self.channels = channels
     self.dropblock_prob = dropblock_prob
     if self.rectify:
         from rfconv import RFConv2d
         self.conv = RFConv2d(in_channels,
                              channels * radix,
                              kernel_size,
                              stride,
                              padding,
                              dilation,
                              groups=groups * radix,
                              bias=bias,
                              average_mode=rectify_avg,
                              **kwargs)
     else:
         self.conv = Conv2d(in_channels,
                            channels * radix,
                            kernel_size,
                            stride,
                            padding,
                            dilation,
                            groups=groups * radix,
                            bias=bias,
                            **kwargs)
     self.use_bn = norm is not None
     if self.use_bn:
         self.bn0 = get_norm(norm, channels * radix)
     self.relu = ReLU(inplace=True)
     self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
     if self.use_bn:
         self.bn1 = get_norm(norm, inter_channels)
     self.fc2 = Conv2d(inter_channels,
                       channels * radix,
                       1,
                       groups=self.cardinality)
     if dropblock_prob > 0.0:
         self.dropblock = DropBlock2D(dropblock_prob, 3)
     self.rsoftmax = rSoftMax(radix, groups)
Example #20
 def __new__(cls, ndim):
     ind, hd1, hd2, hd3 = ndim
     net = Sequential(Linear(ind, hd1), ReLU(), Linear(hd1, hd2), ReLU(),
                      Linear(hd2, hd3), ReLU())
     return net
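Because __new__ returns a plain Sequential rather than an instance of the class being constructed, calling the class acts as a factory. A self-contained sketch of the same pattern (the class name and dimensions here are hypothetical):

import torch
from torch.nn import Linear, ReLU, Sequential

class MLPFactory:
    def __new__(cls, ndim):
        ind, hd1, hd2, hd3 = ndim
        return Sequential(Linear(ind, hd1), ReLU(), Linear(hd1, hd2), ReLU(),
                          Linear(hd2, hd3), ReLU())

net = MLPFactory((10, 64, 32, 16))
print(type(net))                       # <class 'torch.nn.modules.container.Sequential'>
print(net(torch.randn(2, 10)).shape)   # torch.Size([2, 16])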
Example #21
    def test_conv_bn_relu(
            self,
            batch_size,
            input_channels_per_group,
            height,
            width,
            output_channels_per_group,
            groups,
            kernel_h,
            kernel_w,
            stride_h,
            stride_w,
            pad_h,
            pad_w,
            dilation,
            padding_mode,
            use_relu,
            eps,
            momentum,
            freeze_bn
    ):
        # **** WARNING: This is used to temporarily disable MKL-DNN convolution due
        # to a bug: https://github.com/pytorch/pytorch/issues/23825
        # Once this bug is fixed, this context manager as well as its callsites
        # should be removed!
        with torch.backends.mkldnn.flags(enabled=False):
            input_channels = input_channels_per_group * groups
            output_channels = output_channels_per_group * groups
            dilation_h = dilation_w = dilation

            conv_op = Conv2d(
                input_channels,
                output_channels,
                (kernel_h, kernel_w),
                (stride_h, stride_w),
                (pad_h, pad_w),
                (dilation_h, dilation_w),
                groups,
                False,  # No bias
                padding_mode
            ).to(dtype=torch.double)
            bn_op = BatchNorm2d(output_channels, eps, momentum).to(dtype=torch.double)
            relu_op = ReLU()

            cls = ConvBnReLU2d if use_relu else ConvBn2d
            qat_op = cls(
                input_channels,
                output_channels,
                (kernel_h, kernel_w),
                (stride_h, stride_w),
                (pad_h, pad_w),
                (dilation_h, dilation_w),
                groups,
                None,  # bias
                padding_mode,
                eps,
                momentum,
                freeze_bn=True,
                qconfig=default_qat_qconfig
            ).to(dtype=torch.double)
            qat_op.apply(torch.quantization.disable_fake_quant)
            if freeze_bn:
                qat_op.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
            else:
                qat_op.apply(torch.nn.intrinsic.qat.update_bn_stats)

            # align inputs and internal parameters
            input = torch.randn(batch_size, input_channels, height, width, dtype=torch.double, requires_grad=True)
            conv_op.weight = torch.nn.Parameter(qat_op.weight.detach())
            bn_op.running_mean = qat_op.bn.running_mean.clone()
            bn_op.running_var = qat_op.bn.running_var.clone()
            bn_op.weight = torch.nn.Parameter(qat_op.bn.weight.detach())
            bn_op.bias = torch.nn.Parameter(qat_op.bn.bias.detach())

            def compose(functions):
                # functions are reversed for natural reading order
                return reduce(lambda f, g: lambda x: f(g(x)), functions[::-1], lambda x: x)

            if not use_relu:
                def relu_op(x):
                    return x

            if freeze_bn:
                def ref_op(x):
                    x = conv_op(x)
                    x = (x - bn_op.running_mean.reshape([1, -1, 1, 1])) * \
                        (bn_op.weight / torch.sqrt(bn_op.running_var + bn_op.eps)) \
                        .reshape([1, -1, 1, 1]) + bn_op.bias.reshape([1, -1, 1, 1])
                    x = relu_op(x)
                    return x
            else:
                ref_op = compose([conv_op, bn_op, relu_op])

            input_clone = input.clone().detach().requires_grad_()
            for i in range(2):
                result_ref = ref_op(input)
                result_actual = qat_op(input_clone)
                self.assertEqual(result_ref, result_actual)

                # backward
                dout = torch.randn(result_ref.size(), dtype=torch.double)
                loss = (result_ref - dout).sum()
                loss.backward()
                input_grad_ref = input.grad.cpu()
                weight_grad_ref = conv_op.weight.grad.cpu()
                gamma_grad_ref = bn_op.weight.grad.cpu()
                beta_grad_ref = bn_op.bias.grad.cpu()
                running_mean_ref = bn_op.running_mean
                running_var_ref = bn_op.running_var
                num_batches_tracked_ref = bn_op.num_batches_tracked
                loss = (result_actual - dout).sum()
                loss.backward()
                input_grad_actual = input_clone.grad.cpu()
                weight_grad_actual = qat_op.weight.grad.cpu()
                gamma_grad_actual = qat_op.bn.weight.grad.cpu()
                beta_grad_actual = qat_op.bn.bias.grad.cpu()
                running_mean_actual = qat_op.bn.running_mean
                running_var_actual = qat_op.bn.running_var
                num_batches_tracked_actual = qat_op.bn.num_batches_tracked
                precision = 1e-10
                self.assertEqual(input_grad_ref, input_grad_actual, atol=precision, rtol=0)
                self.assertEqual(weight_grad_ref, weight_grad_actual, atol=precision, rtol=0)
                self.assertEqual(gamma_grad_ref, gamma_grad_actual, atol=precision, rtol=0)
                self.assertEqual(beta_grad_ref, beta_grad_actual, atol=precision, rtol=0)
                self.assertEqual(num_batches_tracked_ref, num_batches_tracked_actual, atol=precision, rtol=0)
                self.assertEqual(running_mean_ref, running_mean_actual, atol=precision, rtol=0)
                self.assertEqual(running_var_ref, running_var_actual, atol=precision, rtol=0)
Example #22
def DeConv2D(in_channels, out_channels):
    return Sequential(
        ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2),
        ReLU(inplace=True),
    )
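A quick shape check, inlining the equivalent of DeConv2D(16, 8) so the sketch is self-contained (the input size is arbitrary): a kernel-2, stride-2 transposed convolution doubles the spatial resolution.

import torch
from torch.nn import ConvTranspose2d, ReLU, Sequential

up = Sequential(
    ConvTranspose2d(16, 8, kernel_size=2, stride=2),
    ReLU(inplace=True),
)
print(up(torch.randn(1, 16, 14, 14)).shape)   # torch.Size([1, 8, 28, 28])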
Example #23
                                input_nodes=val_input_nodes,
                                **kwargs)
else:
    train_loader = HGTLoader(data,
                             num_samples=[1024] * 4,
                             shuffle=True,
                             input_nodes=train_input_nodes,
                             **kwargs)
    val_loader = HGTLoader(data,
                           num_samples=[1024] * 4,
                           input_nodes=val_input_nodes,
                           **kwargs)

model = Sequential('x, edge_index', [
    (SAGEConv((-1, -1), 64), 'x, edge_index -> x'),
    ReLU(inplace=True),
    (SAGEConv((-1, -1), 64), 'x, edge_index -> x'),
    ReLU(inplace=True),
    (Linear(-1, dataset.num_classes), 'x -> x'),
])
model = to_hetero(model, data.metadata(), aggr='sum').to(device)


@torch.no_grad()
def init_params():
    # Initialize lazy parameters via forwarding a single batch to the model:
    batch = next(iter(train_loader))
    batch = batch.to(device, 'edge_index')
    model(batch.x_dict, batch.edge_index_dict)

Example #24
 def forward(self, y_pred_real, y_pred_fake):
     return mn(ReLU()(1.0 - (y_pred_real - mn(y_pred_fake))) +
               ReLU()(1.0 + (y_pred_fake - mn(y_pred_real)))) / 2
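Here `mn` is presumably an alias for torch.mean, in which case this resembles the relativistic average hinge loss used for GAN discriminators. A self-contained sketch under that assumption (the function name is mine, not the source's):

import torch
from torch.nn import ReLU

def ra_hinge_loss(y_pred_real, y_pred_fake):
    relu = ReLU()
    return torch.mean(relu(1.0 - (y_pred_real - torch.mean(y_pred_fake))) +
                      relu(1.0 + (y_pred_fake - torch.mean(y_pred_real)))) / 2

print(ra_hinge_loss(torch.randn(8), torch.randn(8)))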
Example #25
def create_mb_tiny_fd(num_classes, is_test=False, device="cuda"):
    base_net = Mb_Tiny(num_classes)
    base_net_model = base_net.model  # disable dropout layer

    prior_nums = [len(item) for item in config.min_boxes_w]
    print('prior_nums in create_mb_tiny_fd: ', prior_nums)
    print('num_classes in create_mb_tiny_fd: ', num_classes)

    source_layer_indexes = [8, 11, 13]
    extras = ModuleList([
        Sequential(
            Conv2d(in_channels=base_net.base_channel * 16,
                   out_channels=base_net.base_channel * 4,
                   kernel_size=1), ReLU(),
            SeperableConv2d(in_channels=base_net.base_channel * 4,
                            out_channels=base_net.base_channel * 16,
                            kernel_size=3,
                            stride=2,
                            padding=1), ReLU())
    ])

    regression_headers = ModuleList([
        SeperableConv2d(in_channels=base_net.base_channel * 4,
                        out_channels=prior_nums[0] * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=base_net.base_channel * 8,
                        out_channels=prior_nums[1] * 4,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=base_net.base_channel * 16,
                        out_channels=prior_nums[2] * 4,
                        kernel_size=3,
                        padding=1),
        Conv2d(in_channels=base_net.base_channel * 16,
               out_channels=prior_nums[3] * 4,
               kernel_size=3,
               padding=1)
    ])

    classification_headers = ModuleList([
        SeperableConv2d(in_channels=base_net.base_channel * 4,
                        out_channels=prior_nums[0] * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=base_net.base_channel * 8,
                        out_channels=prior_nums[1] * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=base_net.base_channel * 16,
                        out_channels=prior_nums[2] * num_classes,
                        kernel_size=3,
                        padding=1),
        Conv2d(in_channels=base_net.base_channel * 16,
               out_channels=prior_nums[3] * num_classes,
               kernel_size=3,
               padding=1)
    ])

    return SSD(num_classes,
               base_net_model,
               source_layer_indexes,
               extras,
               classification_headers,
               regression_headers,
               is_test=is_test,
               config=config,
               device=device)
Example #26
    def __init__(self):
        super(Net, self).__init__()

        num_features = dataset.num_features
        dim = args.hidden

        nn1 = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim),
                         ReLU(), torch.nn.BatchNorm1d(dim))
        self.conv1 = GINConv(nn1)

        nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                         torch.nn.BatchNorm1d(dim))
        self.conv2 = GINConv(nn2)

        nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                         torch.nn.BatchNorm1d(dim))
        self.conv3 = GINConv(nn3)

        nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                         torch.nn.BatchNorm1d(dim))
        self.conv4 = GINConv(nn4)

        nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                         torch.nn.BatchNorm1d(dim))
        self.conv5 = GINConv(nn5)

        self.fc1 = Sequential(Linear(num_features, dim), ReLU(),
                              torch.nn.BatchNorm1d(dim))
        self.fc2 = Sequential(Linear(dim, dim), ReLU(),
                              torch.nn.BatchNorm1d(dim))

        self.lin = Linear(dim, dataset.num_classes)
Example #27
 def __init__(self, i_dim, h_dim, e_dim, times):
     super(IpsClassifyNet, self).__init__()
     self.lin0 = Sequential(Linear(i_dim, h_dim), ReLU())
     self.conv_layer = ConvLayer(h_dim, e_dim, times)
     self.lin1 = Sequential(Linear(h_dim, h_dim), ReLU(), Linear(h_dim, 2))
Example #28
        counter += 1
        if pred.item() == data[1]:
            num_correct += 1

    accuracy = num_correct / (ldr.__len__())
    print("Accuracy: ", accuracy)
    #return model to training mode
    set_grad_enabled(True)
    net.train()
    return accuracy


#Define binary classifier network
net = Sequential(
    Conv2d(3, filter1, kernel_size=2, stride=1, padding=0),
    ReLU(),
    MaxPool2d(kernel_size=2, stride=2),
    Conv2d(filter1, filter2, kernel_size=4, stride=1, padding=0),
    ReLU(),
    #MaxPool2d(kernel_size=2, stride=2),
    Flatten(),
    Linear(input_dim, layer1),
    ReLU(),
    Linear(layer1, layer2),
    ReLU(),
    Linear(layer2, layer3),
    ReLU(),
    Linear(layer3, out_dim),
)

# x = randn(1, 3, image_size, image_size)
Example #29
 def __init__(self, i_dim, h_dim, e_dim, times):
     super(CausalFeatureNet, self).__init__()
     self.lin0 = Sequential(Linear(i_dim, h_dim), ReLU())
     self.conv_layer = ConvLayer(h_dim, e_dim, times)
Example #30
def MLP(channels, batch_norm=True):
    return Seq(*[
        Seq(Lin(channels[i - 1], channels[i]), ReLU(), BN(channels[i]))
        for i in range(1, len(channels))
    ])
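Seq, Lin, and BN are the usual aliases for torch.nn Sequential, Linear, and BatchNorm1d in PyTorch Geometric examples; note that the batch_norm flag above is accepted but never consulted. A standalone variant that honors the flag, with an illustrative shape check (channel sizes are assumptions):

import torch
from torch.nn import Sequential as Seq, Linear as Lin, ReLU, BatchNorm1d as BN

def MLP(channels, batch_norm=True):
    layers = []
    for i in range(1, len(channels)):
        block = [Lin(channels[i - 1], channels[i]), ReLU()]
        if batch_norm:
            block.append(BN(channels[i]))   # apply BatchNorm1d only when requested
        layers.append(Seq(*block))
    return Seq(*layers)

mlp = MLP([3, 64, 128])
print(mlp(torch.randn(32, 3)).shape)   # torch.Size([32, 128])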