Example #1
    def __init__(self, input_shape, arc='celeba'):
        super(GlobalDiscriminator, self).__init__()
        self.arc = arc
        self.input_shape = input_shape
        self.output_shape = (1024, )
        self.img_c = input_shape[0]
        self.img_h = input_shape[1]
        self.img_w = input_shape[2]

        # input_shape: (None, img_c, img_h, img_w)
        self.conv1 = nn.Conv2d(self.img_c,
                               64,
                               kernel_size=5,
                               stride=2,
                               padding=2)
        self.bn1 = nn.BatchNorm2d(64)
        self.act1 = nn.ReLU()
        # input_shape: (None, 64, img_h//2, img_w//2)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2)
        self.bn2 = nn.BatchNorm2d(128)
        self.act2 = nn.ReLU()
        # input_shape: (None, 128, img_h//4, img_w//4)
        self.conv3 = nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2)
        self.bn3 = nn.BatchNorm2d(256)
        self.act3 = nn.ReLU()
        # input_shape: (None, 256, img_h//8, img_w//8)
        self.conv4 = nn.Conv2d(256, 512, kernel_size=5, stride=2, padding=2)
        self.bn4 = nn.BatchNorm2d(512)
        self.act4 = nn.ReLU()
        # input_shape: (None, 512, img_h//16, img_w//16)
        self.conv5 = nn.Conv2d(512, 512, kernel_size=5, stride=2, padding=2)
        self.bn5 = nn.BatchNorm2d(512)
        self.act5 = nn.ReLU()
        # input_shape: (None, 512, img_h//32, img_w//32)
        if arc == 'celeba':
            in_features = 512 * (self.img_h // 32) * (self.img_w // 32)
            self.flatten6 = Flatten()
            self.linear6 = nn.Linear(in_features, 1024)
            self.act6 = nn.ReLU()
        elif arc == 'places2' or arc == 'pascal':
            self.conv6 = nn.Conv2d(512,
                                   512,
                                   kernel_size=5,
                                   stride=2,
                                   padding=2)
            self.bn6 = nn.BatchNorm2d(512)
            self.act6 = nn.ReLU()
            # input_shape (None, 512, img_h//64, img_w//64)
            in_features = 512 * (self.img_h // 64) * (self.img_w // 64)
            self.flatten7 = Flatten()
            self.linear7 = nn.Linear(in_features, 1024)
            self.act7 = nn.ReLU()
        else:
            raise ValueError('Unsupported architecture \'%s\'.' % self.arc)
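Most of the PyTorch snippets on this page rely on a custom Flatten module rather than nn.Flatten. A minimal sketch of one assumed definition (not taken from any of the listed projects) that is compatible with how Flatten() is used below:

import torch.nn as nn

class Flatten(nn.Module):
    """Collapse every dimension except the batch dimension."""
    def forward(self, x):
        return x.view(x.size(0), -1)

Recent PyTorch versions ship nn.Flatten(), which behaves the same way for these purposes (it flattens from dim 1 onward by default).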
Example #2
 def load_model(self):
     self.backbone = densenet121(False).features
     self.head = nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten(),
                               nn.Linear(1024, 14))
     model_name = 'chexnet.h5'
     state_dict = torch.load(model_name)
     self.load_state_dict(state_dict)
Example #3
def Inference(in_op,num_labels):
    conv1 = Conv(in_op,'conv1',3,1,80)
    conv2 = Conv(conv1,'conv2',3,1,64)
    pool1 = Maxpool(conv2,'pool1',2,2)

    conv3 = Conv(pool1,'conv3',3,1,64)
#    conv4 = Conv(conv3,'conv4',3,1,64)
    pool2 = Maxpool(conv3,'pool2',2,2)

#    conv5 = Conv(pool2,'conv5',3,1,32)
#    conv6 = Conv(conv5,'conv6',3,1,32)
#    pool3 = Maxpool(conv6,'pool3',2,2)
##
#    conv7 = Conv(pool3,'conv7',3,1,246)
#    conv8 = Conv(conv7,'conv8',3,1,256)
#    pool4 = Maxpool(conv8,'pool4',2,2)
#
#    conv9 = Conv(pool4,'conv9',3,1,128)
#    conv10= Conv(conv9,'conv10',3,1,128)
#    pool5 = Maxpool(conv10,'pool5',2,2)

    flat  = Flatten(pool2)
    fc1   = Fc(flat, 'fc1', 128, activation = 'relu')
#    drop1 = tf.nn.dropout(fc1,0.75)
#    fc2   = Fc(drop1,'fc2',32)
#    drop2 = tf.nn.dropout(fc2,0.5)
    logit = Fc(fc1, 'logit', num_labels)
    return logit
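A hedged usage sketch for the logits returned above; the placeholder shapes, the num_labels value, and the TF 1.x-style loss call are assumptions, and Conv/Maxpool/Fc/Flatten are the project's own helpers (not shown here):

import tensorflow as tf

images = tf.compat.v1.placeholder(tf.float32, [None, 28, 28, 1])  # assumed input shape
labels = tf.compat.v1.placeholder(tf.float32, [None, 10])
logits = Inference(images, num_labels=10)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))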
Example #4
    def __init__(self, context: PyTorchTrialContext) -> None:
        self.context = context

        # Create a unique download directory for each rank so they don't overwrite each other.
        self.download_directory = f"/tmp/data-rank{self.context.distributed.get_rank()}"
        self.data_downloaded = False

        self.model = self.context.wrap_model(nn.Sequential(
            nn.Conv2d(1, self.context.get_hparam("n_filters1"), 3, 1),
            nn.ReLU(),
            nn.Conv2d(
                self.context.get_hparam("n_filters1"), self.context.get_hparam("n_filters2"), 3,
            ),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Dropout2d(self.context.get_hparam("dropout1")),
            Flatten(),
            nn.Linear(144 * self.context.get_hparam("n_filters2"), 128),
            nn.ReLU(),
            nn.Dropout2d(self.context.get_hparam("dropout2")),
            nn.Linear(128, 10),
            nn.LogSoftmax(dim=1),
        ))

        self.optimizer = self.context.wrap_optimizer(torch.optim.Adadelta(
            self.model.parameters(), lr=self.context.get_hparam("learning_rate"))
        )
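The 144 factor in the Linear layer presumably corresponds to 28x28 MNIST-style inputs: two unpadded 3x3 convolutions give 26x26 then 24x24, and MaxPool2d(2) halves that to 12x12 = 144 positions per channel. A quick shape check under that assumption (the hyperparameter values below are hypothetical):

import torch
import torch.nn as nn

n_filters1, n_filters2 = 8, 16  # hypothetical values for the two hparams
body = nn.Sequential(
    nn.Conv2d(1, n_filters1, 3, 1), nn.ReLU(),
    nn.Conv2d(n_filters1, n_filters2, 3), nn.ReLU(),
    nn.MaxPool2d(2),
)
out = body(torch.zeros(1, 1, 28, 28))
assert out.shape == (1, n_filters2, 12, 12)  # 12 * 12 = 144 features per channel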
Example #5
    def __init__(self, context: PyTorchTrialContext) -> None:
        self.context = context

        self.download_directory = f"/tmp/data-rank{self.context.distributed.get_rank()}"
        self.data_downloaded = False

        self.model = self.context.wrap_model(
            nn.Sequential(
                nn.Conv2d(1, self.context.get_hparam("n_filters1"), 3, 1),
                nn.ReLU(),
                nn.Conv2d(
                    self.context.get_hparam("n_filters1"),
                    self.context.get_hparam("n_filters2"),
                    3,
                ),
                nn.ReLU(),
                nn.MaxPool2d(2),
                nn.Dropout2d(self.context.get_hparam("dropout1")),
                Flatten(),
                nn.Linear(144 * self.context.get_hparam("n_filters2"), 128),
                nn.ReLU(),
                nn.Dropout2d(self.context.get_hparam("dropout2")),
                nn.Linear(128, 10),
                nn.LogSoftmax(dim=1),
            ))

        self.optimizer = self.context.wrap_optimizer(
            torch.optim.Adadelta(self.model.parameters(),
                                 lr=self.context.get_hparam("learning_rate")))

        # We leave name=None (by not specifying a name) so the reducer returns a dictionary
        # of metrics (one f1_score per class). If we were returning a single metric rather
        # than a dictionary of multiple metrics, we would have to specify the name here.
        self.f1_score = self.context.experimental.wrap_reducer(
            PerClassF1Score())
Example #6
File: networks.py  Project: JmfanBU/colt
    def __init__(self, device, dataset, input_channel, input_size, width,
                 linear_size):
        super(cnn_2layer, self).__init__()

        mean, sigma = get_mean_sigma(device, dataset, IBP=True)
        self.normalizer = Normalization(mean, sigma)

        self.layers = [
            Normalization(mean, sigma),
            Conv2d(input_channel,
                   4 * width,
                   4,
                   stride=2,
                   padding=1,
                   dim=input_size),
            ReLU((4 * width, input_size // 2, input_size // 2)),
            Conv2d(4 * width,
                   8 * width,
                   4,
                   stride=2,
                   padding=1,
                   dim=input_size // 2),
            ReLU((8 * width, input_size // 4, input_size // 4)),
            Flatten(),
            Linear(8 * width * (input_size // 4) * (input_size // 4),
                   linear_size),
            ReLU(linear_size),
            Linear(linear_size, 10),
        ]
Example #7
 def load_model(self, model_name):
     self.backbone = densenet121(False).features
     self.head = nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten(),
                               nn.Linear(1024, 14))
     path = Path('/content/drive/My Drive/SRP/Project/chestX-ray-14')
     state_dict = torch.load(path / model_name / 'chexnet.h5')
     self.load_state_dict(state_dict)
Example #8
    def __init__(self, d, h, w, activation, hidden_size, num_layers, recurrent=False):
        super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)

        self.main = nn.Sequential(
            init_(nn.Conv2d(d, hidden_size, kernel_size=1)),
            activation,
            *[
                nn.Sequential(
                    init_(
                        nn.Conv2d(hidden_size, hidden_size, kernel_size=1), activation
                    ),
                    activation,
                )
                for _ in range(num_layers)
            ],
            # init_(nn.Conv2d(d, 32, 8, stride=4)), nn.ReLU(),
            # init_(nn.Conv2d(32, 64, kernel_size=4, stride=2)), nn.ReLU(),
            # init_(nn.Conv2d(32, 64, kernel_size=4, stride=2)), nn.ReLU(),
            # init_(nn.Conv2d(64, 32, kernel_size=3, stride=1)),
            activation,
            Flatten(),
            # init_(nn.Linear(32 * 7 * 7, hidden_size)), nn.ReLU())
            init_(nn.Linear(hidden_size * h * w, hidden_size)),
            activation,
        )

        self.critic_linear = init_(nn.Linear(hidden_size, 1))

        self.train()
Example #9
 def load_model(self, model_name):
     self.backbone = densenet121(False).features
     self.head = nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten(),
                               nn.Linear(1024, 14))
     path = Path('/home/dattran/data/xray-thesis/chestX-ray14/models')
     state_dict = torch.load(path / model_name / 'best.h5')
     self.load_state_dict(state_dict)
Example #10
def vgg_bn():
    return [
        Conv2D([3, 3], 32, [1, 1, 1, 1], padding='SAME'),
        Conv2DBatchNorm(32),
        Activation(tf.nn.relu),
        Conv2D([3, 3], 32, [1, 1, 1, 1], padding='SAME'),
        Conv2DBatchNorm(32),
        Activation(tf.nn.relu),
        Conv2D([3, 3], 64, [1, 2, 2, 1]),
        Conv2DBatchNorm(64),
        Activation(tf.nn.relu),
        Conv2D([3, 3], 64, [1, 1, 1, 1], padding='SAME'),
        Conv2DBatchNorm(64),
        Activation(tf.nn.relu),
        Conv2D([3, 3], 128, [1, 2, 2, 1]),
        Conv2DBatchNorm(128),
        Activation(tf.nn.relu),
        Conv2D([3, 3], 128, [1, 1, 1, 1], padding='SAME'),
        Conv2DBatchNorm(128),
        Activation(tf.nn.relu),
        Flatten(),
        Dense(128),
        Activation(tf.sigmoid),
        Dropout(0.5),
        Dense(10),
        Activation(tf.nn.softmax),
    ]
Example #11
def generate_sequential_network():

    net = torch.nn.Sequential(torch.nn.Conv2d(1, 6,
                                              kernel_size=(5, 5),
                                              padding=(2, 2)),
                              torch.nn.ReLU(),
                              torch.nn.MaxPool2d(kernel_size=(2, 2),
                                                 stride=(2, 2)),
                              torch.nn.Conv2d(6, 16,
                                              kernel_size=(5, 5),
                                              padding=(0, 0)),
                              torch.nn.ReLU(),
                              torch.nn.MaxPool2d(kernel_size=(2, 2),
                                                 stride=(2, 2)),
                              torch.nn.Conv2d(16, 120,
                                              kernel_size=(5, 5),
                                              padding=(0, 0)),
                              torch.nn.ReLU(),
                              Flatten(),
                              torch.nn.Linear(120, 84),
                              torch.nn.ReLU(),
                              torch.nn.Linear(84, 10),
                              torch.nn.Sigmoid()
                              )

    return net
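Working backwards from Linear(120, 84), this network presumably expects 28x28 single-channel inputs (LeNet-5 style): 28 -> 28 (5x5 conv, padding 2) -> 14 (pool) -> 10 (5x5 conv) -> 5 (pool) -> 1 (5x5 conv), leaving exactly 120 features after Flatten. A usage sketch under that assumption (Flatten as used throughout this page):

import torch

net = generate_sequential_network()
logits = net(torch.zeros(4, 1, 28, 28))  # -> torch.Size([4, 10])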
Example #12
    def __init__(self, input_shape):
        super(LocalDiscriminator, self).__init__()
        self.input_shape = input_shape
        self.output_shape = (1024,)
        self.img_c = input_shape[0]
        self.img_h = input_shape[1]
        self.img_w = input_shape[2]
        # input_shape: (None, img_c, img_h, img_w)
        self.conv1 = nn.Conv2d(self.img_c, 64, kernel_size=5, stride=2, padding=2)
        self.bn1 = nn.BatchNorm2d(64)
        self.act1 = nn.ReLU()
        # input_shape: (None, 64, img_h//2, img_w//2)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2)
        self.bn2 = nn.BatchNorm2d(128)
        self.act2 = nn.ReLU()
        # input_shape: (None, 128, img_h//4, img_w//4)
        self.conv3 = nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2)
        self.bn3 = nn.BatchNorm2d(256)
        self.act3 = nn.ReLU()
        # input_shape: (None, 256, img_h//8, img_w//8)
        self.conv4 = nn.Conv2d(256, 512, kernel_size=5, stride=2, padding=2)
        self.bn4 = nn.BatchNorm2d(512)
        self.act4 = nn.ReLU()
        # input_shape: (None, 512, img_h//16, img_w//16)
        self.conv5 = nn.Conv2d(512, 512, kernel_size=5, stride=2, padding=2)
        self.bn5 = nn.BatchNorm2d(512)
        self.act5 = nn.ReLU()
        # input_shape: (None, 512, img_h//32, img_w//32)

        in_features = 512 * (max(self.img_h//32,1)) * (max(self.img_w//32,1))
        self.flatten6 = Flatten()
        # input_shape: (None, 512 * img_h//32 * img_w//32)
        self.linear6 = nn.Linear(in_features, 1024)
        self.act6 = nn.ReLU()
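The constructor above only defines the layers; a hedged sketch of the forward pass they imply (the project's actual forward() is not part of this excerpt):

    def forward(self, x):
        x = self.act1(self.bn1(self.conv1(x)))
        x = self.act2(self.bn2(self.conv2(x)))
        x = self.act3(self.bn3(self.conv3(x)))
        x = self.act4(self.bn4(self.conv4(x)))
        x = self.act5(self.bn5(self.conv5(x)))
        return self.act6(self.linear6(self.flatten6(x)))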
Example #13
    def __init__(self,
                 width_coeff,
                 depth_coeff,
                 depth_div=8,
                 min_depth=None,
                 dropout_rate=0.2,
                 drop_connect_rate=0.2,
                 num_classes=1000):
        super().__init__()
        min_depth = min_depth or depth_div

        def renew_ch(x):
            if not width_coeff:
                return x

            x *= width_coeff
            new_x = max(min_depth,
                        int(x + depth_div / 2) // depth_div * depth_div)
            if new_x < 0.9 * x:
                new_x += depth_div
            return int(new_x)

        def renew_repeat(x):
            return int(math.ceil(x * depth_coeff))

        self.stem = conv_bn_act(3,
                                renew_ch(32),
                                kernel_size=3,
                                stride=2,
                                bias=False)

        self.blocks = nn.Sequential(
            #       in_ch          out_ch       expand  k  s  repeats          skip  se    drop_connect
            MBBlock(renew_ch(32), renew_ch(16), 1, 3, 1, renew_repeat(1), True,
                    0.25, drop_connect_rate),
            MBBlock(renew_ch(16), renew_ch(24), 6, 3, 2, renew_repeat(2), True,
                    0.25, drop_connect_rate),
            MBBlock(renew_ch(24), renew_ch(40), 6, 5, 2, renew_repeat(2), True,
                    0.25, drop_connect_rate),
            MBBlock(renew_ch(40), renew_ch(80), 6, 3, 2, renew_repeat(3), True,
                    0.25, drop_connect_rate),
            MBBlock(renew_ch(80), renew_ch(112), 6, 5, 1, renew_repeat(3),
                    True, 0.25, drop_connect_rate),
            MBBlock(renew_ch(112), renew_ch(192), 6, 5, 2, renew_repeat(4),
                    True, 0.25, drop_connect_rate),
            MBBlock(renew_ch(192), renew_ch(320), 6, 3, 1, renew_repeat(1),
                    True, 0.25, drop_connect_rate))

        self.head = nn.Sequential(
            *conv_bn_act(renew_ch(320),
                         renew_ch(1280),
                         kernel_size=1,
                         bias=False), nn.AdaptiveAvgPool2d(1),
            nn.Dropout2d(dropout_rate, True)
            if dropout_rate > 0 else nn.Identity(), Flatten(),
            nn.Linear(renew_ch(1280), num_classes))

        self.init_weights()
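To make the renew_ch rounding rule concrete, here is a standalone copy with hypothetical coefficients (width_coeff=1.2 and depth_div=8 are illustrative choices, not the project's defaults):

def renew_ch_demo(x, width_coeff=1.2, depth_div=8, min_depth=8):
    # Same rule as above: scale by the width coefficient, snap to a multiple of
    # depth_div, and bump up one step if rounding removed more than 10% of the channels.
    if not width_coeff:
        return x
    x *= width_coeff
    new_x = max(min_depth, int(x + depth_div / 2) // depth_div * depth_div)
    if new_x < 0.9 * x:
        new_x += depth_div
    return int(new_x)

print(renew_ch_demo(32))   # 40
print(renew_ch_demo(320))  # 384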
Example #14
	def __init__(self):
		super(ConvNet, self).__init__()
		
		self.conv_layers = nn.ModuleList()
		self.flat = Flatten()
		
		self.conv_layers.append(ConvBlock(CHANNELS, 32, 8, 4, act = nn.ReLU()))
		self.conv_layers.append(ConvBlock(32, 64, 4, 2, act = nn.ReLU()))
		self.conv_layers.append(ConvBlock(64, 64, 3, 1, act = nn.ReLU()))
Example #15
 def __init__(self, config):
     super(MLP_VAE, self).__init__()
     self.__dict__.update(config)
     self.flat = Flatten()
     self.dense1 = nn.Linear(self.timesteps * self.input_dim, 64)
     self.densemu = nn.Linear(64, self.latent_dim)
     self.denselogvar = nn.Linear(64, self.latent_dim)
     self.dense2 = nn.Linear(self.latent_dim, 64)
     self.dense3 = nn.Linear(64, self.timesteps * self.input_dim)
     self.reshape = Reshape((self.timesteps, self.input_dim))
Example #16
    def load_pretrained(self, torch=False):
        if torch:
            # torch vision, train the same -> ~0.75 AUC on test
            self.backbone = densenet121(True).features
        else:
            # pretrainmodel, train -> 0.85 AUC on test
            self.backbone = pretrainedmodels.__dict__['densenet121']().features

        self.head = nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten(),
                                  nn.Linear(1024, 14))
Example #17
File: networks.py  Project: JmfanBU/colt
def IBP_large(in_ch, in_dim, linear_size=512):
    model = nn.Sequential(
        nn.Conv2d(in_ch, 64, 3, stride=1, padding=1), nn.ReLU(),
        nn.Conv2d(64, 64, 3, stride=1, padding=1), nn.ReLU(),
        nn.Conv2d(64, 128, 3, stride=2, padding=1), nn.ReLU(),
        nn.Conv2d(128, 128, 3, stride=1, padding=1), nn.ReLU(),
        nn.Conv2d(128, 128, 3, stride=1, padding=1), nn.ReLU(), Flatten(),
        nn.Linear((in_dim // 2) * (in_dim // 2) * 128, linear_size), nn.ReLU(),
        nn.Linear(linear_size, 10))
    return model
Example #18
 def __init__(self, config):
     super(MLP_AE,self).__init__()
     self.__dict__.update(config)
     self.encoder = nn.Sequential(
         Flatten(),
         nn.Linear(self.timesteps * self.input_dim, self.units_enc),
         nn.Linear(self.units_enc, self.latent_dim),
         )
     self.decoder = nn.Sequential(             
         nn.Linear(self.latent_dim,self.units_dec),
         nn.Linear(self.units_dec, self.timesteps * self.input_dim),
         Reshape((self.timesteps, self.input_dim))
         )
Example #19
File: networks.py  Project: JmfanBU/colt
def model_cnn_2layer(in_ch, in_dim, width, linear_size=128):
    """
    CNN, small 2-layer (default kernel size is 4 by 4)
    Parameter:
        in_ch: input image channel, 1 for MNIST and 3 for CIFAR
        in_dim: input dimension, 28 for MNIST and 32 for CIFAR
        width: width multiplier
    """
    model = nn.Sequential(
        nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1), nn.ReLU(),
        nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1), nn.ReLU(),
        Flatten(),
        nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4), linear_size),
        nn.ReLU(), nn.Linear(linear_size, 10))
    return model
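A usage sketch matching the docstring's MNIST case (a width multiplier of 1 is an arbitrary choice here; Flatten is the custom module used throughout this file):

import torch

model = model_cnn_2layer(in_ch=1, in_dim=28, width=1)
out = model(torch.zeros(2, 1, 28, 28))  # -> torch.Size([2, 10])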
Example #20
    def _build_encoder(self):
        """
        CNN encoder

        Conv1 -> ReLU -> MaxPool1 -> Conv2 -> ReLU -> MaxPool2 ->
            Flatten -> FC1 -> ReLU -> FC2
        """
        self.encoder = OrderedDict()
        self.encoder["Conv1"] = Conv2D(
            act_fn=ReLU(),
            init=self.init,
            pad=self.enc_conv1_pad,
            optimizer=self.optimizer,
            out_ch=self.enc_conv1_out_ch,
            stride=self.enc_conv1_stride,
            kernel_shape=self.enc_conv1_kernel_shape,
        )
        self.encoder["Pool1"] = Pool2D(
            mode="max",
            optimizer=self.optimizer,
            stride=self.enc_pool1_stride,
            kernel_shape=self.enc_pool1_kernel_shape,
        )
        self.encoder["Conv2"] = Conv2D(
            act_fn=ReLU(),
            init=self.init,
            pad=self.enc_conv2_pad,
            optimizer=self.optimizer,
            out_ch=self.enc_conv2_out_ch,
            stride=self.enc_conv2_stride,
            kernel_shape=self.enc_conv2_kernel_shape,
        )
        self.encoder["Pool2"] = Pool2D(
            mode="max",
            optimizer=self.optimizer,
            stride=self.enc_pool2_stride,
            kernel_shape=self.enc_pool2_kernel_shape,
        )
        self.encoder["Flatten3"] = Flatten(optimizer=self.optimizer)
        self.encoder["FC4"] = FullyConnected(
            n_out=self.latent_dim, act_fn=ReLU(), optimizer=self.optimizer
        )
        self.encoder["FC5"] = FullyConnected(
            n_out=self.T * 2,
            optimizer=self.optimizer,
            act_fn=Affine(slope=1, intercept=0),
            init=self.init,
        )
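A hedged sketch of how an OrderedDict encoder like this might be applied; the method name is hypothetical, and it assumes the numpy-ml-style layers above expose forward():

    def _encode(self, X):
        out = X
        for _name, layer in self.encoder.items():
            out = layer.forward(out)
        return out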
Example #21
 def add(self, layer_type, **kwargs):
     layer = None
     
     # send the layer the output shape of the last layer, or the input shape if it's the first
     kwargs['input_shape'] = self._layers[-1].output_shape if len(self._layers) else self._input_shape
     
     if layer_type == 'conv':
         layer = Conv2D(**kwargs)
     elif layer_type == 'dense':
         layer = Dense(**kwargs)
     elif layer_type == 'pool':
         layer = MaxPooling2D(**kwargs)
     elif layer_type == 'flat':
         layer = Flatten(**kwargs)
     else:
         raise ValueError("Unsupported layer type '%s'." % layer_type)

     self._layers.append(layer)
Example #22
File: networks.py  Project: JmfanBU/colt
    def __init__(self,
                 device,
                 dataset,
                 n_class=10,
                 input_size=32,
                 input_channel=3,
                 width1=1,
                 width2=1,
                 width3=1,
                 linear_size=100):
        super(ConvMedBig, self).__init__()

        mean, sigma = get_mean_sigma(device, dataset)
        self.normalizer = Normalization(mean, sigma)

        layers = [
            Normalization(mean, sigma),
            Conv2d(input_channel,
                   16 * width1,
                   3,
                   stride=1,
                   padding=1,
                   dim=input_size),
            ReLU((16 * width1, input_size, input_size)),
            Conv2d(16 * width1,
                   16 * width2,
                   4,
                   stride=2,
                   padding=1,
                   dim=input_size // 2),
            ReLU((16 * width2, input_size // 2, input_size // 2)),
            Conv2d(16 * width2,
                   32 * width3,
                   4,
                   stride=2,
                   padding=1,
                   dim=input_size // 2),
            ReLU((32 * width3, input_size // 4, input_size // 4)),
            Flatten(),
            Linear(32 * width3 * (input_size // 4) * (input_size // 4),
                   linear_size),
            ReLU(linear_size),
            Linear(linear_size, n_class),
        ]
        self.blocks = Sequential(*layers)
Example #23
File: networks.py  Project: JmfanBU/colt
def model_cnn_4layer(in_ch, in_dim, width, linear_size):
    """
    CNN, relatively large 4-layer
    Parameter:
        in_ch: input image channel, 1 for MNIST and 3 for CIFAR
        in_dim: input dimension, 28 for MNIST and 32 for CIFAR
        width: width multiplier
    """
    model = nn.Sequential(
        nn.Conv2d(in_ch, 4 * width, 3, stride=1, padding=1), nn.ReLU(),
        nn.Conv2d(4 * width, 4 * width, 4, stride=2, padding=1), nn.ReLU(),
        nn.Conv2d(4 * width, 8 * width, 3, stride=1, padding=1), nn.ReLU(),
        nn.Conv2d(8 * width, 8 * width, 4, stride=2, padding=1), nn.ReLU(),
        Flatten(),
        nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4), linear_size),
        nn.ReLU(), nn.Linear(linear_size, linear_size), nn.ReLU(),
        nn.Linear(linear_size, 10))
    return model
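The same kind of shape check applies here; for the CIFAR case from the docstring (a width of 1 and linear_size of 128 are illustrative values):

import torch

model = model_cnn_4layer(in_ch=3, in_dim=32, width=1, linear_size=128)
out = model(torch.zeros(2, 3, 32, 32))  # -> torch.Size([2, 10])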
Example #24
def vgg_bn():
    return [
        # 1
        Conv2D([7, 7], 64, [1, 3, 3, 1]),
        Conv2DBatchNorm(64),
        Activation(tf.nn.relu),
        MaxPool([1, 4, 4, 1], [1, 1, 1, 1]),

        # 2
        Convolutional_block(f=3, filters=[64, 64, 256], s=1),
        MaxPool([1, 5, 5, 1], [1, 1, 1, 1]),
        Dropout(0.5),
        Identity_block(f=3, filters=[64, 64, 256]),
        Dropout(0.5),
        Identity_block(f=3, filters=[64, 64, 256]),
        Dropout(0.5),
        MaxPool([1, 2, 2, 1], [1, 1, 1, 1]),

        # 3
        Convolutional_block(f=3, filters=[128, 128, 512], s=2),
        Dropout(0.5),
        Identity_block(f=3, filters=[128, 128, 512]),
        Dropout(0.5),
        Identity_block(f=3, filters=[128, 128, 512]),
        Dropout(0.5),
        MaxPool([1, 2, 2, 1], [1, 1, 1, 1]),

        # 4
        Convolutional_block(f=3, filters=[256, 256, 1024], s=2),
        Identity_block(f=3, filters=[256, 256, 1024]),
        Identity_block(f=3, filters=[256, 256, 1024]),
        Identity_block(f=3, filters=[256, 256, 1024]),
        Identity_block(f=3, filters=[256, 256, 1024]),
        Identity_block(f=3, filters=[256, 256, 1024]),

        Flatten(),
        Dense(128),
        Activation(tf.sigmoid),
        Dropout(0.5),
        Dense(10),
        # Fully_connected(),
        Activation(tf.nn.softmax),
    ]
Example #25
def make_cnn(X_dim, num_class):
    conv = Conv(X_dim,
                n_filter=16,
                h_filter=5,
                w_filter=5,
                stride=1,
                padding=2)
    relu = ReLU()
    maxpool = Maxpool(conv.out_dim, size=2, stride=2)
    conv2 = Conv(maxpool.out_dim,
                 n_filter=20,
                 h_filter=5,
                 w_filter=5,
                 stride=1,
                 padding=2)
    relu2 = ReLU()
    maxpool2 = Maxpool(conv2.out_dim, size=2, stride=2)
    flat = Flatten()
    fc = FullyConnected(np.prod(maxpool2.out_dim), num_class)

    return [conv, relu, maxpool, conv2, relu2, maxpool2, flat, fc]
Example #26
def modFC(): # {{{
	inRep = Representation(shape=(3, 32, 32))
	flow = Flatten()(inRep)

	for w in fcwidths:
		flow = fcModule(flow, w, baseDropout)

	if mirroring:
		flow = CReLU()(flow)
	outRep = FC(10, initialisation=init, initKWArgs=initKWArgs, reg=regP, regFunction=regF)(flow)
	if observing and not resNet:
		outRep = Observation()(outRep)

	return NN(
		inRep
	,	outRep
	,	archiver=arc
	,	optimiser=opt
	,	objective=obj
	,	postEpochFunctions=pefs
	) # }}}
Example #27
    def build_model(self) -> nn.Module:
        model = nn.Sequential(
            nn.Conv2d(1, self.context.get_hparam("n_filters1"), 3, 1),
            nn.ReLU(),
            nn.Conv2d(
                self.context.get_hparam("n_filters1"), self.context.get_hparam("n_filters2"), 3,
            ),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Dropout2d(self.context.get_hparam("dropout1")),
            Flatten(),
            nn.Linear(144 * self.context.get_hparam("n_filters2"), 128),
            nn.ReLU(),
            nn.Dropout2d(self.context.get_hparam("dropout2")),
            nn.Linear(128, 10),
            nn.LogSoftmax(dim=1),
        )

        # If loading backbone weights, either do not call reset_parameters() or
        # call it before loading the backbone weights.
        reset_parameters(model)
        return model
Example #28
    def build_model():
        tf.compat.v1.reset_default_graph()
        x = tf.compat.v1.placeholder(tf.float32, [None, 32, 32, 3]) 
        t = tf.compat.v1.placeholder(tf.float32, [None, 10]) 
        is_training = tf.compat.v1.placeholder(tf.bool)
        layers = [
            Conv((3, 3, 3, 64), tf.nn.relu),
            Conv((3, 3, 64, 64), tf.nn.relu),
            Pooling((1, 2, 2, 1)),
            Conv((3, 3, 64, 128), tf.nn.relu),
            Conv((3, 3, 128, 128), tf.nn.relu),
            Pooling((1, 2, 2, 1)),
            Flatten(),
            Dense(3200, 256, tf.nn.relu),
            Dense(256, 256, tf.nn.relu),
            Dense(256, 10, tf.nn.softmax)
        ]
        y = f_props(layers, x)
        
        params = get_params(layers)
        cost = - tf.reduce_mean(tf.reduce_sum(t * tf_log(y), axis=1))

        return x, t, is_training, y, cost, params
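The Dense(3200, 256, ...) input size presumably comes from unpadded (VALID) 3x3 convolutions on the 32x32x3 placeholder; a small sketch of the assumed arithmetic:

side = 32
for layer in ("conv", "conv", "pool", "conv", "conv", "pool"):
    side = side - 2 if layer == "conv" else side // 2  # unpadded 3x3 conv vs. 2x2 pooling
print(128 * side * side)  # 3200, matching Dense(3200, 256, tf.nn.relu)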
Example #29
    def __init__(self, context: det.TrialContext) -> None:
        super().__init__()
        # Set hyperparameters that influence the model architecture.
        self.n_filters1 = context.get_hparam("n_filters1")
        self.n_filters2 = context.get_hparam("n_filters2")
        self.dropout = context.get_hparam("dropout")

        # Define the central model.
        self.model = nn.Sequential(
            nn.Conv2d(1, self.n_filters1, kernel_size=5),
            nn.MaxPool2d(2),
            nn.ReLU(),
            nn.Conv2d(self.n_filters1, self.n_filters2, kernel_size=5),
            nn.MaxPool2d(2),
            nn.ReLU(),
            Flatten(),
            nn.Linear(16 * self.n_filters2, 50),
            nn.ReLU(),
            nn.Dropout2d(self.dropout),
        )  # type: nn.Sequential
        # Predict digit labels from self.model.
        self.digit = nn.Sequential(nn.Linear(50, 10), nn.Softmax(dim=1))  # softmax over the class dimension
        # Predict binary labels from self.model.
        self.binary = nn.Sequential(nn.Linear(50, 1), nn.Sigmoid(), Squeeze())
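A hedged sketch of the forward pass implied by the comments above (the original forward() is not part of this excerpt; the two heads are simply applied to the shared features):

    def forward(self, x):
        features = self.model(x)
        return self.digit(features), self.binary(features)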
Example #30
    def build_model(self) -> nn.Module:
        model = nn.Sequential(
            nn.Conv2d(1, pedl.get_hyperparameter("n_filters1"), kernel_size=5),
            nn.MaxPool2d(2),
            nn.ReLU(),
            nn.Conv2d(
                pedl.get_hyperparameter("n_filters1"),
                pedl.get_hyperparameter("n_filters2"),
                kernel_size=5,
            ),
            nn.MaxPool2d(2),
            nn.ReLU(),
            Flatten(),
            nn.Linear(16 * pedl.get_hyperparameter("n_filters2"), 50),
            nn.ReLU(),
            nn.Dropout2d(pedl.get_hyperparameter("dropout")),
            nn.Linear(50, 10),
            nn.LogSoftmax(dim=1),
        )

        # If loading backbone weights, either do not call reset_parameters() or
        # call it before loading the backbone weights.
        reset_parameters(model)
        return model