Example #1
    def __init__(self, in_size, out_size, batch_norm=True, stride=1, activ='Relu', ksize=3):
        super(ResidualBlock, self).__init__()

        layers = []
        #first conv: in_size -> out_size, carries the stride; padding keeps the spatial size for odd ksize
        conv = nn.Conv2d(in_size, out_size, kernel_size=ksize, stride=stride, padding=ksize // 2)
        layers.append(conv)
        if batch_norm:
            bn = nn.BatchNorm2d(out_size)
            layers.append(bn)
        layers.append(misc.activation(activ))
        #second conv: out_size -> out_size, stride 1
        conv = nn.Conv2d(out_size, out_size, kernel_size=ksize, stride=1, padding=ksize // 2)
        layers.append(conv)
        if batch_norm:
            bn = nn.BatchNorm2d(out_size)
            layers.append(bn)

        self.layers = nn.Sequential(*layers)

        #project the identity when the channel count or spatial size changes
        if in_size != out_size or stride != 1:
            self.downsample = nn.Conv2d(in_size, out_size, kernel_size=1, stride=stride, padding=0)
        else:
            self.downsample = None

        self.activ = misc.activation(activ)
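
    # The snippet above only shows __init__. A minimal sketch of the implied forward
    # pass, assuming the standard residual pattern (sum, then activation); this method
    # is not part of the original code.
    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.layers(x)
        return self.activ(out + identity)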
Example #2
    def __init__(self, inplanes, k=12, activ='Relu', batch_norm=True):
        super(DenseBasicBlock, self).__init__()

        #no conv bias when batch norm follows, since BatchNorm2d has its own bias
        self.conv = nn.Conv2d(inplanes, k, kernel_size=3, padding=1, bias=not batch_norm)
        if batch_norm:
            self.bn = nn.BatchNorm2d(k)
        else:
            self.bn = None
        self.activ = misc.activation(activ)
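
    # Not in the original snippet: a minimal sketch of the implied forward pass. The
    # DenseNet driver in Example #3 grows the channel count by k per block, so the
    # block is assumed to concatenate its k new feature maps onto its input
    # (requires `import torch`).
    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        out = self.activ(out)
        return torch.cat([x, out], dim=1)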
Example #3
    def __init__(self, args, x_size):

        NetworkBase.__init__(self, args)

        self.k = args.dense_k

        #'var' is the only batch norm function supported here
        args.batch_norm_func = 'var'

        layers = []
        #initial layer
        cur_sz = x_size[1]
        layers.append(nn.Conv2d(cur_sz, args.dense_k*2, kernel_size=3, stride=1, padding=1))
        if args.batch_norm:
            layers.append(nn.BatchNorm2d(args.dense_k*2))
        layers.append(misc.activation(args.activ))
        cur_sz = args.dense_k*2

        #three dense stages; a transition (compression + downsample) follows all but the last
        for stage in range(3):
            for i in range(args.dense_n):
                layers.append(DenseBasicBlock(cur_sz, args.dense_k, args.activ, args.batch_norm))
                cur_sz += args.dense_k
            if stage < 2:
                layers.append(DenseTransitionBlock(cur_sz, args.dense_compression))
                cur_sz = int(cur_sz * args.dense_compression)

        layers.append(nn.AdaptiveAvgPool2d((1, 1)))
        layers.append(Flatten())
        layers.append(nn.Linear(cur_sz, args.fc_layers[-1]))

        self.layers = nn.Sequential(*layers)

        #loss
        self.recon_loss = nn.CrossEntropyLoss()
        
        #sets up node drop and l2 reg on layers
        self.find_layers(self.layers)
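
# DenseTransitionBlock is referenced above but not shown. A plausible stand-in,
# matching how the driver shrinks cur_sz to int(cur_sz * compression) and expects
# the spatial size to drop: a 1x1 compression conv followed by 2x2 average pooling.
# The real signature and layout may differ; assumes the same nn/misc imports as the
# snippets above.
class DenseTransitionBlock(nn.Module):
    def __init__(self, inplanes, compression, batch_norm=True, activ='Relu'):
        super(DenseTransitionBlock, self).__init__()
        outplanes = int(inplanes * compression)
        self.conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=not batch_norm)
        self.bn = nn.BatchNorm2d(outplanes) if batch_norm else None
        self.activ = misc.activation(activ)
        self.pool = nn.AvgPool2d(2)

    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        return self.pool(self.activ(out))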
Example #4
    def __init__(self, args, x_size):

        NetworkBase.__init__(self, args)

        #'var' is the only batch norm function supported here
        args.batch_norm_func = 'var'

        layers = []

        #initial stem: 7x7/2 conv, batch norm, activation, then 3x3/2 max pool
        cur_sz = x_size[1]
        layers.append(nn.Conv2d(cur_sz, 64, kernel_size=7, stride=2, padding=3))
        if args.batch_norm:
            layers.append(nn.BatchNorm2d(64))
        layers.append(misc.activation(args.activ))
        layers.append(nn.MaxPool2d(kernel_size=3, stride=2))
        cur_sz = 64

        #four stages of 3/8/36/3 bottleneck blocks (ResNet-152 style); the first block
        #of every stage after the first downsamples with stride 2
        for stage, (sz, n_blocks) in enumerate(zip([64, 128, 256, 512], [3, 8, 36, 3])):
            stride = 1 if stage == 0 else 2
            for j in range(n_blocks):
                layers.append(ResBlock131(cur_sz, sz, 4*sz, batch_norm=args.batch_norm, stride=stride, activ=args.activ))
                stride = 1
                cur_sz = 4*sz

        layers.append(nn.AdaptiveAvgPool2d((1, 1)))
        layers.append(Flatten())
        layers.append(nn.Linear(cur_sz, args.fc_layers[-1]))

        self.layers = nn.Sequential(*layers)
        print(self)
        #loss
        self.recon_loss = nn.CrossEntropyLoss()

        #sets up node drop and l2 reg on layers
        self.find_layers(self.layers)
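
# ResBlock131 is referenced above but not shown. Judging by its call site
# (in_size, mid_size, out_size, batch_norm, stride, activ) and its name, a plausible
# sketch is the standard 1x1-3x3-1x1 bottleneck residual block; the real
# implementation may differ. Assumes the same nn/misc imports as the snippets above.
class ResBlock131(nn.Module):
    def __init__(self, in_size, mid_size, out_size, batch_norm=True, stride=1, activ='Relu'):
        super(ResBlock131, self).__init__()

        def conv_bn(cin, cout, k, s):
            mods = [nn.Conv2d(cin, cout, kernel_size=k, stride=s, padding=k // 2, bias=not batch_norm)]
            if batch_norm:
                mods.append(nn.BatchNorm2d(cout))
            return mods

        self.layers = nn.Sequential(
            *conv_bn(in_size, mid_size, 1, 1), misc.activation(activ),
            *conv_bn(mid_size, mid_size, 3, stride), misc.activation(activ),
            *conv_bn(mid_size, out_size, 1, 1))

        if in_size != out_size or stride != 1:
            self.downsample = nn.Conv2d(in_size, out_size, kernel_size=1, stride=stride)
        else:
            self.downsample = None
        self.activ = misc.activation(activ)

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        return self.activ(self.layers(x) + identity)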
Example #5
    def __init__(self, args, x_size):

        NetworkBase.__init__(self, args)

        #'var' is the only batch norm function supported here
        args.batch_norm_func = 'var'

        layers = []

        #initial layer
        cur_sz = x_size[1]
        layers.append(nn.Conv2d(cur_sz, 16, kernel_size=3, stride=1, padding=1))
        if args.batch_norm:
            layers.append(nn.BatchNorm2d(16))
        layers.append(misc.activation(args.activ))
        cur_sz = 16

        #three stages of 16/32/64 channels; the first block of each stage after the first downsamples
        for i in range(3):
            sz = 16 * 2**i
            stride = 1 if i == 0 else 2
            for j in range(args.res_n):
                layers.append(ResidualBlock(cur_sz, sz, batch_norm=args.batch_norm, stride=stride, activ=args.activ))
                stride = 1
                cur_sz = sz

        layers.append(nn.AdaptiveAvgPool2d((1, 1)))
        layers.append(Flatten())
        layers.append(nn.Linear(cur_sz, args.fc_layers[-1]))

        self.layers = nn.Sequential(*layers)

        #loss
        self.recon_loss = nn.CrossEntropyLoss()
        
        #sets up node drop and l2 reg on layers
        self.find_layers(self.layers)
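
# Flatten is used by the drivers above but not shown; a minimal stand-in (newer
# PyTorch also ships nn.Flatten, which would work the same way here):
class Flatten(nn.Module):
    def forward(self, x):
        return x.view(x.size(0), -1)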
Example #6
    def __init__(self, args, x_size):
        super(ClassifyConvNetwork, self).__init__(args)
        def conv2d_out_size(Hin, layer):
            #conv output size: floor((Hin + 2*pad - dilation*(kernel-1) - 1) / stride) + 1
            return (Hin + 2*layer.padding[0] - layer.dilation[0]*(layer.kernel_size[0]-1) - 1) // layer.stride[0] + 1

        self.channel_in = x_size[1]
        cur_channel = x_size[1]
        self.batch_size = args.batch_size
        cur_height = x_size[2]
        
        #if a single max-pool period is given, expand it into the list of conv-layer indices to pool after
        if len(args.max_pool_every) == 1:
            args.max_pool_every = range(args.max_pool_every[0]-1, len(args.conv_layer_filters), args.max_pool_every[0])
        #if a single dropout rate is given, repeat it for every conv and fc layer
        if len(args.dropout) == 1:
            args.dropout = [args.dropout[0] for i in range(len(args.conv_layer_filters)+len(args.fc_layers))]

        if (not args.batch_norm_no_mean_center) and args.batch_norm_func == 'var':
            batch_norm_func = nn.BatchNorm2d
        else:
            batch_norm_func = lambda x: misc.BatchNorm(x, mean_center=not args.batch_norm_no_mean_center, norm=args.batch_norm_func)

        layers = []
        for i, cvf in enumerate(args.conv_layer_filters):
            #no bias if batch norm, since batch norm includes its own bias (bias-less variant left commented out below)
            #conv = nn.Conv2d(cur_channel, cvf, kernel_size=args.filter_size, stride=args.stride, padding=args.conv_padding, bias=not args.batch_norm)
            conv = nn.Conv2d(cur_channel, cvf, kernel_size=args.filter_size, stride=args.stride, padding=args.conv_padding)
            layers.append(conv)
            cur_channel = cvf #update cur_channel for next iteration
            #compute the next height so we can make sure the output size doesn't go negative
            next_height = conv2d_out_size(cur_height, layers[-1])
            print("conv: %d %d"%(cur_height,next_height))
            cur_height = next_height
            if next_height <= 0:
                raise ValueError('output height/width is <= 0')
            #batch norm
            if args.batch_norm:
                layer = batch_norm_func(cvf)
                layers.append(layer)
                
            #activations
            layers.append(misc.activation(args.activ))
            #max pool
            if i in args.max_pool_every:
                cur_height //= 2
                layers.append(nn.MaxPool2d(2))
                print(cur_height)

            #dropout
            dropout = args.dropout[i]
            if dropout > 0:                
                #layers.append(AnnealedDropout(dropout))
                layers.append(nn.Dropout(dropout))

        self.im_size = (cur_channel, cur_height, cur_height)
        self.flat_size = int(cur_channel * cur_height * cur_height)
        self.conv_middle_size = int(cur_height * cur_height)

        layers.append(Flatten())

        if (not args.batch_norm_no_mean_center) and args.batch_norm_func == 'var':
            batch_norm_func = nn.BatchNorm1d
        else:
            batch_norm_func = lambda x: misc.BatchNorm(x, mean_center=not args.batch_norm_no_mean_center, norm=args.batch_norm_func)

        #fully connected layers
        cur_size = self.flat_size
        for i, fc in enumerate(args.fc_layers):
            #bias kept here even when batch norm follows, matching the conv layers above
            layer = nn.Linear(cur_size, fc)
            layers.append(layer)
            cur_size = fc
            if i != len(args.fc_layers)-1:
                #batch norm
                if args.batch_norm:
                    bn_layer = batch_norm_func(fc)
                    layers.append(bn_layer)
                #activations
                layers.append(misc.activation(args.activ))
                #dropout
                dropout = args.dropout[i+len(args.conv_layer_filters)]
                if dropout > 0:
                    #layers.append(AnnealedDropout(args.all_dropout))
                    layers.append(nn.Dropout(dropout))

        self.layers = nn.Sequential(*layers)

        #loss
        self.recon_loss = nn.CrossEntropyLoss()

        #sets up node drop and l2 reg on layers
        self.find_layers(self.layers)

        #initialize network
        for p in self.parameters():
            if p.dim() >= 2:
                nn.init.xavier_uniform_(p)

        print(self)
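
# Quick sanity check of the spatial-size bookkeeping above, with hypothetical values:
# a 32x32 input through a 3x3 conv with stride 1 and padding 1 keeps its size, and
# each MaxPool2d(2) then halves it.
conv = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)
h = (32 + 2*conv.padding[0] - conv.dilation[0]*(conv.kernel_size[0]-1) - 1) // conv.stride[0] + 1
print(h)       # 32
print(h // 2)  # 16 after MaxPool2d(2)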