Example #1
    def __init__(self, fc_params, output_params, flatten=False):
        super(FcBlockWOutput, self).__init__()
        input_size = fc_params[0]
        output_size = fc_params[1]

        add_output = output_params[0]
        num_classes = output_params[1]
        self.output_id = output_params[2]
        self.depth = 1

        fc_layers = []

        if flatten:
            fc_layers.append(af.Flatten())

        fc_layers.append(nn.Linear(input_size, output_size))
        fc_layers.append(nn.ReLU())
        fc_layers.append(nn.Dropout(0.5))
        self.layers = nn.Sequential(*fc_layers)

        if add_output:
            self.output = nn.Linear(output_size, num_classes)
            self.no_output = False
        else:
            self.output = nn.Sequential()
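            # this block has no internal-classifier output, so forward() is rebound to only_forward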
            self.forward = self.only_forward
            self.no_output = True
Example #2
    def __init__(self, total_size, ics, init_weights=True):
        super(DenseNet, self).__init__()
        self.total_size = total_size
        self.init_weights = init_weights
        self.ics = ics
        self.num_ics = sum(self.ics)
        self.num_class = 10
        self.num_output = 0

        self.train_func = mf.iter_training_0
        self.test_func = mf.sdn_test

        self.input_size = 32
        self.in_channels = 16
        self.cum_in_channels = self.in_channels

        self.init_conv = nn.Sequential(*[
            nn.Conv2d(3, self.in_channels, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(self.in_channels),
            nn.ReLU()
        ])

        self.end_layers = nn.Sequential(*[
            nn.AvgPool2d(kernel_size=8),
            af.Flatten(),
            nn.Linear(2560, self.num_class)
        ])
        self.grow()

        if self.init_weights:
            self._init_weights(self.modules())
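The two constructors above take their configuration positionally: FcBlockWOutput reads fc_params as (input_size, output_size) and output_params as (add_output, num_classes, output_id), while DenseNet takes the number of blocks to grow to plus a per-block list of internal-classifier flags (num_ics = sum(ics)). A minimal instantiation sketch with placeholder values; the import path depends on the project layout:

# Hypothetical usage; the argument values below are placeholders, not taken from the original project.
fc_block = FcBlockWOutput(
    fc_params=(512, 256),          # (input_size, output_size)
    output_params=(True, 10, 0),   # (add_output, num_classes, output_id)
    flatten=True)                  # prepend af.Flatten() for 4-D inputs

dense_net = DenseNet(
    total_size=6,                  # number of blocks the network should grow to
    ics=[0, 1, 0, 1, 0, 1],        # one 0/1 internal-classifier flag per block
    init_weights=True)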
Example #3
    def __init__(self, params):
        super(ResNet_SDN, self).__init__()

        self.ic_only = False

        self.num_blocks = params['num_blocks']
        self.num_classes = int(params['num_classes'])
        self.augment_training = params['augment_training']
        self.input_size = int(params['input_size'])
        self.block_type = params['block_type']
        self.add_out_nonflat = params['add_ic']
        self.add_output = [item for sublist in self.add_out_nonflat for item in sublist]

        self.init_weights = params['init_weights']
        self.train_func = mf.sdn_train
        self.in_channels = 16
        self.num_output = sum(self.add_output) + 1
        self.test_func = mf.sdn_test

        self.init_depth = 1
        self.end_depth = 1
        self.cur_output_id = 0

        if self.block_type == 'basic':
            self.block = BasicBlockWOutput

        init_conv = []

        if self.input_size == 32:  # cifar10
            self.cur_input_size = self.input_size
            init_conv.append(nn.Conv2d(3, self.in_channels, kernel_size=3, stride=1, padding=1, bias=False))
        else:  # tiny imagenet
            self.cur_input_size = int(self.input_size / 2)
            init_conv.append(nn.Conv2d(3, self.in_channels, kernel_size=3, stride=2, padding=1, bias=False))

        init_conv.append(nn.BatchNorm2d(self.in_channels))
        init_conv.append(nn.ReLU())

        self.init_conv = nn.Sequential(*init_conv)

        self.layers = nn.ModuleList()
        self.layers.extend(self._make_layer(self.in_channels, block_id=0, stride=1))

        self.cur_input_size = int(self.cur_input_size/2)
        self.layers.extend(self._make_layer(32, block_id=1, stride=2))
        
        self.cur_input_size = int(self.cur_input_size/2)
        self.layers.extend(self._make_layer(64, block_id=2, stride=2))

        end_layers = []

        end_layers.append(nn.AvgPool2d(kernel_size=8))
        end_layers.append(af.Flatten())
        end_layers.append(nn.Linear(16 * self.block.expansion, self.num_classes))
        # end_layers.append(nn.Linear(64*self.block.expansion, self.num_classes))
        self.end_layers = nn.Sequential(*end_layers)

        if self.init_weights:
            self.initialize_weights()
Example #4
    def __init__(self, params):
        super(WideResNet, self).__init__()
        self.num_blocks = params['num_blocks']
        self.widen_factor = params['widen_factor']
        self.num_classes = int(params['num_classes'])
        self.dropout_rate = params['dropout_rate']
        self.augment_training = params['augment_training']
        self.input_size = int(params['input_size'])
        self.train_func = mf.cnn_train
        self.test_func = mf.cnn_test
        self.in_channels = 16
        self.num_output = 1

        if self.input_size == 32:  # cifar10 and cifar100
            self.init_conv = nn.Conv2d(3,
                                       self.in_channels,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1,
                                       bias=True)
        elif self.input_size == 64:  # tiny imagenet
            self.init_conv = nn.Conv2d(3,
                                       self.in_channels,
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       bias=True)

        self.layers = nn.ModuleList()
        self.layers.extend(
            self._wide_layer(wide_basic,
                             self.in_channels * self.widen_factor,
                             block_id=0,
                             stride=1))
        self.layers.extend(
            self._wide_layer(wide_basic,
                             32 * self.widen_factor,
                             block_id=1,
                             stride=2))
        self.layers.extend(
            self._wide_layer(wide_basic,
                             64 * self.widen_factor,
                             block_id=2,
                             stride=2))

        end_layers = []

        end_layers.append(nn.BatchNorm2d(64 * self.widen_factor, momentum=0.9))
        end_layers.append(nn.ReLU(inplace=True))
        end_layers.append(nn.AvgPool2d(kernel_size=8))
        end_layers.append(af.Flatten())
        end_layers.append(nn.Linear(64 * self.widen_factor, self.num_classes))
        self.end_layers = nn.Sequential(*end_layers)

        self.initialize_weights()
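Both params-driven constructors above (ResNet_SDN and WideResNet) read a flat dictionary; every key appears in the code. A hedged sketch of what such dictionaries might look like, with placeholder values and an assumed per-stage shape for num_blocks:

# Only the keys are taken from the constructors above; values and list shapes are illustrative.
resnet_sdn_params = {
    'num_blocks': [9, 9, 9],                       # assumed per-stage block counts
    'num_classes': 10,
    'augment_training': True,
    'input_size': 32,                              # 32 -> cifar branch, otherwise tiny-imagenet branch
    'block_type': 'basic',                         # the only block type handled in this __init__
    'add_ic': [[0, 1, 0], [0, 1, 0], [0, 1, 0]],   # nested per-block IC flags, flattened internally
    'init_weights': True,
}

wideresnet_params = {
    'num_blocks': [4, 4, 4],
    'widen_factor': 10,
    'num_classes': 10,
    'dropout_rate': 0.3,
    'augment_training': True,
    'input_size': 32,                              # 32 and 64 are the two cases handled above
}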
Example #5
    def __init__(self, params):
        super(ResNet, self).__init__()
        self.num_blocks = params['num_blocks']
        self.num_classes = int(params['num_classes'])
        self.augment_training = params['augment_training']
        self.input_size = int(params['input_size'])
        self.block_type = params['block_type']
        self.train_func = mf.cnn_train
        self.test_func = mf.cnn_test
        self.in_channels = 16
        self.num_output = 1

        if self.block_type == 'basic':
            self.block = BasicBlock

        init_conv = []

        if self.input_size == 32:  # cifar10 and cifar100
            init_conv.append(
                nn.Conv2d(3,
                          self.in_channels,
                          kernel_size=3,
                          stride=1,
                          padding=1,
                          bias=False))
        elif self.input_size == 64:  # tiny imagenet
            init_conv.append(
                nn.Conv2d(3,
                          self.in_channels,
                          kernel_size=3,
                          stride=2,
                          padding=1,
                          bias=False))

        init_conv.append(nn.BatchNorm2d(self.in_channels))
        init_conv.append(nn.ReLU(inplace=True))

        self.init_conv = nn.Sequential(*init_conv)

        self.layers = nn.ModuleList()
        self.layers.extend(
            self._make_layer(self.in_channels, block_id=0, stride=1))
        self.layers.extend(self._make_layer(32, block_id=1, stride=2))
        self.layers.extend(self._make_layer(64, block_id=2, stride=2))

        end_layers = []

        end_layers.append(nn.AvgPool2d(kernel_size=8))
        end_layers.append(af.Flatten())
        end_layers.append(
            nn.Linear(64 * self.block.expansion, self.num_classes))
        self.end_layers = nn.Sequential(*end_layers)

        self.initialize_weights()
Example #6
    def __init__(self, fc_params, flatten):
        super(FcBlock, self).__init__()
        input_size = int(fc_params[0])
        output_size = int(fc_params[1])

        fc_layers = []
        if flatten:
            fc_layers.append(af.Flatten())
        fc_layers.append(nn.Linear(input_size, output_size))
        fc_layers.append(nn.ReLU())
        fc_layers.append(nn.Dropout(0.5))
        self.layers = nn.Sequential(*fc_layers)
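The plain ResNet above is configured the same way, and FcBlock just stacks an optional Flatten, a Linear, a ReLU and Dropout(0.5). A short sketch with placeholder values:

# Placeholder values; only the keys come from the ResNet constructor above.
resnet_params = {
    'num_blocks': [5, 5, 5],      # assumed per-stage block counts
    'num_classes': 100,
    'augment_training': True,
    'input_size': 32,             # 32 and 64 are the two branches handled above
    'block_type': 'basic',
}

# Hypothetical FcBlock usage; the import path depends on the project layout.
fc = FcBlock(fc_params=(64 * 8 * 8, 256), flatten=True)   # Flatten -> Linear(4096, 256) -> ReLU -> Dropout(0.5)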
Example #7
    def __init__(self,
                 args,
                 x,
                 internal_fm,
                 nz_post,
                 net_size=1,
                 category=200,
                 pd=20,
                 device='cuda',
                 target_channel=128,
                 share=False):
        super(MNIconfidence, self).__init__()
        self.share = share
        # print('Share parameter: ', self.share)
        self.net_size = net_size
        self.args = args
        self.internal_fm = internal_fm
        self.nz_post = nz_post
        self.num_class = len(nz_post)
        self.target_channel = target_channel
        self.device = device
        self.pe = af.position_encoding(self.num_class, pd).to(self.device)
        self.input_shape_list = self._get_input_shapes()
        self.used_shape_list = self._get_used_shapes()
        self.smallest_fm_shape = self._get_smallest_shape()
        self.flatten_size = target_channel * self.smallest_fm_shape * self.smallest_fm_shape

        self.x_module = nn.Sequential(
            nn.Conv2d(in_channels=3,
                      out_channels=target_channel,
                      kernel_size=3,
                      padding=1), nn.BatchNorm2d(target_channel), nn.ReLU(),
            nn.AdaptiveAvgPool2d(self.smallest_fm_shape), af.Flatten(),
            nn.Linear(self.flatten_size, self.flatten_size))

        self.unshared_module_list = nn.ModuleList()

        for i in range(self.num_class):
            confidence_module = nn.Sequential(
                nn.Linear(category + pd + 3, 200 * net_size), nn.ReLU(),
                nn.Dropout(0.7), nn.Linear(200 * net_size, 100 * net_size),
                nn.ReLU(), nn.Dropout(0.7))
            self.unshared_module_list.append(confidence_module)

        self.shared_module = nn.Sequential(
            nn.Linear(category + pd + 3, 200 * net_size), nn.ReLU(),
            nn.Dropout(0.8), nn.Linear(200 * net_size, 100 * net_size),
            nn.ReLU(), nn.Dropout(0.8))
        self.last_layer = nn.Sequential(
            nn.Linear(100 * self.num_class * net_size, self.num_class))
Example #8
    def __init__(self, params):
        super(MobileNet_SDN, self).__init__()
        self.cfg = params['cfg']
        self.num_classes = int(params['num_classes'])
        self.augment_training = params['augment_training']
        self.input_size = int(params['input_size'])
        self.add_output = params['add_ic']
        self.train_func = mf.sdn_train
        self.test_func = mf.sdn_test
        self.num_output = sum(self.add_output) + 1
        self.in_channels = 32
        self.cur_input_size = self.input_size

        self.init_depth = 1
        self.end_depth = 1
        self.cur_output_id = 0

        init_conv = []
        init_conv.append(
            nn.Conv2d(3,
                      self.in_channels,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False))
        init_conv.append(nn.BatchNorm2d(self.in_channels))
        init_conv.append(nn.ReLU(inplace=True))
        self.init_conv = nn.Sequential(*init_conv)

        self.layers = nn.ModuleList()
        self.layers.extend(self._make_layers(in_channels=self.in_channels))

        end_layers = []
        if self.input_size == 32:  # cifar10 and cifar100
            end_layers.append(nn.AvgPool2d(2))
        elif self.input_size == 64:  # tiny imagenet
            end_layers.append(nn.AvgPool2d(4))

        end_layers.append(af.Flatten())
        end_layers.append(nn.Linear(1024, self.num_classes))
        self.end_layers = nn.Sequential(*end_layers)
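MobileNet_SDN above is also params-driven. The one structural assumption in the sketch below is that 'cfg' follows the common MobileNet convention (an int means out_channels at stride 1, a tuple means (out_channels, stride)), since _make_layers is not shown here; the values are placeholders:

# Placeholder configuration; the 'cfg' structure is an assumption, only the keys come from the constructor.
mobilenet_sdn_params = {
    'cfg': [64, (128, 2), 128, (256, 2), 256, (512, 2), 512, 512, 512, 512, 512, (1024, 2), 1024],
    'num_classes': 10,
    'augment_training': True,
    'input_size': 32,                                   # 32 -> AvgPool2d(2) at the end, 64 -> AvgPool2d(4)
    'add_ic': [0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0],  # one flag per layer; num_output = sum(add_ic) + 1
}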
Example #9
    def __init__(self, params):

        super(ResNet_Baseline, self).__init__()
        self.ic_only = False
        self.augment_training = params['augment_training']
        self.init_weights = params['init_weights']
        self.block_type = params['block_type']
        self.init_type = params['init_type']
        self.total_size = params['size']  # the size to reach (number of units)
        self.ics = params['ics'] if 'ics' in params else []
        self.num_ics = sum(self.ics)
        self.prune = params['prune']

        if self.prune:
            self.keep_ratio = params["keep_ratio"]

        if 'mode' in params:
            self.mode = params['mode']
        else:
            self.mode = 0

        if self.init_type != 'full' and len(self.ics) != self.total_size:
            raise ValueError(
                "final size of network does not match the length of ics array: {}; {}".format(self.total_size,
                                                                                              self.ics))

        self.num_class = 10

        self.num_output = 0

        if self.block_type == 'basic':
            self.block = resNet.BasicBlockWOutput

        self.input_size = 32  # cifar10
        self.in_channels = 16

        init_conv = []
        init_conv.append(nn.Conv2d(3, self.in_channels, kernel_size=3, stride=1, padding=1, bias=False))
        init_conv.append(nn.BatchNorm2d(self.in_channels))
        init_conv.append(nn.ReLU())

        end_layers = []
        end_layers.append(nn.AvgPool2d(kernel_size=8))
        end_layers.append(af.Flatten())
        if self.init_type == 'dense':
            end_layers.append(nn.Linear(256 * (self.total_size + 1) * self.block.expansion, self.num_class))
        else:
            end_layers.append(nn.Linear(256 * self.block.expansion, self.num_class))

        self.init_conv = nn.Sequential(*init_conv)
        self.layers = nn.ModuleList()
        self.end_layers = nn.Sequential(*end_layers)

        train_funcs = {
            '0': mf.iter_training_0,
            '1': mf.iter_training_4
        }

        if self.init_type == "full":
            self.train_func = mf.cnn_train
            self.test_func = mf.cnn_test
            layers = [self.block(self.in_channels,
                                 16, (False, self.num_class, 32, 1)) for _ in range(self.total_size)]
            self.layers.extend(layers)
            self.num_output = 1
        elif self.init_type == "full_ic":
            self.train_func = mf.sdn_train
            self.test_func = mf.sdn_test
            layers = [self.block(self.in_channels,
                                 16, (self.ics[i], self.num_class, 32, 1)) for i in range(self.total_size)]
            self.layers.extend(layers)
            self.num_output = sum(self.ics) + 1
        elif self.init_type == "iterative":
            self.train_func = train_funcs[str(self.mode)]  # dict keys are strings; str() also covers the integer default
            print("mode function: {}".format(self.train_func))
            self.test_func = mf.sdn_test
            self.grow()
        elif self.init_type == "dense":
            self.train_func = train_funcs[str(self.mode)]  # dict keys are strings; str() also covers the integer default
            print("mode function: {}".format(self.train_func))
            self.test_func = mf.sdn_test
            self.grow()
        else:
            raise KeyError(
                "the init_type should be either 'full', 'full_ic' or 'iterative' and it is: {}".format(self.init_type))

        self.to_eval()

        if self.init_weights:
            self._init_weights(self.modules())
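ResNet_Baseline reads a richer dictionary: 'size' is the number of blocks to reach, 'ics' must have one entry per block unless init_type is 'full', and 'mode' (a string key into train_funcs) selects the iterative training routine. A sketch with placeholder values:

# Placeholder values; keys mirror what the constructor above reads.
resnet_baseline_params = {
    'augment_training': True,
    'init_weights': True,
    'block_type': 'basic',                   # the only block type handled here
    'init_type': 'full_ic',                  # 'full', 'full_ic', 'iterative' or 'dense'
    'size': 9,                               # total number of blocks to reach
    'ics': [0, 1, 0, 1, 0, 1, 0, 1, 0],      # length must equal 'size' unless init_type == 'full'
    'prune': False,                          # if True, 'keep_ratio' is also required
    'mode': '0',                             # '0' -> mf.iter_training_0, '1' -> mf.iter_training_4
}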
Example #10
    def __init__(self,
                 args,
                 x,
                 internal_fm,
                 nz_post,
                 category=200,
                 pd=20,
                 net_size=1,
                 device='cuda',
                 target_channel=128,
                 share=False):
        super(MulticlassNetImage, self).__init__()
        self.args = args
        self.internal_fm = internal_fm
        self.nz_post = nz_post
        self.num_class = len(nz_post)
        self.target_channel = target_channel
        self.device = device
        self.pe = af.position_encoding(self.num_class, pd).to(self.device)
        self.input_shape_list = self._get_input_shapes()
        self.used_shape_list = self._get_used_shapes()
        self.smallest_fm_shape = self._get_smallest_shape()
        self.fm_module_list = nn.ModuleList()
        self.flatten_size = target_channel * self.smallest_fm_shape * self.smallest_fm_shape

        self.x_module = nn.Sequential(
            nn.Conv2d(in_channels=3,
                      out_channels=target_channel,
                      kernel_size=3,
                      padding=1), nn.BatchNorm2d(target_channel), nn.ReLU(),
            nn.AdaptiveAvgPool2d(self.smallest_fm_shape), af.Flatten(),
            nn.Linear(self.flatten_size, self.flatten_size))

        for shape in self.used_shape_list:
            if len(shape) == 4:
                fm_module = nn.Sequential(
                    nn.Conv2d(in_channels=shape[1],
                              out_channels=target_channel,
                              kernel_size=3,
                              padding=1), nn.BatchNorm2d(target_channel),
                    nn.ReLU(), nn.AdaptiveAvgPool2d(self.smallest_fm_shape),
                    af.Flatten(),
                    nn.Linear(self.flatten_size, self.flatten_size))
            if len(shape) == 2:
                fm_module = nn.Sequential(
                    nn.Linear(shape[1], self.flatten_size), nn.ReLU())
            self.fm_module_list.append(fm_module)

        self.total_channels = target_channel * (len(self.used_shape_list) + 1)

        self.final_module = nn.Sequential(
            nn.Conv2d(in_channels=self.total_channels,
                      out_channels=target_channel,
                      kernel_size=3,
                      padding=1), nn.BatchNorm2d(target_channel), nn.ReLU(),
            nn.Dropout(0.8), af.Flatten(),
            nn.Linear(self.flatten_size, self.num_class))

        self.xhs_module_fc = nn.Sequential(
            nn.Linear(category * self.num_class, category), nn.ReLU(),
            nn.Dropout(0.7), nn.Linear(category, self.num_class))

        self.encoder_layer = nn.TransformerEncoderLayer(category, 4)
        self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, 2)

        self.xhs_module_conv = nn.Sequential(
            nn.Conv1d(in_channels=category, out_channels=50, kernel_size=1),
            nn.ReLU(), nn.Dropout(0.7), af.Flatten(),
            nn.Linear(self.num_class * 50, self.num_class))

        self.shared_module = nn.Sequential(
            nn.Linear(category + pd + 3 + self.flatten_size, 400), nn.ReLU(),
            nn.Dropout(0.8), nn.Linear(400, 200), nn.ReLU(), nn.Dropout(0.8))

        self.shared_module_fm = nn.Sequential(
            nn.Linear(self.flatten_size * 2 + 3, 400), nn.ReLU(),
            nn.Dropout(0.8), nn.Linear(400, 200), nn.ReLU(), nn.Dropout(0.8))

        self.last_layer = nn.Sequential(
            nn.Linear(200 * self.num_class, self.num_class))
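MulticlassNetImage projects every internal feature map to the same flattened size, target_channel * smallest_fm_shape ** 2, before fusing them: 4-D shapes go through the conv + adaptive-pool branch and 2-D shapes through the plain linear branch. A small standalone sketch of that bookkeeping with hypothetical shapes (the real ones come from _get_used_shapes and _get_smallest_shape):

# Hypothetical shapes; the original class derives them from the attached network's internal outputs.
target_channel = 128
smallest_fm_shape = 4                                          # assumed smallest spatial size
used_shape_list = [(1, 64, 16, 16), (1, 128, 8, 8), (1, 256)]  # two conv feature maps, one flat vector

flatten_size = target_channel * smallest_fm_shape * smallest_fm_shape   # 128 * 4 * 4 = 2048

for shape in used_shape_list:
    if len(shape) == 4:    # conv feature map: Conv2d -> BatchNorm -> ReLU -> AdaptiveAvgPool2d -> Flatten -> Linear
        print(shape, '-> conv branch,', flatten_size, 'features')
    elif len(shape) == 2:  # already-flat activation: Linear -> ReLU
        print(shape, '-> linear branch,', flatten_size, 'features')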