Example #1
    def create_network(self, blocks):
        models = nn.ModuleList()
        for i, block in enumerate(blocks):
            if block['type'] == 'net':
                self.width = int(block['width'])
                self.height = int(block['height'])
                continue
            elif block['type'] == 'yolo':
                yololayer = YoloLayer(use_cuda=self.use_cuda)
                anchors = block['anchors'].split(',')
                anchor_mask = block['mask'].split(',')
                yololayer.anchor_mask = [int(m) for m in anchor_mask]
                yololayer.anchors = [float(a) for a in anchors]
                yololayer.num_classes = int(block['classes'])
                yololayer.num_anchors = int(block['num'])
                yololayer.anchor_step = len(
                    yololayer.anchors) // yololayer.num_anchors
                try:
                    yololayer.rescore = int(block['rescore'])
                except KeyError:
                    pass
                yololayer.nth_layer = i
                yololayer.ignore_thresh = float(block['ignore_thresh'])
                yololayer.truth_thresh = float(block['truth_thresh'])
                yololayer.net_width = self.width
                yololayer.net_height = self.height
                models.append(yololayer)
        return models
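
The `blocks` argument is expected to be a list of dicts, one per section of a Darknet-style `.cfg` file, with the section name stored under 'type' and every "key = value" pair kept as a string. The sketch below shows one way such a list could be produced; it is an assumption inferred from the keys these methods read ('width', 'anchors', 'mask', ...), not the project's own parser.

def parse_cfg(cfgfile):
    # Turn a Darknet-style .cfg file into the list of dicts that
    # create_network() consumes: each [section] becomes one dict with
    # its name under 'type' and every "key = value" pair as a string.
    blocks = []
    block = None
    with open(cfgfile) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            if line.startswith('['):
                if block is not None:
                    blocks.append(block)
                block = {'type': line[1:-1].strip()}
                if block['type'] == 'convolutional':
                    # cfg files may omit this key, e.g. on a final 1x1 conv
                    block['batch_normalize'] = '0'
            else:
                key, value = line.split('=', 1)
                block[key.strip()] = value.strip()
    if block is not None:
        blocks.append(block)
    return blocks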
Example #2
    def create_network(self, blocks):
        models = nn.ModuleList()

        prev_filters = 3
        out_filters = []
        prev_stride = 1
        out_strides = []
        conv_id = 0
        for block in blocks:
            if block['type'] == 'net':
                prev_filters = int(block['channels'])
                continue
            elif block['type'] == 'convolutional':
                conv_id = conv_id + 1
                batch_normalize = int(block['batch_normalize'])
                filters = int(block['filters'])
                kernel_size = int(block['size'])
                stride = int(block['stride'])
                is_pad = int(block['pad'])
                pad = (kernel_size - 1) // 2 if is_pad else 0
                activation = block['activation']
                model = nn.Sequential()
                if batch_normalize:
                    model.add_module(
                        'conv{0}'.format(conv_id),
                        nn.Conv2d(prev_filters,
                                  filters,
                                  kernel_size,
                                  stride,
                                  pad,
                                  bias=False))
                    model.add_module('bn{0}'.format(conv_id),
                                     nn.BatchNorm2d(filters))
                    #model.add_module('bn{0}'.format(conv_id), BN2d(filters))
                else:
                    model.add_module(
                        'conv{0}'.format(conv_id),
                        nn.Conv2d(prev_filters, filters, kernel_size, stride,
                                  pad))
                if activation == 'leaky':
                    model.add_module('leaky{0}'.format(conv_id),
                                     nn.LeakyReLU(0.1, inplace=True))
                elif activation == 'relu':
                    model.add_module('relu{0}'.format(conv_id),
                                     nn.ReLU(inplace=True))
                prev_filters = filters
                out_filters.append(prev_filters)
                prev_stride = stride * prev_stride
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'maxpool':
                pool_size = int(block['size'])
                stride = int(block['stride'])
                if stride > 1:
                    model = nn.MaxPool2d(pool_size, stride)
                else:
                    model = MaxPoolStride1()
                out_filters.append(prev_filters)
                prev_stride = stride * prev_stride
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'avgpool':
                model = GlobalAvgPool2d()
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'softmax':
                model = nn.Softmax(dim=1)
                out_strides.append(prev_stride)
                out_filters.append(prev_filters)
                models.append(model)
            elif block['type'] == 'cost':
                if block['_type'] == 'sse':
                    model = nn.MSELoss(reduction='mean')
                elif block['_type'] == 'L1':
                    model = nn.L1Loss(reduction='mean')
                elif block['_type'] == 'smooth':
                    model = nn.SmoothL1Loss(reduction='mean')
                out_filters.append(1)
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'reorg':
                stride = int(block['stride'])
                prev_filters = stride * stride * prev_filters
                out_filters.append(prev_filters)
                prev_stride = prev_stride * stride
                out_strides.append(prev_stride)
                models.append(Reorg(stride))
            elif block['type'] == 'upsample':
                stride = int(block['stride'])
                out_filters.append(prev_filters)
                prev_stride = prev_stride // stride
                out_strides.append(prev_stride)
                #models.append(nn.Upsample(scale_factor=stride, mode='nearest'))
                models.append(Upsample(stride))
            elif block['type'] == 'route':
                layers = block['layers'].split(',')
                ind = len(models)
                layers = [
                    int(i) if int(i) > 0 else int(i) + ind for i in layers
                ]
                if len(layers) == 1:
                    prev_filters = out_filters[layers[0]]
                    prev_stride = out_strides[layers[0]]
                elif len(layers) == 2:
                    assert (layers[0] == ind - 1)
                    prev_filters = out_filters[layers[0]] + out_filters[
                        layers[1]]
                    prev_stride = out_strides[layers[0]]
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(EmptyModule())
            elif block['type'] == 'shortcut':
                ind = len(models)
                prev_filters = out_filters[ind - 1]
                out_filters.append(prev_filters)
                prev_stride = out_strides[ind - 1]
                out_strides.append(prev_stride)
                models.append(EmptyModule())
            elif block['type'] == 'connected':
                filters = int(block['output'])
                if block['activation'] == 'linear':
                    model = nn.Linear(prev_filters, filters)
                elif block['activation'] == 'leaky':
                    model = nn.Sequential(nn.Linear(prev_filters, filters),
                                          nn.LeakyReLU(0.1, inplace=True))
                elif block['activation'] == 'relu':
                    model = nn.Sequential(nn.Linear(prev_filters, filters),
                                          nn.ReLU(inplace=True))
                prev_filters = filters
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'region':
                loss = RegionLoss()
                anchors = block['anchors'].split(',')
                loss.anchors = [float(i) for i in anchors]
                loss.num_classes = int(block['classes'])
                loss.num_anchors = int(block['num'])
                loss.anchor_step = len(loss.anchors) // loss.num_anchors
                loss.object_scale = float(block['object_scale'])
                loss.noobject_scale = float(block['noobject_scale'])
                loss.class_scale = float(block['class_scale'])
                loss.coord_scale = float(block['coord_scale'])
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(loss)
            elif block['type'] == 'yolo':
                yolo_layer = YoloLayer()
                anchors = block['anchors'].split(',')
                anchor_mask = block['mask'].split(',')
                yolo_layer.anchor_mask = [int(i) for i in anchor_mask]
                yolo_layer.anchors = [float(i) for i in anchors]
                yolo_layer.num_classes = int(block['classes'])
                yolo_layer.num_anchors = int(block['num'])
                yolo_layer.anchor_step = len(
                    yolo_layer.anchors) // yolo_layer.num_anchors
                yolo_layer.stride = prev_stride
                #yolo_layer.object_scale = float(block['object_scale'])
                #yolo_layer.noobject_scale = float(block['noobject_scale'])
                #yolo_layer.class_scale = float(block['class_scale'])
                #yolo_layer.coord_scale = float(block['coord_scale'])
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(yolo_layer)
            else:
                print('unknown type %s' % (block['type']))

        return models
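
In the 'convolutional' branch above, `pad = (kernel_size - 1) // 2` keeps the spatial size unchanged for stride-1 layers, and `prev_stride` accumulates the network's total downsampling factor, which the 'yolo' branch later stores as the detection stride. A small self-contained check of that bookkeeping, using plain torch.nn modules and two hypothetical blocks, is sketched below.

import torch
import torch.nn as nn

# Two hypothetical convolutional blocks in the dict form the cfg parser yields.
blocks = [
    {'filters': '32', 'size': '3', 'stride': '1', 'pad': '1',
     'batch_normalize': '1', 'activation': 'leaky'},
    {'filters': '64', 'size': '3', 'stride': '2', 'pad': '1',
     'batch_normalize': '1', 'activation': 'leaky'},
]

prev_filters, prev_stride = 3, 1
layers = []
for b in blocks:
    filters, k, s = int(b['filters']), int(b['size']), int(b['stride'])
    pad = (k - 1) // 2 if int(b['pad']) else 0
    layers += [nn.Conv2d(prev_filters, filters, k, s, pad, bias=False),
               nn.BatchNorm2d(filters),
               nn.LeakyReLU(0.1, inplace=True)]
    prev_filters = filters
    prev_stride *= s                     # cumulative downsampling factor

net = nn.Sequential(*layers)
out = net(torch.randn(1, 3, 416, 416))
print(out.shape, prev_stride)            # torch.Size([1, 64, 208, 208]) 2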
Example #3
    def create_network(self, blocks):
        models = nn.ModuleList()

        prev_filters = 3
        out_filters = []
        prev_stride = 1
        out_strides = []
        conv_id = 0
        ind = -2
        for block in blocks:
            ind += 1
            if block['type'] == 'net':
                prev_filters = int(block['channels'])
                self.width = int(block['width'])
                self.height = int(block['height'])
                continue
            elif block['type'] == 'densenet':

                class FeatureDenseNet121(nn.Module):
                    def __init__(self):
                        super(FeatureDenseNet121, self).__init__()
                        self.densenet121 = torchvision.models.densenet121()

                    def forward(self, x):
                        x = self.densenet121.features(x)
                        return x

                def load_my_state_dict(net, state_dict):
                    own_state = net.state_dict()
                    for name, param in state_dict.items():
                        if name not in own_state:
                            continue
                        if isinstance(param, nn.Parameter):
                            # backwards compatibility for serialized parameters
                            param = param.data
                        own_state[name].copy_(param)
                    return net

                def dfs_freeze(model):
                    for name, child in model.named_children():
                        for param in child.parameters():
                            param.requires_grad = False
                        dfs_freeze(child)

                def dfs_unfreeze(model):
                    for name, child in model.named_children():
                        for param in child.parameters():
                            param.requires_grad = True
                        dfs_unfreeze(child)

                model = FeatureDenseNet121()
                dfs_unfreeze(model)
                self.pretrained_path = block['pretrained_path']
                map_location = None if torch.cuda.is_available() else 'cpu'
                checkpoint = torch.load(self.pretrained_path,
                                        map_location=map_location)
                load_my_state_dict(model, checkpoint)
                print("=> loaded chexnet weights")
                prev_filters = int(block['filters'])
                stride = int(block['stride'])
                out_filters.append(prev_filters)
                prev_stride = stride * prev_stride
                out_strides.append(prev_stride)
                models.append(model)

            elif block['type'] == 'convolutional':
                conv_id = conv_id + 1
                batch_normalize = int(block['batch_normalize'])
                filters = int(block['filters'])
                kernel_size = int(block['size'])
                stride = int(block['stride'])
                is_pad = int(block['pad'])
                pad = (kernel_size - 1) // 2 if is_pad else 0
                activation = block['activation']
                model = nn.Sequential()
                if batch_normalize:
                    model.add_module(
                        'conv{0}'.format(conv_id),
                        nn.Conv2d(prev_filters,
                                  filters,
                                  kernel_size,
                                  stride,
                                  pad,
                                  bias=False))
                    model.add_module('bn{0}'.format(conv_id),
                                     nn.BatchNorm2d(filters))
                    #model.add_module('bn{0}'.format(conv_id), BN2d(filters))
                else:
                    model.add_module(
                        'conv{0}'.format(conv_id),
                        nn.Conv2d(prev_filters, filters, kernel_size, stride,
                                  pad))
                if activation == 'leaky':
                    model.add_module('leaky{0}'.format(conv_id),
                                     nn.LeakyReLU(0.1, inplace=True))
                elif activation == 'relu':
                    model.add_module('relu{0}'.format(conv_id),
                                     nn.ReLU(inplace=True))
                prev_filters = filters
                out_filters.append(prev_filters)
                prev_stride = stride * prev_stride
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'maxpool':
                pool_size = int(block['size'])
                stride = int(block['stride'])
                if stride > 1:
                    model = nn.MaxPool2d(pool_size, stride)
                else:
                    model = MaxPoolStride1()
                out_filters.append(prev_filters)
                prev_stride = stride * prev_stride
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'avgpool':
                model = GlobalAvgPool2d()
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'softmax':
                model = nn.Softmax(dim=1)
                out_strides.append(prev_stride)
                out_filters.append(prev_filters)
                models.append(model)
            elif block['type'] == 'cost':
                if block['_type'] == 'sse':
                    model = nn.MSELoss(reduction='mean')
                elif block['_type'] == 'L1':
                    model = nn.L1Loss(reduction='mean')
                elif block['_type'] == 'smooth':
                    model = nn.SmoothL1Loss(reduction='mean')
                out_filters.append(1)
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'reorg':
                stride = int(block['stride'])
                prev_filters = stride * stride * prev_filters
                out_filters.append(prev_filters)
                prev_stride = prev_stride * stride
                out_strides.append(prev_stride)
                models.append(Reorg(stride))
            elif block['type'] == 'upsample':
                stride = int(block['stride'])
                out_filters.append(prev_filters)
                prev_stride = prev_stride // stride
                out_strides.append(prev_stride)
                #models.append(nn.Upsample(scale_factor=stride, mode='nearest'))
                models.append(Upsample(stride))
            elif block['type'] == 'route':
                layers = block['layers'].split(',')
                ind = len(models)
                layers = [
                    int(i) if int(i) > 0 else int(i) + ind for i in layers
                ]
                if len(layers) == 1:
                    prev_filters = out_filters[layers[0]]
                    prev_stride = out_strides[layers[0]]
                elif len(layers) == 2:
                    assert (layers[0] == ind - 1)
                    prev_filters = out_filters[layers[0]] + out_filters[
                        layers[1]]
                    prev_stride = out_strides[layers[0]]
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(EmptyModule())
            elif block['type'] == 'shortcut':
                ind = len(models)
                prev_filters = out_filters[ind - 1]
                out_filters.append(prev_filters)
                prev_stride = out_strides[ind - 1]
                out_strides.append(prev_stride)
                models.append(EmptyModule())
            elif block['type'] == 'connected':
                filters = int(block['output'])
                if block['activation'] == 'linear':
                    model = nn.Linear(prev_filters, filters)
                elif block['activation'] == 'leaky':
                    model = nn.Sequential(nn.Linear(prev_filters, filters),
                                          nn.LeakyReLU(0.1, inplace=True))
                elif block['activation'] == 'relu':
                    model = nn.Sequential(nn.Linear(prev_filters, filters),
                                          nn.ReLU(inplace=True))
                prev_filters = filters
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'region':
                region_layer = RegionLayer(use_cuda=self.use_cuda)
                anchors = block['anchors'].split(',')
                region_layer.anchors = [float(i) for i in anchors]
                region_layer.num_classes = int(block['classes'])
                region_layer.num_anchors = int(block['num'])
                region_layer.anchor_step = len(
                    region_layer.anchors) // region_layer.num_anchors
                region_layer.rescore = int(block['rescore'])
                region_layer.object_scale = float(block['object_scale'])
                region_layer.noobject_scale = float(block['noobject_scale'])
                region_layer.class_scale = float(block['class_scale'])
                region_layer.coord_scale = float(block['coord_scale'])
                region_layer.thresh = float(block['thresh'])
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(region_layer)
            elif block['type'] == 'yolo':
                yolo_layer = YoloLayer(use_cuda=self.use_cuda)
                anchors = block['anchors'].split(',')
                anchor_mask = block['mask'].split(',')
                yolo_layer.anchor_mask = [int(i) for i in anchor_mask]
                yolo_layer.anchors = [float(i) for i in anchors]
                yolo_layer.num_classes = int(block['classes'])
                yolo_layer.num_anchors = int(block['num'])
                yolo_layer.anchor_step = len(
                    yolo_layer.anchors) // yolo_layer.num_anchors
                try:
                    yolo_layer.rescore = int(block['rescore'])
                except KeyError:
                    pass
                yolo_layer.ignore_thresh = float(block['ignore_thresh'])
                yolo_layer.truth_thresh = float(block['truth_thresh'])
                yolo_layer.stride = prev_stride
                yolo_layer.nth_layer = ind
                yolo_layer.net_width = self.width
                yolo_layer.net_height = self.height
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(yolo_layer)
            else:
                print('unknown type %s' % (block['type']))

        return models
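
`load_my_state_dict` above copies only the checkpoint entries whose names exist in the target network, which is the usual pattern for reusing a backbone checkpoint whose head differs from the current model. A self-contained sketch of the same pattern on two toy modules (hypothetical names, not from this project) follows; recent PyTorch can often achieve the same with `load_state_dict(checkpoint, strict=False)`.

import torch.nn as nn

class Source(nn.Module):
    # Model that produced the checkpoint: shared backbone plus a classifier head.
    def __init__(self):
        super().__init__()
        self.backbone = nn.Linear(8, 16)
        self.classifier = nn.Linear(16, 10)

class Target(nn.Module):
    # New model that reuses the backbone but carries a different head.
    def __init__(self):
        super().__init__()
        self.backbone = nn.Linear(8, 16)
        self.detector = nn.Linear(16, 4)

checkpoint = Source().state_dict()
target = Target()

own_state = target.state_dict()
for name, param in checkpoint.items():
    if name in own_state and own_state[name].shape == param.shape:
        own_state[name].copy_(param)      # copy only the overlapping weights

# Equivalent shortcut in current PyTorch:
# target.load_state_dict(checkpoint, strict=False)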
Example #4
    def create_network(self, blocks):
        models = nn.ModuleList()

        prev_filters = 3
        out_filters = []
        prev_stride = 1
        out_strides = []
        conv_id = 0
        for block in blocks:
            if block["type"] == "net":
                prev_filters = int(block["channels"])
                continue
            elif block["type"] == "convolutional":
                conv_id = conv_id + 1
                batch_normalize = int(block["batch_normalize"])
                filters = int(block["filters"])
                kernel_size = int(block["size"])
                stride = int(block["stride"])
                is_pad = int(block["pad"])
                pad = (kernel_size - 1) // 2 if is_pad else 0
                activation = block["activation"]
                model = nn.Sequential()
                if batch_normalize:
                    model.add_module(
                        "conv{0}".format(conv_id),
                        nn.Conv2d(prev_filters,
                                  filters,
                                  kernel_size,
                                  stride,
                                  pad,
                                  bias=False))
                    model.add_module("bn{0}".format(conv_id),
                                     nn.BatchNorm2d(filters))
                    #model.add_module("bn{0}".format(conv_id), BN2d(filters))
                else:
                    model.add_module(
                        "conv{0}".format(conv_id),
                        nn.Conv2d(prev_filters, filters, kernel_size, stride,
                                  pad))
                if activation == "leaky":
                    model.add_module("leaky{0}".format(conv_id),
                                     nn.LeakyReLU(0.1, inplace=True))
                elif activation == "relu":
                    model.add_module("relu{0}".format(conv_id),
                                     nn.ReLU(inplace=True))
                prev_filters = filters
                out_filters.append(prev_filters)
                prev_stride = stride * prev_stride
                out_strides.append(prev_stride)
                models.append(model)
            elif block["type"] == "maxpool":
                pool_size = int(block["size"])
                stride = int(block["stride"])
                if stride > 1:
                    model = nn.MaxPool2d(pool_size, stride)
                else:
                    model = MaxPoolStride1()
                out_filters.append(prev_filters)
                prev_stride = stride * prev_stride
                out_strides.append(prev_stride)
                models.append(model)
            elif block["type"] == "avgpool":
                model = GlobalAvgPool2d()
                out_filters.append(prev_filters)
                models.append(model)
            elif block["type"] == "softmax":
                model = nn.Softmax(dim=1)
                out_strides.append(prev_stride)
                out_filters.append(prev_filters)
                models.append(model)
            elif block["type"] == "cost":
                if block["_type"] == "sse":
                    model = nn.MSELoss(reduction="mean")
                elif block["_type"] == "L1":
                    model = nn.L1Loss(reduction="mean")
                elif block["_type"] == "smooth":
                    model = nn.SmoothL1Loss(reduction="mean")
                out_filters.append(1)
                out_strides.append(prev_stride)
                models.append(model)
            elif block["type"] == "reorg":
                stride = int(block["stride"])
                prev_filters = stride * stride * prev_filters
                out_filters.append(prev_filters)
                prev_stride = prev_stride * stride
                out_strides.append(prev_stride)
                models.append(Reorg(stride))
            elif block["type"] == "upsample":
                stride = int(block["stride"])
                out_filters.append(prev_filters)
                prev_stride = prev_stride // stride
                out_strides.append(prev_stride)
                #models.append(nn.Upsample(scale_factor=stride, mode="nearest"))
                models.append(Upsample(stride))
            elif block["type"] == "route":
                layers = block["layers"].split(",")
                ind = len(models)
                layers = [
                    int(i) if int(i) > 0 else int(i) + ind for i in layers
                ]
                if len(layers) == 1:
                    prev_filters = out_filters[layers[0]]
                    prev_stride = out_strides[layers[0]]
                elif len(layers) == 2:
                    assert (layers[0] == ind - 1)
                    prev_filters = out_filters[layers[0]] + out_filters[
                        layers[1]]
                    prev_stride = out_strides[layers[0]]
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(EmptyModule())
            elif block["type"] == "shortcut":
                ind = len(models)
                prev_filters = out_filters[ind - 1]
                out_filters.append(prev_filters)
                prev_stride = out_strides[ind - 1]
                out_strides.append(prev_stride)
                models.append(EmptyModule())
            elif block["type"] == "connected":
                filters = int(block["output"])
                if block["activation"] == "linear":
                    model = nn.Linear(prev_filters, filters)
                elif block["activation"] == "leaky":
                    model = nn.Sequential(nn.Linear(prev_filters, filters),
                                          nn.LeakyReLU(0.1, inplace=True))
                elif block["activation"] == "relu":
                    model = nn.Sequential(nn.Linear(prev_filters, filters),
                                          nn.ReLU(inplace=True))
                prev_filters = filters
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(model)
            elif block["type"] == "region":
                loss = RegionLoss()
                anchors = block["anchors"].split(",")
                loss.anchors = [float(i) for i in anchors]
                loss.num_classes = int(block["classes"])
                loss.num_anchors = int(block["num"])
                loss.anchor_step = len(loss.anchors) // loss.num_anchors
                loss.object_scale = float(block["object_scale"])
                loss.noobject_scale = float(block["noobject_scale"])
                loss.class_scale = float(block["class_scale"])
                loss.coord_scale = float(block["coord_scale"])
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(loss)
            elif block["type"] == "yolo":
                yolo_layer = YoloLayer()
                anchors = block["anchors"].split(",")
                anchor_mask = block["mask"].split(",")
                yolo_layer.anchor_mask = [int(i) for i in anchor_mask]
                yolo_layer.anchors = [float(i) for i in anchors]
                yolo_layer.num_classes = int(block["classes"])
                yolo_layer.num_anchors = int(block["num"])
                yolo_layer.anchor_step = len(
                    yolo_layer.anchors) // yolo_layer.num_anchors
                yolo_layer.stride = prev_stride
                #yolo_layer.object_scale = float(block["object_scale"])
                #yolo_layer.noobject_scale = float(block["noobject_scale"])
                #yolo_layer.class_scale = float(block["class_scale"])
                #yolo_layer.coord_scale = float(block["coord_scale"])
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(yolo_layer)
            else:
                print("unknown type %s" % (block["type"]))

        return models
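
In the "route" branch above, a two-entry `layers` list means the forward pass concatenates the two referenced feature maps along the channel dimension, which is why the output channel count is the sum out_filters[layers[0]] + out_filters[layers[1]]; a "shortcut" block instead adds feature maps elementwise, so the channel count is unchanged. A minimal sketch with hypothetical feature maps:

import torch

# Hypothetical outputs of two earlier layers at the same spatial resolution.
feat_a = torch.randn(1, 256, 26, 26)
feat_b = torch.randn(1, 128, 26, 26)

# route with two layers: channel-wise concatenation, 256 + 128 = 384 channels.
routed = torch.cat((feat_a, feat_b), dim=1)
print(routed.shape)                       # torch.Size([1, 384, 26, 26])

# shortcut: elementwise addition, channel count stays at 256.
skipped = feat_a + torch.randn(1, 256, 26, 26)
print(skipped.shape)                      # torch.Size([1, 256, 26, 26])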