def create_model(cfg, conf_distr):
    """Build an SSD detector and its prior-box generator.

    Args:
        cfg: config node providing NETS, SSDS, FEATURE_LAYER, ASPECT_RATIOS,
            NUM_CLASSES, IMAGE_SIZE, SIZES, STEPS and CLIP.
        conf_distr: confidence-distribution argument forwarded unchanged to
            the SSD head constructor.

    Returns:
        Tuple ``(model, priorbox)`` — the assembled network and the
        ``PriorBox`` used to generate matching anchor boxes.
    """
    base = networks_map[cfg.NETS]

    # Anchors per feature-map cell, derived from the configured aspect ratios.
    # (Replaces several hand-rolled, previously commented-out formulas.)
    number_box = PriorBox.get_anchor_number(cfg.ASPECT_RATIOS)
    print('==>AnchorBox:', number_box)

    model = ssds_map[cfg.SSDS](base=base,
                               feature_layer=cfg.FEATURE_LAYER,
                               mbox=number_box,
                               num_classes=cfg.NUM_CLASSES,
                               conf_distr=conf_distr)

    # Dummy forward pass to discover the spatial size of each feature map.
    feature_maps = _forward_features_size(model, cfg.IMAGE_SIZE)
    print('==>Feature map size:')
    print(feature_maps)

    # NOTE: the old `priors = Variable(priorbox.forward(), volatile=True)` was
    # removed: its result was never used and `volatile` is a deprecated
    # pre-0.4 PyTorch API.
    priorbox = PriorBox(image_size=cfg.IMAGE_SIZE, feature_maps=feature_maps,
                        aspect_ratios=cfg.ASPECT_RATIOS, scale=cfg.SIZES,
                        archor_stride=cfg.STEPS, clip=cfg.CLIP)
    return model, priorbox
def create_model(cfg):
    """Build an SSD detector and its prior-box generator.

    Args:
        cfg: config node providing NETS, SSDS, FEATURE_LAYER, ASPECT_RATIOS,
            NUM_CLASSES, IMAGE_SIZE, SIZES, STEPS and CLIP.

    Returns:
        Tuple ``(model, priorbox)``.
    """
    base = networks_map[cfg.NETS]

    # Anchors per feature-map cell: 2 base boxes plus 2 per extra aspect ratio.
    number_box = [
        2 + 2 * len(aspect_ratios) for aspect_ratios in cfg.ASPECT_RATIOS
    ]

    model = ssds_map[cfg.SSDS](base=base,
                               feature_layer=cfg.FEATURE_LAYER,
                               mbox=number_box,
                               num_classes=cfg.NUM_CLASSES)

    # Dummy forward pass to discover the spatial size of each feature map.
    feature_maps = _forward_features_size(model, cfg.IMAGE_SIZE)
    print('==>Feature map size:')
    print(feature_maps)

    # NOTE: the old `priors = Variable(priorbox.forward(), volatile=True)` was
    # removed: its result was never used and `volatile` is a deprecated
    # pre-0.4 PyTorch API.
    priorbox = PriorBox(image_size=cfg.IMAGE_SIZE, feature_maps=feature_maps,
                        aspect_ratios=cfg.ASPECT_RATIOS, scale=cfg.SIZES,
                        archor_stride=cfg.STEPS, clip=cfg.CLIP)
    return model, priorbox
def create_model(cfg):
    """Build a detector for the configured backbone, optionally with priors.

    Args:
        cfg: config node; reads NETS, SSDS, FEATURE_LAYER, ASPECT_RATIOS,
            NUM_CLASSES, IMAGE_SIZE, PRIOR and, when PRIOR is truthy,
            SIZES, STEPS and CLIP.

    Returns:
        ``(model, priorbox, feature_maps)`` when ``cfg.PRIOR`` is truthy,
        otherwise just ``model``.
    """
    # --- backbone selection -------------------------------------------------
    if cfg.NETS == 'mobilenet_v3':
        base = MobileNetV3_Large()
        ckpt = torch.load("./Student/mbv3_large.old.pth.tar", map_location='cpu')
        raw_state = ckpt["state_dict"]
        # Drop the first 7 characters of every key — presumably the
        # "module." prefix added by DataParallel (TODO confirm).
        cleaned_state = {key[7:]: raw_state[key] for key in raw_state}
        base.load_state_dict(cleaned_state)
    elif cfg.NETS == 'mobilenet_v2':
        base = torchvision.models.mobilenet_v2(True).features[:18]
    elif cfg.NETS == 'resnet_18':
        base = torchvision.models.resnet18(True)
        base.fc = Identity()  # strip the classifier; keep features only
    else:
        base = networks_map[cfg.NETS]

    # Anchors per cell: int-typed first ratio means paired boxes per ratio,
    # otherwise one box per listed ratio.
    number_box = [
        2 * len(ratios) if isinstance(ratios[0], int) else len(ratios)
        for ratios in cfg.ASPECT_RATIOS
    ]

    # --- head selection (mirrors the backbone branch) -----------------------
    if cfg.NETS == 'mobilenet_v3':
        model = ssd_lite_MobileNetV3.build_ssd_lite(
            base=base, feature_layer=cfg.FEATURE_LAYER,
            mbox=number_box, num_classes=cfg.NUM_CLASSES)
    elif cfg.NETS == 'mobilenet_v2':
        model = ssd_lite_MobileNetV2.build_ssd_lite(
            base=base, feature_layer=cfg.FEATURE_LAYER,
            mbox=number_box, num_classes=cfg.NUM_CLASSES)
    elif cfg.NETS == 'resnet_18':
        model = ssd_ResNet18.build_ssd(
            base=base, feature_layer=cfg.FEATURE_LAYER,
            mbox=number_box, num_classes=cfg.NUM_CLASSES)
    else:
        model = ssds_map[cfg.SSDS](
            base=base, feature_layer=cfg.FEATURE_LAYER,
            mbox=number_box, num_classes=cfg.NUM_CLASSES)

    # Dummy forward pass to discover the spatial size of each feature map.
    feature_maps = _forward_features_size(model, cfg.IMAGE_SIZE)
    print('==>Feature map size:')
    print(feature_maps)

    if cfg.PRIOR:
        priorbox = PriorBox(image_size=cfg.IMAGE_SIZE,
                            feature_maps=feature_maps,
                            aspect_ratios=cfg.ASPECT_RATIOS,
                            scale=cfg.SIZES,
                            archor_stride=cfg.STEPS,
                            clip=cfg.CLIP)
        return model, priorbox, feature_maps
    else:
        return model
def create_model(cfg):
    """Compose backbone + SSD head and build the matching prior boxes.

    Args:
        cfg: config node providing NETS, SSDS, FEATURE_LAYER, ASPECT_RATIOS,
            NUM_CLASSES, IMAGE_SIZE, SIZES, STEPS and CLIP.

    Returns:
        Tuple ``(model, priorbox)``.
    """
    # The backbone maps an image to feature maps at several scales; the head
    # turns those maps into per-anchor location and class predictions.
    backbone = networks_map[cfg.NETS]
    head_factory = ssds_map[cfg.SSDS]

    # Anchors per cell: int-typed first ratio means paired boxes per ratio,
    # otherwise one box per listed ratio.
    anchors_per_cell = []
    for ratios in cfg.ASPECT_RATIOS:
        count = 2 * len(ratios) if isinstance(ratios[0], int) else len(ratios)
        anchors_per_cell.append(count)

    model = head_factory(base=backbone,
                         feature_layer=cfg.FEATURE_LAYER,
                         mbox=anchors_per_cell,
                         num_classes=cfg.NUM_CLASSES)
    logger.info('Model blocks:')
    logger.info(model)

    # Dummy forward pass to discover the spatial size of each feature map.
    feature_maps = _forward_features_size(model, cfg.IMAGE_SIZE)
    logger.info('Feature map size:')
    logger.info(feature_maps)

    # PriorBox produces the anchor tensor that location/class predictions
    # are matched against.
    priorbox = PriorBox(image_size=cfg.IMAGE_SIZE,
                        feature_maps=feature_maps,
                        aspect_ratios=cfg.ASPECT_RATIOS,
                        scale=cfg.SIZES,
                        archor_stride=cfg.STEPS,
                        clip=cfg.CLIP)
    return model, priorbox