def main(args):
    """Quantize a segmentation model (QAT) and evaluate it on the val dataset.

    Args:
        args: parsed CLI namespace with at least `cfg`, `data_format`,
            `model_path`, and `num_workers` attributes.

    Raises:
        RuntimeError: if no config file is given or no val dataset configured.
        ValueError: if NHWC is requested for a non-DeepLabV3P model, or the
            val dataset is empty.
    """
    env_info = get_sys_env()
    use_gpu = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if use_gpu else 'cpu')

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(args.cfg)

    # The NHWC data layout is only wired up for the DeepLabV3P model.
    if args.data_format == 'NHWC':
        if cfg.dic['model']['type'] != 'DeepLabV3P':
            raise ValueError(
                'The "NHWC" data format only support the DeepLabV3P model!')
        cfg.dic['model']['data_format'] = args.data_format
        cfg.dic['model']['backbone']['data_format'] = args.data_format
        # Each loss-type dict is mutated in place.
        for loss_cfg in cfg.dic['loss']['types']:
            loss_cfg['data_format'] = args.data_format

    val_dataset = cfg.val_dataset
    if val_dataset is None:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )
    if len(val_dataset) == 0:
        raise ValueError(
            'The length of val_dataset is 0. Please check if your dataset is valid'
        )

    logger.info('\n---------------Config Information---------------\n' +
                str(cfg) + '------------------------------------------------')

    model = cfg.model
    skip_quant(model)
    quant_model = QAT(config=quant_config).quantize(model)
    logger.info('Quantize the model successfully')

    if args.model_path:
        utils.load_entire_model(quant_model, args.model_path)
        logger.info('Loaded trained params of model successfully')

    test_config = get_test_config(cfg, args)
    config_check(cfg, val_dataset=val_dataset)

    evaluate(
        quant_model, val_dataset, num_workers=args.num_workers, **test_config)
def main(args):
    """Evaluate a model on the (target-domain) Cityscapes validation split.

    Args:
        args: parsed CLI namespace with `cfg`, `model_path`, `num_workers`.

    Raises:
        RuntimeError: if no config file is given.
        NotImplementedError: if the configured target dataset is not cityscapes.
        ValueError: if the validation dataset is empty.
    """
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'
    paddle.set_device(place)

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(args.cfg)

    # Only the cityscapes target dataset is supported so far.
    if cfg.dic["data"]["target"]["dataset"] == 'cityscapes':
        val_dataset = CityDataset(
            split='val', **cfg.dic["data"]["target"]["kwargs"])
    else:
        raise NotImplementedError()

    # BUG FIX: leftover debug code removed — the original printed the dataset
    # length and then every single sample whenever len(val_dataset) < 500,
    # flooding stdout and slowing evaluation.
    if val_dataset is None:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )
    elif len(val_dataset) == 0:
        raise ValueError(
            'The length of val_dataset is 0. Please check if your dataset is valid'
        )

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    if args.model_path:
        utils.load_entire_model(model, args.model_path)
        logger.info('Loaded trained params of model successfully')

    test_config = get_test_config(cfg, args)
    val.evaluate(
        model, val_dataset, num_workers=args.num_workers, **test_config)
def main(args):
    """Evaluate a segmentation model, optionally with augmented/sliding eval.

    Args:
        args: parsed CLI namespace providing `cfg`, `model_path`, and the
            evaluation options forwarded to `evaluate`.

    Raises:
        RuntimeError: if no config file or no val dataset is configured.
        ValueError: if the validation dataset is empty.
    """
    env_info = get_sys_env()
    use_gpu = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if use_gpu else 'cpu')

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(args.cfg)

    val_dataset = cfg.val_dataset
    if val_dataset is None:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )
    if len(val_dataset) == 0:
        raise ValueError(
            'The length of val_dataset is 0. Please check if your dataset is valid'
        )

    logger.info('\n---------------Config Information---------------\n' +
                str(cfg) + '------------------------------------------------')

    model = cfg.model
    if args.model_path:
        utils.load_entire_model(model, args.model_path)
        logger.info('Loaded trained params of model successfully')

    config_check(cfg, val_dataset=val_dataset)

    evaluate(
        model,
        val_dataset,
        aug_eval=args.aug_eval,
        scales=args.scales,
        flip_horizontal=args.flip_horizontal,
        flip_vertical=args.flip_vertical,
        is_slide=args.is_slide,
        crop_size=args.crop_size,
        stride=args.stride,
        num_workers=args.num_workers,
    )
def main(args):
    """Export a quantized segmentation model for deployment.

    Quantizes the configured model, loads trained params, optionally wraps it
    with argmax/softmax post-processing, saves the static model plus a
    `deploy.yaml` describing the preprocessing transforms.

    Args:
        args: parsed CLI namespace with `cfg`, `model_path`, `save_dir`,
            `without_argmax`, `with_softmax`.
    """
    os.environ['PADDLESEG_EXPORT_STAGE'] = 'True'
    cfg = Config(args.cfg)
    net = cfg.model

    skip_quant(net)
    quantizer = QAT(config=quant_config)
    quant_net = quantizer.quantize(net)
    logger.info('Quantize the model successfully')

    if args.model_path:
        utils.load_entire_model(quant_net, args.model_path)
        logger.info('Loaded trained params of model successfully')

    if not args.without_argmax or args.with_softmax:
        new_net = SavedSegmentationNet(quant_net, args.without_argmax,
                                       args.with_softmax)
    else:
        # BUG FIX: previously this branch fell back to the raw `net`, so the
        # un-quantized model was exported. Export the quantized network in
        # both branches.
        new_net = quant_net

    new_net.eval()
    save_path = os.path.join(args.save_dir, 'model')
    input_spec = [
        paddle.static.InputSpec(shape=[None, 3, None, None], dtype='float32')
    ]
    quantizer.save_quantized_model(new_net, save_path, input_spec=input_spec)

    # Emit a deploy config next to the saved model; default to a bare
    # Normalize transform when the export config specifies none.
    yml_file = os.path.join(args.save_dir, 'deploy.yaml')
    with open(yml_file, 'w') as file:
        transforms = cfg.export_config.get('transforms', [{
            'type': 'Normalize'
        }])
        data = {
            'Deploy': {
                'transforms': transforms,
                'model': 'model.pdmodel',
                'params': 'model.pdiparams'
            }
        }
        yaml.dump(data, file)

    logger.info(f'Model is saved in {args.save_dir}.')
def init_weight(self):
    """Load pretrained weights into this model when a checkpoint is set."""
    pretrained = self.pretrained
    if pretrained is None:
        return
    utils.load_entire_model(self, pretrained)
def __init__(self,
             img_size=384,
             patch_size=4,
             in_chans=3,
             class_num=1000,
             embed_dim=192,
             depths=[2, 2, 18, 2],
             num_heads=[6, 12, 24, 48],
             window_size=12,
             mlp_ratio=4.,
             qkv_bias=True,
             qk_scale=None,
             drop_rate=0.,
             attn_drop_rate=0.,
             drop_path_rate=0.1,
             norm_layer=nn.LayerNorm,
             ape=False,
             patch_norm=True,
             use_checkpoint=False,
             pretrained=False):
    """Build the Swin-Large (patch 4, window 12, 384px) backbone.

    Args:
        img_size (int): Input image size fed to the patch embedding.
        patch_size (int): Side length of each non-overlapping patch.
        in_chans (int): Number of input image channels.
        class_num (int): Number of classification classes (stored as
            ``self.num_classes``; the classification head itself is
            commented out below, so this backbone emits features only).
        embed_dim (int): Channel dimension after patch embedding; doubles
            at every stage.
        depths (list[int]): Number of transformer blocks per stage.
        num_heads (list[int]): Attention heads per stage.
        window_size (int): Local attention window size.
        mlp_ratio (float): Hidden/embed ratio of each MLP block.
        qkv_bias (bool): Whether QKV projections use a bias term.
        qk_scale (float | None): Override for the attention scale factor.
        drop_rate (float): Dropout after position embedding / in blocks.
        attn_drop_rate (float): Dropout on attention weights.
        drop_path_rate (float): Max stochastic-depth rate (linearly ramped
            across all blocks).
        norm_layer: Normalization layer class.
        ape (bool): If True, add a learned absolute position embedding.
        patch_norm (bool): If True, normalize after patch embedding.
        use_checkpoint (bool): Enable gradient checkpointing in layers.
        pretrained (bool): If True, download and load ImageNet-22k→1k
            pretrained weights.
    """
    super(SwinTransformer_large_patch4_window12_384, self).__init__()
    self.num_classes = num_classes = class_num
    self.num_layers = len(depths)
    self.embed_dim = embed_dim
    self.ape = ape
    self.patch_norm = patch_norm
    # Channel count of the final stage: embed_dim doubles per stage.
    self.num_features = int(embed_dim * 2**(self.num_layers - 1))
    self.mlp_ratio = mlp_ratio
    self.pretrained = pretrained

    # split image into non-overlapping patches
    self.patch_embed = PatchEmbed(
        img_size=img_size,
        patch_size=patch_size,
        in_chans=in_chans,
        embed_dim=embed_dim,
        norm_layer=norm_layer if self.patch_norm else None)
    num_patches = self.patch_embed.num_patches
    patches_resolution = self.patch_embed.patches_resolution
    self.patches_resolution = patches_resolution

    # absolute position embedding (optional, zero-init then trunc-normal)
    if self.ape:
        self.absolute_pos_embed = self.create_parameter(
            shape=(1, num_patches, embed_dim), default_initializer=zeros_)
        self.add_parameter("absolute_pos_embed", self.absolute_pos_embed)
        trunc_normal_(self.absolute_pos_embed)

    self.pos_drop = nn.Dropout(p=drop_rate)

    # stochastic depth
    dpr = np.linspace(0, drop_path_rate,
                      sum(depths)).tolist()  # stochastic depth decay rule

    # build layers: each stage halves the spatial resolution (via
    # PatchMerging) and doubles the channel count, except the last stage
    # which has no downsample.
    self.layers = nn.LayerList()
    for i_layer in range(self.num_layers):
        layer = BasicLayer(
            dim=int(embed_dim * 2**i_layer),
            input_resolution=(patches_resolution[0] // (2**i_layer),
                              patches_resolution[1] // (2**i_layer)),
            depth=depths[i_layer],
            num_heads=num_heads[i_layer],
            window_size=window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            drop=drop_rate,
            attn_drop=attn_drop_rate,
            # Per-block drop-path rates sliced from the global linear ramp.
            drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
            norm_layer=norm_layer,
            downsample=PatchMerging
            if (i_layer < self.num_layers - 1) else None,
            use_checkpoint=use_checkpoint)
        self.layers.append(layer)

    self.norm = norm_layer(self.num_features)
    # Classification head disabled: this class is used as a backbone.
    # self.avgpool = nn.AdaptiveAvgPool1D(1)
    # self.head = nn.Linear(
    #     self.num_features,
    #     num_classes) if self.num_classes > 0 else nn.Identity()

    self.apply(self._init_weights)

    if pretrained:
        utils.load_entire_model(
            self,
            'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/SwinTransformer_large_patch4_window12_384_22kto1k_pretrained.pdparams'
        )
def __init__(self, layers=101, cardinality=32, width=16, pretrained=False):
    """Build the ResNeXt101 32x16d (WSL) backbone.

    Args:
        layers (int): Kept for interface compatibility; the stage depths
            are fixed to [3, 4, 23, 3] (ResNeXt-101).
        cardinality (int): Number of grouped-convolution groups.
        width (int): Bottleneck width multiplier per group.
        pretrained (bool): If True, download and load WSL pretrained weights.
    """
    super(ResNeXt101_32x16d_wsl, self).__init__()
    self.pretrained = pretrained
    self.layers = layers
    self.cardinality = cardinality
    self.width = width
    self.scale = width // 8
    self.depth = [3, 4, 23, 3]
    self.base_width = cardinality * width
    num_filters = [self.base_width * i
                   for i in [1, 2, 4, 8]]  # [512, 1024, 2048, 4096] here

    self._conv_stem = ConvBNLayer(
        3, 64, 7, stride=2, act="relu", name="conv1")
    self._pool = MaxPool2D(kernel_size=3, stride=2, padding=1)

    # Build the four residual stages in loops instead of 26 copy-pasted
    # assignments. setattr() registers each block as a sublayer under the
    # exact attribute name the hand-written version used
    # (`_conv{stage+1}_{i}`), so forward() and checkpoints are unaffected.
    for stage, blocks in enumerate(self.depth):
        for i in range(blocks):
            if i == 0:
                # First block of a stage consumes the previous stage's
                # output (the 64-channel stem for stage 0) and downsamples
                # from the second stage onward.
                in_channels = (64 if stage == 0 else
                               num_filters[stage - 1] // (width // 8))
                stride = 2 if stage > 0 else 1
            else:
                in_channels = num_filters[stage] // (width // 8)
                stride = 1
            setattr(
                self, '_conv{}_{}'.format(stage + 1, i),
                BottleneckBlock(
                    in_channels,
                    num_filters[stage],
                    stride=stride,
                    cardinality=self.cardinality,
                    width=self.width,
                    name='layer{}.{}'.format(stage + 1, i)))

    if pretrained:
        utils.load_entire_model(
            self,
            'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNeXt101_32x16_wsl_pretrained.pdparams'
        )
def main(args):
    """Quantization-aware training entry point.

    Args:
        args: parsed CLI namespace with config path, training hyperparameters,
            checkpointing and logging options.

    Raises:
        RuntimeError: if no config file or no train dataset is configured.
        ValueError: if the train dataset is empty.
    """
    # Make runs reproducible when a seed is given.
    if args.seed is not None:
        paddle.seed(args.seed)
        np.random.seed(args.seed)
        random.seed(args.seed)

    env_info = get_sys_env()
    env_lines = ['{}: {}'.format(k, v) for k, v in env_info.items()]
    logger.info('\n'.join(['', format('Environment Information', '-^48s')] +
                          env_lines + ['-' * 48]))

    use_gpu = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if use_gpu else 'cpu')

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(
        args.cfg,
        learning_rate=args.learning_rate,
        iters=args.iters,
        batch_size=args.batch_size)

    train_dataset = cfg.train_dataset
    if train_dataset is None:
        raise RuntimeError(
            'The training dataset is not specified in the configuration file.')
    if len(train_dataset) == 0:
        raise ValueError(
            'The length of train_dataset is 0. Please check if your dataset is valid'
        )

    val_dataset = cfg.val_dataset if args.do_eval else None
    losses = cfg.loss

    logger.info('\n---------------Config Information---------------\n' +
                str(cfg) + '------------------------------------------------')

    config_check(cfg, train_dataset=train_dataset, val_dataset=val_dataset)

    model = cfg.model
    if args.model_path:
        utils.load_entire_model(model, args.model_path)
        logger.info('Loaded trained params of model successfully')

    # Insert fake-quant ops, then train the quantized network.
    skip_quant(model)
    quant_model = QAT(config=quant_config).quantize(model)
    logger.info('Quantize the model successfully')

    train(
        quant_model,
        train_dataset,
        val_dataset=val_dataset,
        optimizer=cfg.optimizer,
        save_dir=args.save_dir,
        iters=cfg.iters,
        batch_size=cfg.batch_size,
        resume_model=None,
        save_interval=args.save_interval,
        log_iters=args.log_iters,
        num_workers=args.num_workers,
        use_vdl=args.use_vdl,
        losses=losses,
        keep_checkpoint_max=args.keep_checkpoint_max)
def __init__(self, layers=200, scales=4, width=26, pretrained=False):
    """Build the Res2Net-vd (26w x 4s, SSLD) backbone.

    Args:
        layers (int): Network depth; one of 50, 101, 152, 200.
        scales (int): Number of Res2Net feature-map splits per block.
        width (int): Base width of each split.
        pretrained (bool): If True, download and load SSLD pretrained weights.
    """
    super(Res2Net200_vd_26w_4s_ssld, self).__init__()
    self.layers = layers
    self.pretrained = pretrained
    self.scales = scales
    self.width = width
    basic_width = self.width * self.scales
    supported_layers = [50, 101, 152, 200]
    assert layers in supported_layers, \
        "supported layers are {} but input layer is {}".format(
            supported_layers, layers)
    # Stage depths (blocks per stage) per standard ResNet depth variants.
    if layers == 50:
        depth = [3, 4, 6, 3]
    elif layers == 101:
        depth = [3, 4, 23, 3]
    elif layers == 152:
        depth = [3, 8, 36, 3]
    elif layers == 200:
        depth = [3, 12, 48, 3]
    num_channels = [64, 256, 512, 1024]   # block input channels (first block of stage)
    num_channels2 = [256, 512, 1024, 2048]  # block output channels
    num_filters = [basic_width * t for t in [1, 2, 4, 8]]

    # vd variant: three stacked 3x3 convs replace the single 7x7 stem conv.
    self.conv1_1 = ConvBNLayer(
        num_channels=3,
        num_filters=32,
        filter_size=3,
        stride=2,
        act='relu',
        name="conv1_1")
    self.conv1_2 = ConvBNLayer(
        num_channels=32,
        num_filters=32,
        filter_size=3,
        stride=1,
        act='relu',
        name="conv1_2")
    self.conv1_3 = ConvBNLayer(
        num_channels=32,
        num_filters=64,
        filter_size=3,
        stride=1,
        act='relu',
        name="conv1_3")
    self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1)

    self.block_list = []
    for block in range(len(depth)):
        shortcut = False
        for i in range(depth[block]):
            # Checkpoint-compatible naming: stage 3 (block == 2) of deep
            # variants uses "a", "b1", "b2", ...; other stages use the
            # letters a, b, c, ... (chr(97 + i)).
            if layers in [101, 152, 200] and block == 2:
                if i == 0:
                    conv_name = "res" + str(block + 2) + "a"
                else:
                    conv_name = "res" + str(block + 2) + "b" + str(i)
            else:
                conv_name = "res" + str(block + 2) + chr(97 + i)
            bottleneck_block = self.add_sublayer(
                'bb_%d_%d' % (block, i),
                BottleneckBlock(
                    # First block of a stage takes the previous stage's
                    # width; later blocks take this stage's output width.
                    num_channels1=num_channels[block]
                    if i == 0 else num_channels2[block],
                    num_channels2=num_channels2[block],
                    num_filters=num_filters[block],
                    # Downsample at the first block of every stage but the first.
                    stride=2 if i == 0 and block != 0 else 1,
                    scales=scales,
                    shortcut=shortcut,
                    if_first=block == i == 0,
                    name=conv_name))
            self.block_list.append(bottleneck_block)
            shortcut = True
    if pretrained:
        utils.load_entire_model(
            self,
            'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/Res2Net200_vd_26w_4s_pretrained.pdparams'
        )