def __init__(self, cfg_path):
    with open(cfg_path, 'r') as rf:
        self.cfg = yaml.safe_load(rf)
    self.data_cfg = self.cfg['data']
    self.model_cfg = self.cfg['model']
    self.optim_cfg = self.cfg['optim']
    self.hyper_params = self.cfg['hyper_params']
    self.val_cfg = self.cfg['val']
    print(self.data_cfg)
    print(self.model_cfg)
    print(self.optim_cfg)
    print(self.hyper_params)
    print(self.val_cfg)
    # Must be set before any CUDA context is created.
    os.environ['CUDA_VISIBLE_DEVICES'] = self.cfg['gpus']
    dist.init_process_group(backend='nccl')
    # EfficientDet ties input resolution to the compound coefficient (512 + 128 * coef).
    img_size = int(self.model_cfg['compound_coef']) * 128 + 512
    self.tdata = COCODataSets(img_root=self.data_cfg['train_img_root'],
                              annotation_path=self.data_cfg['train_annotation_path'],
                              img_size=img_size,
                              debug=self.data_cfg['debug'],
                              use_crowd=self.data_cfg['use_crowd'],
                              augments=True,
                              remove_blank=self.data_cfg['remove_blank'])
    self.tloader = DataLoader(dataset=self.tdata,
                              batch_size=self.data_cfg['batch_size'],
                              num_workers=self.data_cfg['num_workers'],
                              collate_fn=self.tdata.collate_fn,
                              sampler=DistributedSampler(dataset=self.tdata, shuffle=True))
    self.vdata = COCODataSets(img_root=self.data_cfg['val_img_root'],
                              annotation_path=self.data_cfg['val_annotation_path'],
                              img_size=img_size,
                              debug=self.data_cfg['debug'],
                              use_crowd=self.data_cfg['use_crowd'],
                              augments=False,
                              remove_blank=False)
    self.vloader = DataLoader(dataset=self.vdata,
                              batch_size=self.data_cfg['batch_size'],
                              num_workers=self.data_cfg['num_workers'],
                              collate_fn=self.vdata.collate_fn,
                              sampler=DistributedSampler(dataset=self.vdata, shuffle=False))
    print("train_data: ", len(self.tdata), " | ",
          "val_data: ", len(self.vdata), " | ",
          "empty_data: ", self.tdata.empty_images_len)
    print("train_iter: ", len(self.tloader), " | ",
          "val_iter: ", len(self.vloader))
    model = EfficientDet(num_cls=self.model_cfg['num_cls'],
                         compound_coef=self.model_cfg['compound_coef'])
    self.best_map = 0.
    self.best_map50 = 0.
    optimizer = split_optimizer(model, self.optim_cfg)
    local_rank = dist.get_rank()
    self.local_rank = local_rank
    self.device = torch.device("cuda", local_rank)
    model.to(self.device)
    self.scaler = amp.GradScaler(enabled=True)
    if self.optim_cfg['sync_bn']:
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    self.model = nn.parallel.distributed.DistributedDataParallel(
        model, device_ids=[local_rank], output_device=local_rank)
    self.optimizer = optimizer
    self.ema = ModelEMA(self.model)
    self.creterion = RetinaLoss(iou_thresh=self.hyper_params['iou_thresh'],
                                ignore_thresh=self.hyper_params['ignore_thresh'],
                                alpha=self.hyper_params['alpha'],
                                gamma=self.hyper_params['gamma'],
                                iou_type=self.hyper_params['iou_type'],
                                coord_type=self.hyper_params['coord_type'])
    self.lr_adjuster = WarmUpCosineDecayMultiStepLRAdjust(init_lr=self.optim_cfg['lr'],
                                                          milestones=self.optim_cfg['milestones'],
                                                          warm_up_epoch=self.optim_cfg['warm_up_epoch'],
                                                          iter_per_epoch=len(self.tloader),
                                                          epochs=self.optim_cfg['epochs'],
                                                          cosine_weights=self.optim_cfg['cosine_weights'])
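# --- usage sketch (not from this file) ---------------------------------------------------
# This trainer initializes an NCCL process group, so it expects one process per GPU.
# Assuming the class is named `EfficientDetTrainer` and is constructed inside a
# `train.py` (both names are illustrative, not taken from this repo), a launch would
# look roughly like:
#
#   python -m torch.distributed.launch --nproc_per_node=2 train.py --config det.yaml
#
#   # inside train.py:
#   trainer = EfficientDetTrainer(cfg_path=args.config)
#   trainer.run()  # hypothetical entry point that loops over epochs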
def __init__(self, cfg_path):
    with open(cfg_path, 'r') as rf:
        self.cfg = yaml.safe_load(rf)
    self.data_cfg = self.cfg['data']
    self.model_cfg = self.cfg['model']
    self.optim_cfg = self.cfg['optim']
    self.hyper_params = self.cfg['hyper_params']
    self.val_cfg = self.cfg['val']
    print(self.data_cfg)
    print(self.model_cfg)
    print(self.optim_cfg)
    print(self.hyper_params)
    print(self.val_cfg)
    os.environ['CUDA_VISIBLE_DEVICES'] = self.cfg['gpus']
    dist.init_process_group(backend='nccl')
    self.tdata = COCODataSets(img_root=self.data_cfg['train_img_root'],
                              annotation_path=self.data_cfg['train_annotation_path'],
                              img_size=self.data_cfg['img_size'],
                              debug=self.data_cfg['debug'],
                              use_crowd=self.data_cfg['use_crowd'],
                              augments=True,
                              remove_blank=self.data_cfg['remove_blank'])
    self.tloader = DataLoader(dataset=self.tdata,
                              batch_size=self.data_cfg['batch_size'],
                              num_workers=self.data_cfg['num_workers'],
                              collate_fn=self.tdata.collate_fn,
                              sampler=DistributedSampler(dataset=self.tdata, shuffle=True))
    self.vdata = COCODataSets(img_root=self.data_cfg['val_img_root'],
                              annotation_path=self.data_cfg['val_annotation_path'],
                              img_size=self.data_cfg['img_size'],
                              debug=self.data_cfg['debug'],
                              use_crowd=self.data_cfg['use_crowd'],
                              augments=False,
                              remove_blank=False)
    self.vloader = DataLoader(dataset=self.vdata,
                              batch_size=self.data_cfg['batch_size'],
                              num_workers=self.data_cfg['num_workers'],
                              collate_fn=self.vdata.collate_fn,
                              sampler=DistributedSampler(dataset=self.vdata, shuffle=False))
    print("train_data: ", len(self.tdata), " | ",
          "val_data: ", len(self.vdata), " | ",
          "empty_data: ", self.tdata.empty_images_len)
    print("train_iter: ", len(self.tloader), " | ",
          "val_iter: ", len(self.vloader))
    model = RetinaNet(num_cls=self.model_cfg['num_cls'],
                      anchor_sizes=self.model_cfg['anchor_sizes'],
                      strides=self.model_cfg['strides'],
                      backbone=self.model_cfg['backbone'])
    if self.model_cfg.get("backbone_weight", None):
        weights = torch.load(self.model_cfg['backbone_weight'])
        model.load_backbone_weighs(weights)
    self.best_map = 0.
    self.best_map50 = 0.
    optimizer = split_optimizer(model, self.optim_cfg)
    local_rank = dist.get_rank()
    self.local_rank = local_rank
    self.device = torch.device("cuda", local_rank)
    model.to(self.device)
    # NVIDIA apex mixed precision; O1 patches whitelisted ops to run in fp16.
    model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)
    if self.optim_cfg['sync_bn']:
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    self.model = nn.parallel.distributed.DistributedDataParallel(
        model, device_ids=[local_rank], output_device=local_rank)
    self.optimizer = optimizer
    self.ema = ModelEMA(self.model)
    # beta may arrive as an expression string from the YAML config.
    beta = eval(self.hyper_params['beta']) if isinstance(self.hyper_params['beta'], str) \
        else self.hyper_params['beta']
    self.creterion = RetinaAnchorFreeLoss(alpha=self.hyper_params['alpha'],
                                          gamma=self.hyper_params['gamma'],
                                          beta=beta,
                                          top_k=self.hyper_params['top_k'],
                                          box_iou_thresh=self.hyper_params['box_iou_thresh'],
                                          box_reg_weight=self.hyper_params['box_reg_weight'])
    self.lr_adjuster = WarmUpCosineDecayMultiStepLRAdjust(init_lr=self.optim_cfg['lr'],
                                                          milestones=self.optim_cfg['milestones'],
                                                          warm_up_epoch=self.optim_cfg['warm_up_epoch'],
                                                          iter_per_epoch=len(self.tloader),
                                                          epochs=self.optim_cfg['epochs'],
                                                          cosine_weights=self.optim_cfg['cosine_weights'])
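# --- apex backward-pass sketch (not from this file) --------------------------------------
# Unlike the trainers that hold a torch.cuda.amp.GradScaler, this one runs through NVIDIA
# apex (`amp.initialize(..., opt_level='O1')`), so its train step would scale the loss
# with apex's context manager. A minimal sketch; the (imgs, targets) batch layout is an
# assumption about collate_fn:
#
#   loss = self.creterion(self.model(imgs), targets)
#   with amp.scale_loss(loss, self.optimizer) as scaled_loss:
#       scaled_loss.backward()
#   self.optimizer.step()
#   self.optimizer.zero_grad()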
def __init__(self, cfg_path):
    with open(cfg_path, 'r') as rf:
        self.cfg = yaml.safe_load(rf)
    self.data_cfg = self.cfg['data']
    self.model_cfg = self.cfg['model']
    self.optim_cfg = self.cfg['optim']
    self.hyper_params = self.cfg['hyper_params']
    self.val_cfg = self.cfg['val']
    print(self.data_cfg)
    print(self.model_cfg)
    print(self.optim_cfg)
    print(self.hyper_params)
    print(self.val_cfg)
    os.environ['CUDA_VISIBLE_DEVICES'] = self.cfg['gpus']
    dist.init_process_group(backend='nccl')
    self.tdata = COCODataSets(img_root=self.data_cfg['train_img_root'],
                              annotation_path=self.data_cfg['train_annotation_path'],
                              img_size=self.data_cfg['img_size'],
                              debug=self.data_cfg['debug'],
                              augments=True,
                              use_crowd=self.data_cfg['use_crowd'],
                              remove_blank=self.data_cfg['remove_blank'])
    self.tloader = DataLoader(dataset=self.tdata,
                              batch_size=self.data_cfg['batch_size'],
                              num_workers=self.data_cfg['num_workers'],
                              collate_fn=self.tdata.collate_fn,
                              sampler=DistributedSampler(dataset=self.tdata, shuffle=True))
    self.vdata = COCODataSets(img_root=self.data_cfg['val_img_root'],
                              annotation_path=self.data_cfg['val_annotation_path'],
                              img_size=self.data_cfg['img_size'],
                              debug=self.data_cfg['debug'],
                              augments=False,
                              use_crowd=self.data_cfg['use_crowd'],
                              remove_blank=False)
    self.vloader = DataLoader(dataset=self.vdata,
                              batch_size=self.data_cfg['batch_size'],
                              num_workers=self.data_cfg['num_workers'],
                              collate_fn=self.vdata.collate_fn,
                              sampler=DistributedSampler(dataset=self.vdata, shuffle=False))
    print("train_data: ", len(self.tdata), " | ",
          "val_data: ", len(self.vdata), " | ",
          "empty_data: ", self.tdata.empty_images_len)
    print("train_iter: ", len(self.tloader), " | ",
          "val_iter: ", len(self.vloader))
    model = YOLOv5(num_cls=self.model_cfg['num_cls'],
                   anchors=self.model_cfg['anchors'],
                   strides=self.model_cfg['strides'],
                   scale_name=self.model_cfg['scale_name'])
    self.best_map = 0.
    self.best_map50 = 0.
    optimizer = split_optimizer(model, self.optim_cfg)
    local_rank = dist.get_rank()
    self.local_rank = local_rank
    self.device = torch.device("cuda", local_rank)
    model.to(self.device)
    pretrain = self.model_cfg.get("pretrain", None)
    if pretrain:
        # Warm-start from a checkpoint; strict=False tolerates head-shape mismatches.
        pretrain_weights = torch.load(pretrain, map_location=self.device)
        load_info = model.load_state_dict(pretrain_weights, strict=False)
        print("load_info ", load_info)
    self.scaler = amp.GradScaler(enabled=True)
    if self.optim_cfg['sync_bn']:
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    self.model = nn.parallel.distributed.DistributedDataParallel(
        model, device_ids=[local_rank], output_device=local_rank)
    self.optimizer = optimizer
    self.ema = ModelEMA(self.model)
    self.creterion = YOLOv5LossOriginal(iou_type=self.hyper_params['iou_type'])
    self.lr_adjuster = EpochWarmUpCosineDecayLRAdjust(init_lr=self.optim_cfg['lr'],
                                                      warm_up_epoch=self.optim_cfg['warm_up_epoch'],
                                                      iter_per_epoch=len(self.tloader),
                                                      epochs=self.optim_cfg['epochs'],
                                                      alpha=self.optim_cfg['alpha'],
                                                      gamma=self.optim_cfg['gamma'],
                                                      bias_idx=2)
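# --- native AMP step sketch (not from this file) ------------------------------------------
# The GradScaler created above implies a torch.cuda.amp train step roughly like the
# following; the (imgs, targets) batch layout and the lr_adjuster call signature are
# assumptions, not this repo's confirmed API:
#
#   for i, (imgs, targets) in enumerate(self.tloader):
#       self.lr_adjuster(self.optimizer, i, epoch)          # per-iteration LR schedule
#       with amp.autocast(enabled=True):
#           predicts = self.model(imgs.to(self.device))
#           loss = self.creterion(predicts, targets.to(self.device))
#       self.scaler.scale(loss).backward()
#       self.scaler.step(self.optimizer)
#       self.scaler.update()
#       self.optimizer.zero_grad()
#       self.ema.update(self.model)                         # keep the EMA shadow in sync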
def __init__(self, cfg_path):
    with open(cfg_path, 'r') as rf:
        self.cfg = yaml.safe_load(rf)
    self.data_cfg = self.cfg['data']
    self.model_cfg = self.cfg['model']
    self.optim_cfg = self.cfg['optim']
    self.hyper_params = self.cfg['hyper_params']
    self.val_cfg = self.cfg['val']
    print(self.data_cfg)
    print(self.model_cfg)
    print(self.optim_cfg)
    print(self.hyper_params)
    print(self.val_cfg)
    os.environ['CUDA_VISIBLE_DEVICES'] = self.cfg['gpus']
    dist.init_process_group(backend='nccl', init_method='env://')
    self.tdata = COCODataSets(img_root=self.data_cfg['train_img_root'],
                              annotation_path=self.data_cfg['train_annotation_path'],
                              img_size=self.data_cfg['img_size'],
                              debug=self.data_cfg['debug'],
                              use_crowd=self.data_cfg['use_crowd'],
                              augments=True,
                              remove_blank=self.data_cfg['remove_blank'])
    self.tloader = DataLoader(dataset=self.tdata,
                              batch_size=self.data_cfg['batch_size'],
                              num_workers=self.data_cfg['num_workers'],
                              collate_fn=self.tdata.collate_fn,
                              sampler=DistributedSampler(dataset=self.tdata, shuffle=True))
    self.vdata = COCODataSets(img_root=self.data_cfg['val_img_root'],
                              annotation_path=self.data_cfg['val_annotation_path'],
                              img_size=self.data_cfg['img_size'],
                              debug=self.data_cfg['debug'],
                              use_crowd=self.data_cfg['use_crowd'],
                              augments=False,
                              remove_blank=False)
    self.vloader = DataLoader(dataset=self.vdata,
                              batch_size=self.data_cfg['batch_size'],
                              num_workers=self.data_cfg['num_workers'],
                              collate_fn=self.vdata.collate_fn,
                              sampler=DistributedSampler(dataset=self.vdata, shuffle=False))
    print("train_data: ", len(self.tdata), " | ",
          "val_data: ", len(self.vdata), " | ",
          "empty_data: ", self.tdata.empty_images_len)
    print("train_iter: ", len(self.tloader), " | ",
          "val_iter: ", len(self.vloader))
    local_rank = dist.get_rank()
    self.local_rank = local_rank
    self.device = torch.device("cuda", local_rank)
    model = FCOS(num_cls=self.model_cfg['num_cls'],
                 strides=self.model_cfg['strides'],
                 backbone=self.model_cfg['backbone'])
    optimizer = split_optimizer(model, self.optim_cfg)
    model.to(self.device)
    pretrain = self.model_cfg.get('pretrain', None)
    if pretrain is not None:
        # Resume from the EMA weights stored in a previous checkpoint.
        pretrained_weights = torch.load(pretrain, map_location=self.device)
        load_info = model.load_state_dict(pretrained_weights['ema'], strict=False)
        print('load info ', load_info)
    self.scaler = amp.GradScaler(enabled=True)
    if self.optim_cfg['sync_bn']:
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    self.model = nn.parallel.distributed.DistributedDataParallel(
        model, device_ids=[local_rank], output_device=local_rank)
    self.optimizer = optimizer
    self.ema = ModelEMA(self.model)
    self.creterion = FCOSLoss(alpha=self.hyper_params['alpha'],
                              gamma=self.hyper_params['gamma'],
                              radius=self.hyper_params['radius'],
                              layer_limits=self.hyper_params['layer_limits'],
                              strides=self.model_cfg['strides'],
                              iou_type=self.hyper_params['iou_type'])
    self.lr_adjuster = WarmUpCosineDecayMultiStepLRAdjust(init_lr=self.optim_cfg['lr'],
                                                          milestones=self.optim_cfg['milestones'],
                                                          warm_up_epoch=self.optim_cfg['warm_up_epoch'],
                                                          iter_per_epoch=len(self.tloader),
                                                          epochs=self.optim_cfg['epochs'],
                                                          cosine_weights=self.optim_cfg['cosine_weights'])
    # Only rank 0 writes TensorBoard logs.
    self.tb_writer = None
    if self.local_rank == 0:
        log_dir = 'runs/'
        print('Start Tensorboard with "tensorboard --logdir %s", '
              'view at http://localhost:6006/' % log_dir)
        self.tb_writer = SummaryWriter(log_dir=log_dir)
    self.best_map = 0.
    self.best_map50 = 0.
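# --- rank-0 logging sketch (not from this file) -------------------------------------------
# Because only rank 0 constructs a SummaryWriter, every logging call elsewhere in the
# class needs the same guard; the tag names and step variables below are illustrative:
#
#   if self.local_rank == 0 and self.tb_writer is not None:
#       self.tb_writer.add_scalar('train/loss', loss.item(), global_step)
#       self.tb_writer.add_scalar('val/map50', map50, epoch)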
def __init__(self, cfg_path):
    with open(cfg_path, 'r') as rf:
        self.cfg = yaml.safe_load(rf)
    self.data_cfg = self.cfg['data']
    self.model_cfg = self.cfg['model']
    self.optim_cfg = self.cfg['optim']
    self.hyper_params = self.cfg['hyper_params']
    self.val_cfg = self.cfg['val']
    print(self.data_cfg)
    print(self.model_cfg)
    print(self.optim_cfg)
    print(self.hyper_params)
    print(self.val_cfg)
    self.tdata = COCODataSets(img_root=self.data_cfg['train_img_root'],
                              annotation_path=self.data_cfg['train_annotation_path'],
                              img_size=self.data_cfg['img_size'],
                              debug=self.data_cfg['debug'],
                              use_crowd=self.data_cfg['use_crowd'],
                              augments=True,
                              remove_blank=self.data_cfg['remove_blank'])
    self.tloader = DataLoader(dataset=self.tdata,
                              batch_size=self.data_cfg['batch_size'],
                              num_workers=self.data_cfg['num_workers'],
                              collate_fn=self.tdata.collate_fn,
                              shuffle=True)
    self.vdata = COCODataSets(img_root=self.data_cfg['val_img_root'],
                              annotation_path=self.data_cfg['val_annotation_path'],
                              img_size=self.data_cfg['img_size'],
                              debug=self.data_cfg['debug'],
                              use_crowd=self.data_cfg['use_crowd'],
                              augments=False,
                              remove_blank=False)
    self.vloader = DataLoader(dataset=self.vdata,
                              batch_size=self.data_cfg['batch_size'],
                              num_workers=self.data_cfg['num_workers'],
                              collate_fn=self.vdata.collate_fn,
                              shuffle=False)
    print("train_data: ", len(self.tdata), " | ",
          "val_data: ", len(self.vdata), " | ",
          "empty_data: ", self.tdata.empty_images_len)
    print("train_iter: ", len(self.tloader), " | ",
          "val_iter: ", len(self.vloader))
    model = RetinaNet(num_cls=self.model_cfg['num_cls'],
                      anchor_sizes=self.model_cfg['anchor_sizes'],
                      strides=self.model_cfg['strides'],
                      backbone=self.model_cfg['backbone'])
    if self.model_cfg.get("backbone_weight", None):
        weights = torch.load(self.model_cfg['backbone_weight'])
        model.load_backbone_weighs(weights)
    self.best_map = 0.
    self.best_map50 = 0.
    optimizer = split_optimizer(model, self.optim_cfg)
    self.local_rank = 0
    self.device = torch.device("cuda:0")
    model.to(self.device)
    self.model = model
    self.optimizer = optimizer
    self.ema = ModelEMA(self.model)
    beta = eval(self.hyper_params['beta']) if isinstance(self.hyper_params['beta'], str) \
        else self.hyper_params['beta']
    self.creterion = RetinaAnchorFreeLoss(alpha=self.hyper_params['alpha'],
                                          gamma=self.hyper_params['gamma'],
                                          beta=beta,
                                          top_k=self.hyper_params['top_k'],
                                          box_iou_thresh=self.hyper_params['box_iou_thresh'],
                                          box_reg_weight=self.hyper_params['box_reg_weight'])
    self.lr_adjuster = WarmUpCosineDecayMultiStepLRAdjust(init_lr=self.optim_cfg['lr'],
                                                          milestones=self.optim_cfg['milestones'],
                                                          warm_up_epoch=self.optim_cfg['warm_up_epoch'],
                                                          iter_per_epoch=len(self.tloader),
                                                          epochs=self.optim_cfg['epochs'],
                                                          cosine_weights=self.optim_cfg['cosine_weights'])
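# --- note on the beta expression (sketch, not from this file) ------------------------------
# `eval` on a YAML string works but will execute arbitrary code embedded in the config.
# A safer drop-in, under the assumption that only "a/b" ratios or plain numeric literals
# ever appear (the helper name is hypothetical):
#
#   import ast
#   def parse_ratio(s):
#       if isinstance(s, str) and '/' in s:
#           num, den = s.split('/', 1)
#           return float(num) / float(den)
#       return float(ast.literal_eval(s)) if isinstance(s, str) else float(s)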
def __init__(self, cfg_path): with open(cfg_path, 'r') as rf: self.cfg = yaml.safe_load(rf) self.data_cfg = self.cfg['data'] self.model_cfg = self.cfg['model'] self.optim_cfg = self.cfg['optim'] self.hyper_params = self.cfg['hyper_params'] self.val_cfg = self.cfg['val'] print(self.data_cfg) print(self.model_cfg) print(self.optim_cfg) print(self.hyper_params) print(self.val_cfg) os.environ['CUDA_VISIBLE_DEVICES'] = self.cfg['gpus'] self.gpu_num = len(str(self.cfg['gpus']).split(",")) dist.init_process_group(backend='nccl') ########################################################################################### self.tdata = COCODataSets( img_root=self.data_cfg['train_img_root'], annotation_path=self.data_cfg['train_annotation_path'], img_size=self.data_cfg['img_size'], debug=self.data_cfg['debug'], use_crowd=self.data_cfg['use_crowd'], augments=True, remove_blank=self.data_cfg['remove_blank']) self.tloader = DataLoader(dataset=self.tdata, batch_size=self.data_cfg['batch_size'], num_workers=self.data_cfg['num_workers'], collate_fn=self.tdata.collate_fn, sampler=DistributedSampler( dataset=self.tdata, shuffle=True)) self.vdata = COCODataSets( img_root=self.data_cfg['val_img_root'], annotation_path=self.data_cfg['val_annotation_path'], img_size=self.data_cfg['img_size'], debug=self.data_cfg['debug'], use_crowd=self.data_cfg['use_crowd'], augments=False, remove_blank=False) self.vloader = DataLoader(dataset=self.vdata, batch_size=1, num_workers=1, collate_fn=self.vdata.collate_fn, sampler=DistributedSampler( dataset=self.vdata, shuffle=False)) print("train_data: ", len(self.tdata), " | ", "val_data: ", len(self.vdata), " | ", "empty_data: ", self.tdata.empty_images_len) print("train_iter: ", len(self.tloader), " | ", "val_iter: ", len(self.vloader)) ############################################################################################ model = CenterNet(num_cls=self.model_cfg['num_cls'], PIXEL_MEAN=self.model_cfg['PIXEL_MEAN'], PIXEL_STD=self.model_cfg['PIXEL_STD'], backbone=self.model_cfg['backbone'], cfg=self.model_cfg) self.best_map = 0. self.best_map50 = 0. optimizer = split_optimizer(model, self.optim_cfg) local_rank = dist.get_rank() self.local_rank = local_rank self.device = torch.device("cuda", local_rank) model.to(self.device) if self.optim_cfg['sync_bn']: model = nn.SyncBatchNorm.convert_sync_batchnorm(model) self.model = nn.parallel.distributed.DistributedDataParallel( model, device_ids=[local_rank], output_device=local_rank) self.optimizer = optimizer self.ema = ModelEMA(self.model) self.gt_generator = CenterNetGT( alpha=self.model_cfg['alpha'], beta=self.model_cfg['beta'], num_cls=self.model_cfg['num_cls'], wh_planes=self.model_cfg['wh_planes'], down_ratio=self.model_cfg['down_ratio'], wh_area_process=self.model_cfg['wh_area_process']) self.creterion = CenterNetLoss( hm_weight=self.hyper_params['hm_weight'], wh_weight=self.hyper_params['wh_weight'], down_ratio=self.model_cfg['down_ratio']) self.lr_adjuster = WarmUpCosineDecayMultiStepLRAdjust( init_lr=self.optim_cfg['lr'], milestones=self.optim_cfg['milestones'], warm_up_epoch=self.optim_cfg['warm_up_epoch'], iter_per_epoch=len(self.tloader), epochs=self.optim_cfg['epochs'], cosine_weights=self.optim_cfg['cosine_weights'])
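# --- per-epoch sampler reshuffle (sketch, not from this file) -------------------------------
# Every DDP trainer above feeds its DataLoader through a DistributedSampler, whose shuffle
# is seeded by the epoch number; the training loop must advance it, otherwise each epoch
# replays the same permutation on every rank:
#
#   for epoch in range(self.optim_cfg['epochs']):
#       self.tloader.sampler.set_epoch(epoch)
#       for i, batch in enumerate(self.tloader):
#           ...  # forward / backward as usual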
def __init__(self, cfg_path):
    with open(cfg_path, 'r') as rf:
        self.cfg = yaml.safe_load(rf)
    self.data_cfg = self.cfg['data']  # dataset params
    self.model_cfg = self.cfg['model']  # model params
    self.optim_cfg = self.cfg['optim']  # optim params
    self.hyper_params = self.cfg['hyper_params']  # other hyper params
    self.val_cfg = self.cfg['val']  # validation hyper params
    print(self.data_cfg)
    print(self.model_cfg)
    print(self.optim_cfg)
    print(self.hyper_params)
    print(self.val_cfg)
    os.environ['CUDA_VISIBLE_DEVICES'] = self.cfg['gpus']  # set available gpu(s)
    ## load dataset ---------------------------------------------------------------------------------------
    # COCO is also supported; swap in COCODataSets with the same arguments:
    # self.tdata = COCODataSets(img_root=self.data_cfg['train_img_root'],
    #                           annotation_path=self.data_cfg['train_annotation_path'],
    #                           img_size=self.data_cfg['img_size'],
    #                           debug=self.data_cfg['debug'],
    #                           augments=True,
    #                           remove_blank=self.data_cfg['remove_blank'],
    #                           image_weight=self.hyper_params['use_weight_sample'])
    self.tdata = BDD100DataSets(img_root=self.data_cfg['train_img_root'],
                                annotation_path=self.data_cfg['train_annotation_path'],
                                img_size=self.data_cfg['img_size'],
                                debug=self.data_cfg['debug'],
                                augments=True,
                                remove_blank=self.data_cfg['remove_blank'],
                                image_weight=self.hyper_params['use_weight_sample'])
    self.tloader = DataLoader(dataset=self.tdata,
                              batch_size=self.data_cfg['batch_size'],
                              num_workers=self.data_cfg['num_workers'],
                              collate_fn=self.tdata.collate_fn)
    # self.vdata = COCODataSets(img_root=self.data_cfg['val_img_root'],
    #                           annotation_path=self.data_cfg['val_annotation_path'],
    #                           img_size=self.data_cfg['img_size'],
    #                           debug=self.data_cfg['debug'],
    #                           augments=False,
    #                           remove_blank=False)
    self.vdata = BDD100DataSets(img_root=self.data_cfg['val_img_root'],
                                annotation_path=self.data_cfg['val_annotation_path'],
                                img_size=self.data_cfg['img_size'],
                                debug=self.data_cfg['debug'],
                                augments=False,
                                remove_blank=False)
    self.vloader = DataLoader(dataset=self.vdata,
                              batch_size=self.data_cfg['batch_size'],
                              num_workers=self.data_cfg['num_workers'],
                              collate_fn=self.vdata.collate_fn)
    print("train_data: ", len(self.tdata), " | ",
          "val_data: ", len(self.vdata), " | ",
          "empty_data: ", self.tdata.empty_images_len)
    print("train_iter: ", len(self.tloader), " | ",
          "val_iter: ", len(self.vloader))
    ### define model -------------------------------------------------------------------------------------
    model = YOLOv5(in_channels=3,
                   num_cls=self.model_cfg['num_cls'],
                   anchors=self.model_cfg['anchors'],
                   strides=self.model_cfg['strides'],
                   scale_name=self.model_cfg['scale_name'])
    ### check anchor -------------------------------------------------------------------------------------
    # check_anchors(self.tdata, model, self.hyper_params['anchor_t'], self.data_cfg['img_size'])
    ############------------------------------------------------------------------------------------------
    self.best_map = 0.
    self.best_map50 = 0.
    optimizer = split_optimizer(model, self.optim_cfg)
    self.device = torch.device('cuda:0')
    model.to(self.device)
    pretrain = self.model_cfg.get('pretrain', None)
    if pretrain:
        pretrained_weights = torch.load(pretrain, map_location=self.device)
        load_info = model.load_state_dict(pretrained_weights['ema'], strict=False)
        print('load info ', load_info)
    # Mixed-precision training via the native AMP API shipped with torch >= 1.6.
    self.scaler = amp.GradScaler(enabled=True)
    self.model = model
    self.optimizer = optimizer
    self.ema = ModelEMA(self.model)
    self.creterion = YOLOv5LossOriginal(iou_type=self.hyper_params['iou_type'],
                                        fl_gamma=self.hyper_params['fl_gamma'],
                                        class_smoothing_eps=self.hyper_params['class_smoothing_eps'])
    self.lr_adjuster = WarmUpCosineDecayMultiStepLRAdjust(init_lr=self.optim_cfg['lr'],
                                                          milestones=self.optim_cfg['milestones'],
                                                          warm_up_epoch=self.optim_cfg['warm_up_epoch'],
                                                          iter_per_epoch=len(self.tloader),
                                                          epochs=self.optim_cfg['epochs'],
                                                          cosine_weights=self.optim_cfg['cosine_weights'])
    ## for class-aware weighted sampling ---------------------------------------------------------------------
    self.class_weights = (labels_to_class_weights(self.tdata.labels,
                                                  nc=self.model_cfg['num_cls']).to(self.device)
                          if self.hyper_params['use_weight_sample'] else None)
    self.maps = np.zeros(self.model_cfg['num_cls'])  # mAP per class
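# --- class-aware image re-weighting sketch (not from this file) -----------------------------
# `class_weights` and the per-class `maps` above are the ingredients of YOLOv5-style image
# re-weighting between epochs. A sketch assuming a `labels_to_image_weights` helper with
# the ultralytics signature (not shown in this file) and `import random`:
#
#   if self.hyper_params['use_weight_sample']:
#       cw = self.class_weights.cpu().numpy() * (1 - self.maps) ** 2  # emphasize weak classes
#       iw = labels_to_image_weights(self.tdata.labels,
#                                    nc=self.model_cfg['num_cls'], class_weights=cw)
#       self.tdata.indices = random.choices(range(len(self.tdata)),
#                                           weights=iw, k=len(self.tdata))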