def __init__(self, config_path, img_size=416):
    """Build a Darknet network from a .cfg description.

    Args:
        config_path: path to the darknet .cfg file describing the layers.
        img_size: square input resolution the network expects.
    """
    super(Darknet, self).__init__()
    self.img_size = img_size
    # Parse the cfg into per-layer dicts, then materialise the nn.Modules.
    self.module_defs = parse_model_config(config_path)
    self.hyperparams, self.module_list = create_modules(self.module_defs)
    # Images seen during training; written into the darknet weights header.
    self.seen = 0
    self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)
def __init__(self, config_path, yolo=0):
    """Loss head for a single YOLO detection layer.

    Parses the darknet cfg and extracts the anchors/classes belonging to
    the ``yolo``-th YOLO block of the network.

    :param config_path: darknet .cfg file to parse.
    :param yolo: 0, 1 or 2 — selects which YOLO layer this loss serves.
    """
    super(YOLOLoss, self).__init__()
    self.module_defs = parse_model_config(config_path)  # parsed layer definitions
    self.hyperparams = self.module_defs.pop(0)  # [net] hyperparameters
    self.anchors = []  # (w, h) pairs of the anchors used by this layer
    for module_def in self.module_defs:
        if module_def["type"] == "yolo":  # locate the YOLO blocks
            # In yolov3 the k-th YOLO layer has mask "3k,3k+1,3k+2", so the
            # first character of the mask identifies the layer.
            # NOTE(review): only valid while 3*yolo is a single digit — confirm.
            if module_def["mask"][0] == str(3 * yolo):
                anchor_idxs = [
                    int(x) for x in module_def["mask"].split(",")
                ]  # anchor indices; each yolov3 feature map owns 3 anchors
                anchors = [
                    int(x) for x in module_def["anchors"].split(",")
                ]
                anchors = [(anchors[i], anchors[i + 1])
                           for i in range(0, len(anchors), 2)]
                # Keep only the 3 anchors selected by the mask.
                self.anchors = [anchors[i] for i in anchor_idxs]
                break
    # After the break above, `module_def` is the matched YOLO block.
    self.num_anchors = len(self.anchors)  # number of anchors for this layer
    self.num_classes = int(module_def["classes"])  # number of object classes
    self.img_dim = int(self.hyperparams["width"])
    self.ignore_thres = float(module_def["ignore_thresh"])
    self.obj_scale = 1
    self.noobj_scale = 1
    self.metrics = {}
    self.grid_size = torch.tensor(0)  # current grid size
    self.mse_loss = nn.MSELoss()  # mean squared error (box regression)
    self.bce_loss = nn.BCELoss()  # binary cross-entropy (obj/cls)
def __init__(self, config_path, img_size=416):
    """Construct the YOLOv3 network described by ``config_path``."""
    super(Darknet, self).__init__()
    self.module_defs = parse_model_config(config_path)
    self.hyperparams, self.module_list = create_modules(self.module_defs)
    # A YOLO detection head is recognised by its `metrics` attribute.
    self.yolo_layers = []
    for module in self.module_list:
        if hasattr(module[0], "metrics"):
            self.yolo_layers.append(module[0])
    self.img_size = img_size
    # Images seen so far; persisted in the darknet weights header.
    self.seen = 0
    self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)
def __init__(self, config_path: str, img_size: int = 416):
    """Build the network graph from a darknet cfg file.

    Args:
        config_path: cfg file describing the network structure.
        img_size: square input resolution.
    """
    super(Darknet, self).__init__()
    # Read and parse the cfg file, then create the module graph from it.
    self.module_defs = parse_model_config(config_path)
    self.module_list = create_modules(self.module_defs)
    self.img_size: int = img_size
    self.seen: int = 0  # training images processed so far
    header = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)
    self.header_info = torch.IntTensor(header)
    # Dummy parameter — presumably used to probe the module's
    # device/dtype; confirm against callers.
    self.data_type = torch.nn.Parameter(torch.randn(1))
def __init__(self, config_path, img_size=416, weights_FIMs=None, alpha=1.):
    """Build the network and store Fisher-information regularisation state.

    Args:
        config_path: darknet .cfg file to parse.
        img_size: square input resolution.
        weights_FIMs: Fisher information matrices used for regularisation.
        alpha: regularisation strength.
    """
    super().__init__()
    self.img_size = img_size
    self.module_defs = parse_model_config(config_path)
    self.hyperparams, self.module_list = create_modules(self.module_defs)
    self.seen = 0  # images seen during training (weights-header field)
    self.header_info = np.array([0, 0, 0, self.seen, 0])
    # Single named loss reported by this variant.
    self.loss_names = ['NLL_loss']
    self.weights_FIMs = weights_FIMs
    self.alpha = alpha
def __init__(self, config_path, img_size=416, weights_FIMs=None, alpha=1.):
    """Build the network and keep per-component YOLO loss bookkeeping.

    Args:
        config_path: darknet .cfg file to parse.
        img_size: square input resolution.
        weights_FIMs: Fisher information matrices for regularisation.
        alpha: regularisation strength.
    """
    super(Darknet, self).__init__()
    self.img_size = img_size
    self.module_defs = parse_model_config(config_path)
    self.hyperparams, self.module_list = create_modules(self.module_defs)
    self.seen = 0  # images seen during training (weights-header field)
    self.header_info = np.array([0, 0, 0, self.seen, 0])
    # Names of the individual YOLO loss components tracked per step.
    self.loss_names = ['x', 'y', 'w', 'h', 'conf', 'cls', 'recall']
    self.weights_FIMs = weights_FIMs  # For Regularization
    self.alpha = alpha
def __init__(self, config_path, img_size=416):
    """Construct the Darknet model and its loss-name bookkeeping."""
    super(Darknet, self).__init__()
    self.img_size = img_size
    self.module_defs = parse_model_config(config_path)
    self.hyperparams, self.module_list = create_modules(self.module_defs)
    # Images seen during training; stored in the weights header.
    self.seen = 0
    self.header_info = np.array([0, 0, 0, self.seen, 0])
    # Per-component YOLO loss/metric names tracked during training.
    self.loss_names = [
        "x", "y", "w", "h", "conf", "cls", "recall", "precision"
    ]
def __init__(self, config_path):
    """Build the network and collect references to its YOLO heads."""
    super(Darknet, self).__init__()
    self.module_defs = parse_model_config(config_path)
    self.hyperparams, self.module_list = create_modules(self.module_defs)
    # Gather the detection heads so losses/metrics can be read off them.
    self.yolo_layers = []
    for seq in self.module_list:
        if isinstance(seq[0], YOLOLayer):
            self.yolo_layers.append(seq[0])
    self.seen = 0  # images seen during training (weights-header field)
    self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)
def create_model(opt):
    """Instantiate a Darknet model on the best device and load weights.

    Args:
        opt: options namespace with ``cfg`` and ``pretrained_weights``.

    Returns:
        (model, model_cfg): the network and the parsed cfg definitions.
    """
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    model = Darknet(opt.cfg).to(device)
    # model.apply(weights_init_normal)
    weights = opt.pretrained_weights
    if weights:
        if weights.endswith(".pth"):
            # PyTorch checkpoint.
            model.load_state_dict(torch.load(weights))
        else:
            # Original darknet binary weights format.
            model.load_darknet_weights(weights)
    model_cfg = parse_model_config(opt.cfg)
    return model, model_cfg
def __init__(self, cfgfile_path, img_size=416):
    """Build the network from a cfg file, forcing its input height.

    Args:
        cfgfile_path: darknet .cfg file describing the layers.
        img_size: square input resolution; overrides the cfg height.
    """
    super(Darknet, self).__init__()
    self.module_blocks = parse_config.parse_model_config(cfgfile_path)
    # Override the [net] height so the model matches img_size.
    self.module_blocks[0]['height'] = img_size
    self.net_hyperparams, self.module_list = create_modules(
        self.module_blocks)
    self.img_size = img_size
    self.seen = 0  # images seen during training (weights-header field)
    self.header_info = np.array([0, 0, 0, self.seen, 0])
    # Loss/metric names tracked by this variant.
    self.loss_names = [
        'loss', 'x', 'y', 'w', 'h', 'conf', 'cls', 'nT', 'TP', 'FP', 'FPe',
        'FN', 'TC'
    ]
def main():
    """Entry point: build the network, load data, and run train/val epochs."""
    # ---- network / hyperparameter initialisation ----
    args = parse_args()
    net = Darknet(args.model_def)
    start_epoch = 0
    parameters = parse_model_config(args.model_def).pop(0)  # [net] block
    batch_size = int(parameters['batch'])
    lr = float(parameters['learning_rate'])
    mom = float(parameters['momentum'])
    decay = float(parameters['decay'])
    # ---- class names ----
    namespath = args.names
    with open(namespath, 'r') as f:
        names = f.read().split("\n")[:-1]
    # Move to GPU if available and load (darknet-format) pretrained weights.
    # NOTE(review): the multi-GPU DataParallel path is commented out — the
    # original author noted it had issues.
    if CUDA:
        net.to(torch.device("cuda"))
        # net = nn.DataParallel(net)
    if args.pretrained_weights:
        if args.pretrained_weights.endswith(".pth"):
            net.load_state_dict(torch.load(args.pretrained_weights))
        else:
            net.load_darknet_weights(args.pretrained_weights)
    # net = nn.DataParallel(net)
    # ---- resume from checkpoint ----
    if args.resume:
        ckpt = args.ckpt
        checkpoint = torch.load(ckpt)
        net.load_state_dict(checkpoint['model'])
        start_epoch = checkpoint['epoch']
        print("load the model from {} and start epoch is: {}".format(
            ckpt, start_epoch))
    # initial tensorboardX writer
    if args.use_tfboard:
        if args.exp_name == 'default':
            writer = SummaryWriter()
        else:
            writer = SummaryWriter('runs/' + args.exp_name)
    # ---- create model output directories ----
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    # ---- datasets ----
    print('loading datasets....')
    trainpath = args.traindata
    valpath = args.valdata
    train_dataset = ImageFolder(trainpath,
                                augment=True,
                                multiscale=args.multiscale_training)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  collate_fn=train_dataset.collate_fn)
    val_dataset = ImageFolder(valpath,
                              img_size=args.img_size,
                              augment=False,
                              multiscale=False)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=batch_size,
                                shuffle=False,
                                num_workers=args.num_workers,
                                collate_fn=val_dataset.collate_fn)
    print('training data number: {}'.format(len(train_dataset)),
          "val data number: {}".format(len(val_dataset)))
    # ---- optimiser initialisation ----
    # optimizer = optim.SGD(net.parameters(), lr=lr, momentum=mom,
    #                       weight_decay=decay)
    optimizer = torch.optim.Adam(net.parameters())
    for epoch in range(start_epoch, args.max_epochs):
        train(epoch, net, train_dataloader, optimizer, args)
        val(epoch,
            args,
            net,
            val_dataloader,
            0.5,
            conf_thresh=0.5,
            nms_thresh=0.5,
            img_size=args.img_size)
        # Free cached GPU memory between epochs.
        torch.cuda.empty_cache()
def test_parse_model_config(self):
    """Smoke-test: parsing the yolov3 cfg runs and yields definitions."""
    cfg_path = './13-1-yolo-pytorch/config/yolov3.cfg'
    module_defs = parse_model_config(cfg_path)
    print(module_defs)
print('None') # # inner product layer # # if meet inner product layer, # # the next bias weight can be misclassified as 'BatchNorm' layer as len(params.size()) == 1 # new_weights[name] = params # inner_product_flag = True # align names in new_weights with pytorch model # after move BatchNorm layer in pytorch model, # the layer names between old model and new model will mis-align pytorch_net_key_list = list(pytorch_net.state_dict().keys()) new_weights_key_list = list(new_weights.keys()) print(len(pytorch_net_key_list)) print(len(new_weights_key_list)) print('Aligning weight names...') module_blocks = parse_config.parse_model_config(net_config_path) module_blocks[0]['height'] = img_size net_hyperparams, module_list = create_modules(module_blocks) pytorch_net_key_list = list(module_list.state_dict().keys()) new_weights_key_list = list(new_weights.keys()) print(len(pytorch_net_key_list)) print(len(new_weights_key_list)) # print(new_weights_key_list) # assert len(pytorch_net_key_list) == len(new_weights_key_list) for index in range(len(pytorch_net_key_list)): print(pytorch_net_key_list[index]) # print(new_weights_key_list[index]) new_weights[pytorch_net_key_list[index]] = new_weights.pop( new_weights_key_list[index])
        @:param path - path of the new weights file
        @:param cutoff - save layers between 0 and cutoff (cutoff = -1 -> all are saved)
        """
        # NOTE(review): consider `with open(...)` — fp leaks if tofile raises.
        fp = open(path, "wb")
        # Update the "images seen" field before serialising the header.
        self.header_info[3] = self.seen
        self.header_info.tofile(fp)
        # Iterate through layers
        for i, (module_def, module) in enumerate(
                zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
            if module_def["type"] == "convolutional":
                conv_layer = module[0]
                # If batch norm, load bn first
                if module_def["batch_normalize"]:
                    bn_layer = module[1]
                    # Darknet order: bias, weight, running mean, running var.
                    bn_layer.bias.data.cpu().numpy().tofile(fp)
                    bn_layer.weight.data.cpu().numpy().tofile(fp)
                    bn_layer.running_mean.data.cpu().numpy().tofile(fp)
                    bn_layer.running_var.data.cpu().numpy().tofile(fp)
                # Load conv bias
                else:
                    conv_layer.bias.data.cpu().numpy().tofile(fp)
                # Load conv weights
                conv_layer.weight.data.cpu().numpy().tofile(fp)
        fp.close()


if __name__ == '__main__':
    # Quick self-check: parse the cfg and print the constructed modules.
    blocks = parse_model_config("config/yolov3.cfg")
    print(create_modules(blocks))
# -*- coding: utf-8 -*- import torch from torch.utils.data import DataLoader from torch.autograd import Variable from datasets import YOLODataset from utils.parse_config import parse_model_config from utils.utils import weights_init_normal from models import Darknet cuda = torch.cuda.is_available() model_config_path = "config/yolov3.cfg" train_path = "data/test.txt" # Get hyperparameters hyperparams = parse_model_config(model_config_path)[0] learning_rate = float(hyperparams["learning_rate"]) momentum = float(hyperparams["momentum"]) decay = float(hyperparams["decay"]) burn_in = int(hyperparams["burn_in"]) # Initiate model model = Darknet(model_config_path) model.apply(weights_init_normal) model.train() # Get dataloader dataloader = DataLoader(YOLODataset(train_path), batch_size=1, shuffle=False) Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
def __init__(self, config_path, xy_loss, wh_loss, no_object_loss,
             object_loss, vanilla_anchor):
    """Build the network from the cfg and cache training/export settings.

    :param config_path: darknet cfg file; also names the exported onnx file.
    :param xy_loss: weight of the xy box-center loss term.
    :param wh_loss: weight of the wh box-size loss term.
    :param no_object_loss: weight of the no-object confidence loss term.
    :param object_loss: weight of the object confidence loss term.
    :param vanilla_anchor: if True, use the hard-coded vanilla anchor list
        instead of anchors read from the training csv.
    """
    super(Darknet, self).__init__()
    self.module_defs = parse_model_config(config_path)
    self.hyperparams, self.module_list = create_modules(
        module_defs=self.module_defs,
        xy_loss=xy_loss,
        wh_loss=wh_loss,
        no_object_loss=no_object_loss,
        object_loss=object_loss,
        vanilla_anchor=vanilla_anchor)
    self.img_width = int(self.hyperparams["width"])
    self.img_height = int(self.hyperparams["height"])
    # in order to help train.py defines the onnx filename since it is not
    # defined by yolo2onnx.py
    self.onnx_height = int(self.hyperparams["onnx_height"])
    self.onnx_name = config_path.split('/')[-1].split('.')[0] + '_' + str(
        self.img_width) + str(self.onnx_height) + '.onnx'
    self.num_classes = int(self.hyperparams["classes"])
    # 1 channel -> black & white input, 3 channels -> colour input.
    if int(self.hyperparams["channels"]) == 1:
        self.bw = True
    elif int(self.hyperparams["channels"]) == 3:
        self.bw = False
    else:
        print('Channels in cfg file is not set properly, making it colour')
        self.bw = False
    current_month = datetime.now().strftime('%B').lower()
    current_year = str(datetime.now().year)
    # Dataset locations and sizes, taken straight from the cfg hyperparams.
    self.validate_uri = self.hyperparams["validate_uri"]
    self.train_uri = self.hyperparams["train_uri"]
    self.num_train_images = int(self.hyperparams["num_train_images"])
    self.num_validate_images = int(self.hyperparams["num_validate_images"])
    # Detection thresholds.
    self.conf_thresh = float(self.hyperparams["conf_thresh"])
    self.nms_thresh = float(self.hyperparams["nms_thresh"])
    self.iou_thresh = float(self.hyperparams["iou_thresh"])
    self.start_weights_dim = [
        int(x) for x in self.hyperparams["start_weights_dim"].split(',')
    ]
    self.conv_activation = self.hyperparams["conv_activation"]
    ##### loss constants #####
    self.xy_loss = xy_loss
    self.wh_loss = wh_loss
    self.no_object_loss = no_object_loss
    self.object_loss = object_loss
    ##### reading anchors from train.csv #####
    # NOTE(review): assumes the first csv row encodes anchors as
    # "w,h|w,h|..." ahead of the first quote — confirm against the csv
    # writer before changing this parsing.
    csv_uri = self.hyperparams["train_uri"]
    training_csv_tempfile = csv_uri
    with open(training_csv_tempfile) as f:
        csv_reader = csv.reader(f)
        row = next(csv_reader)
        row = str(row)[2:-2]
        anchor_list = [[float(y) for y in x.split(',')]
                       for x in row.split("'")[0].split('|')]
    #############################
    ##### using vanilla anchor boxes until skanda dataloader is done #####
    if vanilla_anchor:
        anchor_list = vanilla_anchor_list
    #############################
    self.anchors = anchor_list
    self.seen = 0  # images seen during training (weights-header field)
    self.header_info = torch.tensor([0, 0, 0, self.seen, 0])