def _main_h5(args):
    """Train YOLO on an H5 dataset using the settings from the JSON config.

    BUG FIX: the original body passed ``train_imgs``/``valid_imgs`` to
    ``yolo.train`` without ever defining them (NameError at runtime).
    Annotations are now parsed the same way the sibling ``_main_`` variants
    in this file do — TODO confirm the config keys match this pipeline.
    """
    print("\nTraining YOLO on H5!\n")

    config_path = args.conf
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())
    pp.pprint(config)

    # Parse annotations of the training set.
    train_imgs, train_labels = parse_annotation(
        config['train']['train_annot_folder'],
        config['train']['train_image_folder'],
        config['model']['labels'])

    # Parse annotations of the validation set if one exists; otherwise hold
    # out 20% of the (shuffled) training images.
    if os.path.exists(str(config['valid']['valid_annot_folder'])):
        valid_imgs, valid_labels = parse_annotation(
            config['valid']['valid_annot_folder'],
            config['valid']['valid_image_folder'],
            config['model']['labels'])
    else:
        train_valid_split = int(0.8 * len(train_imgs))
        np.random.shuffle(train_imgs)
        valid_imgs = train_imgs[train_valid_split:]
        train_imgs = train_imgs[:train_valid_split]

    # Construct the model.
    yolo = YOLO(backend=config['model']['backend'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    # (A commented-out load_weights call with a hard-coded absolute path was
    # removed as dead code.)

    yolo.train(train_imgs=train_imgs,
               valid_imgs=valid_imgs,
               train_times=config['train']['train_times'],
               valid_times=config['valid']['valid_times'],
               nb_epochs=config['train']['nb_epochs'],
               learning_rate=config['train']['learning_rate'],
               batch_size=config['train']['batch_size'],
               warmup_epochs=config['train']['warmup_epochs'],
               object_scale=config['train']['object_scale'],
               no_object_scale=config['train']['no_object_scale'],
               coord_scale=config['train']['coord_scale'],
               class_scale=config['train']['class_scale'],
               saved_weights_name=config['train']['saved_weights_name'],
               debug=config['train']['debug'],
               full_log_dir=config['train']['full_log_dir'],
               early_stop_patience=config['train']['early_stop_patience'],
               early_stop_min_delta=config['train']['early_stop_min_delta'],
               learning_rate_decay_factor=config['train']['learning_rate_decay_factor'],
               learning_rate_decay_patience=config['train']['learning_rate_decay_patience'],
               learning_rate_decay_min_lr=config['train']['learning_rate_decay_min_lr'])
    return
def main(argstate):
    """Parse annotations, build the YOLO model and run training.

    ``argstate`` is an attribute-style config object with ``train``/``valid``
    sub-sections plus model hyper-parameters.
    """
    ###############################
    #   Parse the annotations
    ###############################
    # Training set annotations.
    train_imgs, train_labels = parse_annotation(argstate.train.annot_folder,
                                                argstate.train.image_folder,
                                                argstate.labels)

    # Validation set if it exists, otherwise an 80/20 split of the training
    # set (seeded so the split is reproducible across runs).
    if os.path.exists(argstate.valid.annot_folder):
        valid_imgs, valid_labels = parse_annotation(
            argstate.valid.annot_folder,
            argstate.valid.image_folder,
            argstate.labels)
    else:
        train_valid_split = int(0.8 * len(train_imgs))
        np.random.seed(42)
        np.random.shuffle(train_imgs)
        valid_imgs = train_imgs[train_valid_split:]
        train_imgs = train_imgs[:train_valid_split]

    print(train_labels)
    if len(set(argstate.labels).intersection(set(train_labels.keys()))) == 0:
        print(
            "Labels to be detected are not present in the dataset! Please revise the list of labels in the config.json file!"
        )
        return

    ###############################
    #   Construct the model
    ###############################
    global yolo
    yolo = YOLO(architecture=argstate.architecture,
                input_size=argstate.input_size,
                labels=argstate.labels,
                max_box_per_image=argstate.mbpi,
                anchors=argstate.anchors)

    ###############################
    #   Load the pretrained weights (if any)
    ###############################
    if os.path.exists(argstate.pretrained_weights):
        print("Loading pre-trained weights in", argstate.pretrained_weights)
        yolo.load_weights(argstate.pretrained_weights)

    # Load pretrained weights into the feature extractor.
    # NOTE(review): the original comment said "then freeze them", but the
    # code marks the extractor trainable; behavior kept as-is — confirm intent.
    yolo.feature_extractor.feature_extractor.load_weights('yolov2_weights.h5')
    yolo.feature_extractor.feature_extractor.trainable = True

    # Spawn the train-pauser UI in a daemon thread so it cannot keep the
    # process alive after training finishes.
    # BUG FIX: the original did `pause_thread.isDaemon = True`, which only
    # shadowed the Thread.isDaemon() method and never made the thread a
    # daemon; `daemon` is the real attribute.
    pause_thread = Thread(target=spawn_pause_ui)
    pause_thread.daemon = True
    pause_thread.start()

    ###############################
    #   Start the training process
    ###############################
    yolo.train(train_imgs=train_imgs,
               valid_imgs=valid_imgs,
               train_times=argstate.train.times,
               valid_times=argstate.valid.times,
               nb_epoch=argstate.nb_epoch,
               learning_rate=argstate.learning_rate,
               batch_size=argstate.batch_size,
               warmup_bs=argstate.warmup_bs,
               object_scale=argstate.object_scale,
               no_object_scale=argstate.no_object_scale,
               coord_scale=argstate.coord_scale,
               class_scale=argstate.class_scale,
               saved_weights_name=argstate.saved_weights_name,
               debug=argstate.debug)
def _main_(args):
    """Train YOLO from a JSON config, supporting XML or CSV annotations."""
    config_path = args.conf
    keras.backend.tensorflow_backend.set_session(get_session())

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    if config['backup']['create_backup']:
        config = create_backup(config)

    ###############################
    #   Parse the annotations
    ###############################
    parser_type = config['parser_annotation_type']
    if parser_type == 'xml':
        # Parse annotations of the training set.
        train_imgs, train_labels = parse_annotation(
            config['train']['train_annot_folder'],
            config['train']['train_image_folder'],
            config['model']['labels'])
        # Parse the validation set if any, otherwise split the training set.
        if os.path.exists(config['valid']['valid_annot_folder']):
            valid_imgs, valid_labels = parse_annotation(
                config['valid']['valid_annot_folder'],
                config['valid']['valid_image_folder'],
                config['model']['labels'])
            split = False
        else:
            split = True
    elif parser_type == 'csv':
        # Parse annotations of the training set.
        train_imgs, train_labels = parse_annotation_csv(
            config['train']['train_csv_file'],
            config['model']['labels'],
            config['train']['train_csv_base_path'])
        # Parse the validation set if any, otherwise split the training set.
        if os.path.exists(config['valid']['valid_csv_file']):
            valid_imgs, valid_labels = parse_annotation_csv(
                config['valid']['valid_csv_file'],
                config['model']['labels'],
                config['valid']['valid_csv_base_path'])
            split = False
        else:
            print("Validation file not found commencing split")
            split = True
    else:
        # BUG FIX: the original indexed config['parser_annotations_type']
        # (typo'd key) here, so this path raised KeyError instead of the
        # intended ValueError; the message also named the wrong key.
        raise ValueError(
            "'parser_annotation_type' must be 'xml' or 'csv' not {}.".format(
                parser_type))

    if split:
        train_valid_split = int(0.8 * len(train_imgs))
        np.random.shuffle(train_imgs)
        valid_imgs = train_imgs[train_valid_split:]
        train_imgs = train_imgs[:train_valid_split]

    if len(config['model']['labels']) > 0:
        overlap_labels = set(config['model']['labels']).intersection(
            set(train_labels.keys()))
        print('Seen labels:\t', train_labels)
        print('Given labels:\t', config['model']['labels'])
        print('Overlap labels:\t', overlap_labels)
        if len(overlap_labels) < len(config['model']['labels']):
            print(
                'Some labels have no annotations! Please revise the list of labels in the config.json file!'
            )
            return
    else:
        print('No labels are provided. Train on all seen labels.')
        # BUG FIX: store a real list, not a dict_keys view, so downstream
        # consumers of config['model']['labels'] get a stable sequence.
        config['model']['labels'] = list(train_labels.keys())
        with open("labels.json", 'w') as outfile:
            json.dump({"labels": list(train_labels.keys())}, outfile)

    ###############################
    #   Construct the model
    ###############################
    yolo = YOLO(backend=config['model']['backend'],
                input_size=(config['model']['input_size_h'],
                            config['model']['input_size_w']),
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'],
                gray_mode=config['model']['gray_mode'])

    ###############################
    #   Load the pretrained weights (if any)
    ###############################
    if os.path.exists(config['train']['pretrained_weights']):
        print("Loading pre-trained weights in",
              config['train']['pretrained_weights'])
        yolo.load_weights(config['train']['pretrained_weights'])

    ###############################
    #   Start the training process
    ###############################
    yolo.train(train_imgs=train_imgs,
               valid_imgs=valid_imgs,
               train_times=config['train']['train_times'],
               valid_times=config['valid']['valid_times'],
               nb_epochs=config['train']['nb_epochs'],
               learning_rate=config['train']['learning_rate'],
               batch_size=config['train']['batch_size'],
               warmup_epochs=config['train']['warmup_epochs'],
               object_scale=config['train']['object_scale'],
               no_object_scale=config['train']['no_object_scale'],
               coord_scale=config['train']['coord_scale'],
               class_scale=config['train']['class_scale'],
               saved_weights_name=config['train']['saved_weights_name'],
               debug=config['train']['debug'],
               early_stop=config['train']['early_stop'],
               workers=config['train']['workers'],
               max_queue_size=config['train']['max_queue_size'],
               tb_logdir=config['train']['tensorboard_log_dir'])
def _main_(args):
    """Train YOLO on aerial imagery listed in the config's image-name lists.

    CONSISTENCY FIX: this variant used Python-2-only `print` statements
    while the rest of the file uses the Python 3 print() function; output
    is unchanged.
    """
    config_path = args.conf
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Parse the annotations
    ###############################
    # Parse annotations of the training set.
    train_imgs, train_labels = aerial_parse_annotation(
        config['train']['train_image_name_list'],
        config['model']['labels'])

    # Parse annotations of the validation set.
    valid_imgs, valid_labels = aerial_parse_annotation(
        config['valid']['valid_image_name_list'],
        config['model']['labels'])

    if len(config['model']['labels']) > 0:
        overlap_labels = set(config['model']['labels']).intersection(
            set(train_labels.keys()))
        print('Seen labels:\t', train_labels)
        print('Given labels:\t', config['model']['labels'])
        print('Overlap labels:\t', overlap_labels)
        if len(overlap_labels) < len(config['model']['labels']):
            print('Some labels have no annotations! Please revise the list of labels in the config.json file!')
            return
    else:
        print('No labels are provided. Train on all seen labels.')
        # Store a real list rather than a dict_keys view.
        config['model']['labels'] = list(train_labels.keys())

    ###############################
    #   Construct the model
    ###############################
    yolo = YOLO(architecture=config['model']['architecture'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ###############################
    #   Load the pretrained weights (if any)
    ###############################
    if os.path.exists(config['train']['pretrained_weights']):
        print("Loading pre-trained weights in",
              config['train']['pretrained_weights'])
        yolo.load_weights(config['train']['pretrained_weights'])

    ###############################
    #   Start the training process
    ###############################
    yolo.train(train_imgs=train_imgs,
               valid_imgs=valid_imgs,
               train_times=config['train']['train_times'],
               valid_times=config['valid']['valid_times'],
               nb_epoch=config['train']['nb_epoch'],
               learning_rate=config['train']['learning_rate'],
               batch_size=config['train']['batch_size'],
               warmup_epochs=config['train']['warmup_epochs'],
               object_scale=config['train']['object_scale'],
               no_object_scale=config['train']['no_object_scale'],
               coord_scale=config['train']['coord_scale'],
               class_scale=config['train']['class_scale'],
               saved_weights_name=config['train']['saved_weights_name'],
               debug=config['train']['debug'])
def _main_(args):
    """Train YOLO on .npz event files found under train_image_folder.

    This pipeline has no XML annotations: the input is simply every .npz
    file in the configured folder, split 80/20 into train/validation.
    """
    config_path = args.conf
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    # Collect every .npz file in the training folder.
    glob_str = config['train']['train_image_folder'] + '/*.npz'
    filelist = glob.glob(glob_str)
    logger.info('train_image_folder = %s', glob_str)
    logger.info('filelist = %s', filelist)

    # (A large commented-out parse_annotation code path was removed as dead
    # code; this pipeline never used XML annotations.)

    # 80/20 train/validation split of the shuffled file list.
    train_valid_split = int(0.8 * len(filelist))
    np.random.shuffle(filelist)
    valid_imgs = filelist[train_valid_split:]
    train_imgs = filelist[:train_valid_split]
    logger.info('Length of filelist: %s', len(filelist))
    logger.info('Length of train_images: %s', len(train_imgs))
    logger.info('Length of valid_imgs: %s', len(valid_imgs))

    ###############################
    #   Construct the model
    ###############################
    yolo = YOLO(backend=config['model']['backend'],
                input_shape=config['model']['input_shape'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ###############################
    #   Start the training process
    ###############################
    yolo.train(train_imgs=train_imgs,
               valid_imgs=valid_imgs,
               evts_per_file=config['train']['evts_per_file'],
               train_times=config['train']['train_times'],
               valid_times=config['valid']['valid_times'],
               nb_epochs=config['train']['nb_epochs'],
               learning_rate=config['train']['learning_rate'],
               batch_size=config['train']['batch_size'],
               warmup_epochs=config['train']['warmup_epochs'],
               object_scale=config['train']['object_scale'],
               no_object_scale=config['train']['no_object_scale'],
               coord_scale=config['train']['coord_scale'],
               class_scale=config['train']['class_scale'],
               use_caching=config['train']['use_caching'],
               saved_weights_name=config['train']['saved_weights_name'],
               debug=config['train']['debug'])
def _main_(args):
    """Train YOLO from a JSON config (XML-annotation pipeline).

    CONSISTENCY FIX: this variant used Python-2-only `print` statements
    while the rest of the file uses the Python 3 print() function; output
    is unchanged.
    """
    config_path = args.conf
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Parse the annotations
    ###############################
    # Parse annotations of the training set.
    train_imgs, train_labels = parse_annotation(
        config['train']['train_annot_folder'],
        config['train']['train_image_folder'],
        config['model']['labels'])

    # Parse the validation set if any, otherwise split the training set.
    if os.path.exists(config['valid']['valid_annot_folder']):
        valid_imgs, valid_labels = parse_annotation(
            config['valid']['valid_annot_folder'],
            config['valid']['valid_image_folder'],
            config['model']['labels'])
    else:
        train_valid_split = int(0.8 * len(train_imgs))
        np.random.shuffle(train_imgs)
        valid_imgs = train_imgs[train_valid_split:]
        train_imgs = train_imgs[:train_valid_split]

    print(train_labels)
    if len(set(config['model']['labels']).intersection(set(train_labels.keys()))) == 0:
        print("Labels to be detected are not present in the dataset! Please revise the list of labels in the config.json file!")
        return

    ###############################
    #   Construct the model
    ###############################
    yolo = YOLO(architecture=config['model']['architecture'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ###############################
    #   Load the pretrained weights (if any)
    ###############################
    if os.path.exists(config['train']['pretrained_weights']):
        print("Loading pre-trained weights in",
              config['train']['pretrained_weights'])
        yolo.load_weights(config['train']['pretrained_weights'])

    ###############################
    #   Start the training process
    ###############################
    yolo.train(train_imgs=train_imgs,
               valid_imgs=valid_imgs,
               train_times=config['train']['train_times'],
               valid_times=config['valid']['valid_times'],
               nb_epoch=config['train']['nb_epoch'],
               learning_rate=config['train']['learning_rate'],
               batch_size=config['train']['batch_size'],
               warmup_bs=config['train']['warmup_batches'],
               object_scale=config['train']['object_scale'],
               no_object_scale=config['train']['no_object_scale'],
               coord_scale=config['train']['coord_scale'],
               class_scale=config['train']['class_scale'],
               saved_weights_name=config['train']['saved_weights_name'],
               debug=config['train']['debug'])
def _main_(args):
    """Train a (possibly multi-GPU) YOLO model from a JSON config file."""
    with open(args.conf) as config_buffer:
        config = json.load(config_buffer)

    # Expose GPUs 0..n-1 to CUDA when multi-GPU training was requested.
    gpu_count = config['train']['gpu_count']
    if gpu_count > 1:
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, range(gpu_count)))

    ###############################
    #   Parse the annotations
    ###############################
    # load_images handles both training and validation sets for this pipeline.
    train_images, validation_images = load_images(config)

    if len(config['model']['labels']) > 0:
        print('Given labels:\t', config['model']['labels'])

    ###############################
    #   Construct the model
    ###############################
    model_cfg = config['model']
    yolo = YOLO(backend=model_cfg['backend'],
                input_size=model_cfg['input_size'],
                labels=model_cfg['labels'],
                max_box_per_image=model_cfg['max_box_per_image'],
                anchors=model_cfg['anchors'])

    ###############################
    #   Load the pretrained weights (if any)
    ###############################
    weights_path = config['train']['pretrained_weights']
    if os.path.exists(weights_path):
        print("Loading pre-trained weights in", weights_path)
        yolo.load_weights(weights_path)

    ###############################
    #   Start the training process
    ###############################
    train_cfg = config['train']
    yolo.train(train_imgs=train_images,
               valid_imgs=validation_images,
               train_times=train_cfg['train_times'],
               valid_times=1,
               nb_epochs=train_cfg['nb_epochs'],
               learning_rate=train_cfg['learning_rate'],
               batch_size=train_cfg['batch_size'],
               warmup_epochs=train_cfg['warmup_epochs'],
               saved_weights_dir=train_cfg['saved_weights_dir'],
               save_every_n_epoch=train_cfg['save_every_n_epoch'],
               object_scale=train_cfg['object_scale'],
               no_object_scale=train_cfg['no_object_scale'],
               coord_scale=train_cfg['coord_scale'],
               class_scale=train_cfg['class_scale'],
               multi_gpu=gpu_count > 1,
               debug=train_cfg['debug'])
def _main_(args):
    """Train YOLO from a JSON config, then run one prediction for a visual check.

    BUG FIXES: (1) the layer-unfreezing loop iterated over ``galaxyModel``,
    an undefined name (NameError) — replaced with ``yolo.model``;
    TODO confirm that is the intended model object.  (2) Python-2-only
    `print` statements modernized to print() for consistency with the rest
    of the file (output unchanged).
    """
    config_path = args.conf
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Parse the annotations
    ###############################
    # Parse annotations of the training set.
    train_imgs, train_labels = parse_annotation(
        config['train']['train_annot_folder'],
        config['train']['train_image_folder'],
        config['model']['labels'])

    # Parse the validation set if any, otherwise split the training set.
    if os.path.exists(config['valid']['valid_annot_folder']):
        valid_imgs, valid_labels = parse_annotation(
            config['valid']['valid_annot_folder'],
            config['valid']['valid_image_folder'],
            config['model']['labels'])
    else:
        train_valid_split = int(0.8 * len(train_imgs))
        np.random.shuffle(train_imgs)
        valid_imgs = train_imgs[train_valid_split:]
        train_imgs = train_imgs[:train_valid_split]

    if len(config['model']['labels']) > 0:
        overlap_labels = set(config['model']['labels']).intersection(
            set(train_labels.keys()))
        print('Seen labels:\t', train_labels)
        print('Given labels:\t', config['model']['labels'])
        print('Overlap labels:\t', overlap_labels)
        if len(overlap_labels) < len(config['model']['labels']):
            print('Some labels have no annotations! Please revise the list of labels in the config.json file!')
            return
    else:
        print('No labels are provided. Train on all seen labels.')
        config['model']['labels'] = list(train_labels.keys())

    ###############################
    #   Construct the model
    ###############################
    yolo = YOLO(architecture=config['model']['architecture'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ###############################
    #   Load the pretrained weights (if any)
    ###############################
    if os.path.exists(config['train']['pretrained_weights']):
        print("Loading pre-trained weights in",
              config['train']['pretrained_weights'])
        yolo.load_weights(config['train']['pretrained_weights'])

    # Mark every layer trainable (fine-tuning the whole network).
    for layer in yolo.model.layers:
        print(layer)
        layer.trainable = True

    ###############################
    #   Start the training process
    ###############################
    if args.training:
        yolo.train(train_imgs=train_imgs,
                   valid_imgs=valid_imgs,
                   train_times=config['train']['train_times'],
                   valid_times=config['valid']['valid_times'],
                   nb_epoch=config['train']['nb_epoch'],
                   learning_rate=config['train']['learning_rate'],
                   batch_size=config['train']['batch_size'],
                   warmup_epochs=config['train']['warmup_epochs'],
                   object_scale=config['train']['object_scale'],
                   no_object_scale=config['train']['no_object_scale'],
                   coord_scale=config['train']['coord_scale'],
                   class_scale=config['train']['class_scale'],
                   saved_weights_name=config['train']['saved_weights_name'],
                   debug=config['train']['debug'])

    # Sanity-check prediction on one validation image.
    image = cv2.imread(config['valid']['valid_image_folder'] + '/10.png')
    plt.figure(figsize=(10, 10))
    boxes = yolo.predict(image)
    image = draw_boxes(image, boxes, labels=config['model']['labels'])
    plt.imshow(image[:, :, ::-1])
    plt.show()
def _main_(args):
    """Train YOLO from a JSON config with explicit GPU selection.

    BUG FIX: the original ``YOLO(...)`` call was missing the comma between
    the ``anchors`` and ``gpus`` keyword arguments, which is a SyntaxError.
    """
    config_path = args.conf
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    # Restrict CUDA to the GPUs named in the config; gpus counts them.
    os.environ["CUDA_VISIBLE_DEVICES"] = config['env']['gpu']
    gpus = max(1, len(config['env']['gpu'].split(",")))

    ###############################
    #   Parse the annotations
    ###############################
    # Parse annotations of the training set.
    train_imgs, train_labels = parse_annotation(
        config['train']['train_annot_folder'],
        config['train']['train_image_folder'],
        config['model']['labels'],
        config['train']['image_ext_name'],
        config['train']['image_prefix'])

    # Parse the validation set if any, otherwise split the training set.
    if os.path.exists(config['valid']['valid_annot_folder']):
        valid_imgs, valid_labels = parse_annotation(
            config['valid']['valid_annot_folder'],
            config['valid']['valid_image_folder'],
            config['model']['labels'],
            config['train']['image_ext_name'],
            config['train']['image_prefix'])
    else:
        train_valid_split = int(0.8 * len(train_imgs))
        np.random.shuffle(train_imgs)
        valid_imgs = train_imgs[train_valid_split:]
        train_imgs = train_imgs[:train_valid_split]

    overlap_labels = set(config['model']['labels']).intersection(
        set(train_labels.keys()))
    print('Seen labels:{}'.format(train_labels))
    print('Given labels:{}'.format(config['model']['labels']))
    print('Overlap labels:{}'.format(overlap_labels))

    if len(overlap_labels) < len(config['model']['labels']):
        print('Some labels have no images! Please revise the list of labels in the config.json file!')
        return

    ###############################
    #   Construct the model
    ###############################
    yolo = YOLO(architecture=config['model']['architecture'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'],
                gpus=gpus)

    ###############################
    #   Load the pretrained weights (if any)
    ###############################
    if os.path.exists(config['train']['pretrained_weights']):
        print("Loading pre-trained weights in {}".format(
            config['train']['pretrained_weights']))
        yolo.load_weights(config['train']['pretrained_weights'])

    ###############################
    #   Start the training process
    ###############################
    yolo.train(train_imgs=train_imgs,
               valid_imgs=valid_imgs,
               train_times=config['train']['train_times'],
               valid_times=config['valid']['valid_times'],
               nb_epoch=config['train']['nb_epoch'],
               learning_rate=config['train']['learning_rate'],
               batch_size=config['train']['batch_size'],
               warmup_bs=config['train']['warmup_batches'],
               object_scale=config['train']['object_scale'],
               no_object_scale=config['train']['no_object_scale'],
               coord_scale=config['train']['coord_scale'],
               class_scale=config['train']['class_scale'],
               saved_weights_name=config['train']['saved_weights_name'],
               debug=config['train']['debug'])
def _main_(args):
    """Train YOLO from a JSON config (XML-annotation pipeline).

    CONSISTENCY FIX: this variant used Python-2-only `print` statements
    while the rest of the file uses the Python 3 print() function; output
    is unchanged.  Also stores labels as a real list, not a dict view.
    """
    config_path = args.conf
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Parse the annotations
    ###############################
    # Parse annotations of the training set.
    train_imgs, train_labels = parse_annotation(
        config['train']['train_annot_folder'],
        config['train']['train_image_folder'],
        config['model']['labels'])

    # Parse the validation set if any, otherwise split the training set.
    if os.path.exists(config['valid']['valid_annot_folder']):
        valid_imgs, valid_labels = parse_annotation(
            config['valid']['valid_annot_folder'],
            config['valid']['valid_image_folder'],
            config['model']['labels'])
    else:
        train_valid_split = int(0.8 * len(train_imgs))
        np.random.shuffle(train_imgs)
        valid_imgs = train_imgs[train_valid_split:]
        train_imgs = train_imgs[:train_valid_split]

    if len(config['model']['labels']) > 0:
        overlap_labels = set(config['model']['labels']).intersection(
            set(train_labels.keys()))
        print('Seen labels:\t', train_labels)
        print('Given labels:\t', config['model']['labels'])
        print('Overlap labels:\t', overlap_labels)
        if len(overlap_labels) < len(config['model']['labels']):
            print('Some labels have no annotations! Please revise the list of labels in the config.json file!')
            return
    else:
        print('No labels are provided. Train on all seen labels.')
        config['model']['labels'] = list(train_labels.keys())

    ###############################
    #   Construct the model
    ###############################
    yolo = YOLO(architecture=config['model']['architecture'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ###############################
    #   Load the pretrained weights (if any)
    ###############################
    if os.path.exists(config['train']['pretrained_weights']):
        print("Loading pre-trained weights in",
              config['train']['pretrained_weights'])
        yolo.load_weights(config['train']['pretrained_weights'])

    ###############################
    #   Start the training process
    ###############################
    yolo.train(train_imgs=train_imgs,
               valid_imgs=valid_imgs,
               train_times=config['train']['train_times'],
               valid_times=config['valid']['valid_times'],
               nb_epoch=config['train']['nb_epoch'],
               learning_rate=config['train']['learning_rate'],
               batch_size=config['train']['batch_size'],
               warmup_epochs=config['train']['warmup_epochs'],
               object_scale=config['train']['object_scale'],
               no_object_scale=config['train']['no_object_scale'],
               coord_scale=config['train']['coord_scale'],
               class_scale=config['train']['class_scale'],
               saved_weights_name=config['train']['saved_weights_name'],
               debug=config['train']['debug'])
def _main_(args):
    """Train YOLO from a JSON config with LR-decay / early-stop options.

    BUG FIXES: (1) several print((...)) calls — leftover 2to3 artifacts —
    printed tuple reprs instead of the intended messages; (2) the local
    ``is_mobilenet`` flag was assigned but never used and has been removed
    (the backend print in the non-MobileNet branch is kept).
    """
    config_path = args.conf
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())
    pp.pprint(config)

    # Parse annotations of the training set.
    train_imgs, train_labels = parse_annotation(
        config['train']['train_annot_folder'],
        config['train']['train_image_folder'],
        config['model']['labels'])

    # Parse the validation set if any, otherwise hold out 1% for validation.
    if os.path.exists(str(config['valid']['valid_annot_folder'])):
        valid_imgs, valid_labels = parse_annotation(
            config['valid']['valid_annot_folder'],
            config['valid']['valid_image_folder'],
            config['model']['labels'])
    else:
        print("Splitting into train and validation")
        train_valid_split = int(0.99 * len(train_imgs))
        np.random.shuffle(train_imgs)
        valid_imgs = train_imgs[train_valid_split:]
        train_imgs = train_imgs[:train_valid_split]

    if len(config['model']['labels']) > 0:
        overlap_labels = set(config['model']['labels']).intersection(
            set(train_labels.keys()))
        print('Seen labels:\t', train_labels)
        print('Given labels:\t', config['model']['labels'])
        print('Overlap labels:\t', overlap_labels)
        if len(overlap_labels) < len(config['model']['labels']):
            print(
                'Some labels have no annotations! Please revise the list of labels in the config.json file!'
            )
            return
    else:
        print('No labels are provided. Train on all seen labels.')
        config['model']['labels'] = list(train_labels.keys())

    yolo = YOLO(backend=config['model']['backend'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    if os.path.exists(str(config['train']['pretrained_weights'])):
        print("Loading pre-trained weights in",
              config['train']['pretrained_weights'])
        if config['model']['backend'] != "MobileNet":
            print(config['model']['backend'])
        yolo.load_weights(config['train']['pretrained_weights'])

    print("\nStarting training...")
    yolo.train(train_imgs=train_imgs,
               valid_imgs=valid_imgs,
               train_times=config['train']['train_times'],
               valid_times=config['valid']['valid_times'],
               nb_epochs=config['train']['nb_epochs'],
               learning_rate=config['train']['learning_rate'],
               batch_size=config['train']['batch_size'],
               warmup_epochs=config['train']['warmup_epochs'],
               object_scale=config['train']['object_scale'],
               no_object_scale=config['train']['no_object_scale'],
               coord_scale=config['train']['coord_scale'],
               class_scale=config['train']['class_scale'],
               saved_weights_name=config['train']['saved_weights_name'],
               debug=config['train']['debug'],
               full_log_dir=config['train']['full_log_dir'],
               early_stop_patience=config['train']['early_stop_patience'],
               early_stop_min_delta=config['train']['early_stop_min_delta'],
               learning_rate_decay_factor=config['train']['learning_rate_decay_factor'],
               learning_rate_decay_patience=config['train']['learning_rate_decay_patience'],
               learning_rate_decay_min_lr=config['train']['learning_rate_decay_min_lr'])
# Annotations come from a pre-built pickle instead of the XML parser in
# this pipeline (the commented-out parse_annotation call was the old path).
# BUG FIX: the original used pickle.load(open(...)) and never closed the
# file handle; use a context manager.
# SECURITY NOTE: pickle.load executes arbitrary code during unpickling —
# only load annotation files from a trusted source.
with open('dataset/annotation.pkl', 'rb') as annotation_file:
    train_imgs = pickle.load(annotation_file)

yolo = YOLO(backend=config['model']['backend'],
            input_size=config['model']['input_size'],
            labels=config['model']['labels'],
            max_box_per_image=config['model']['max_box_per_image'],
            anchors=config['model']['anchors'])

random.shuffle(train_imgs)
N = len(train_imgs)
# NOTE(review): despite its name, 'validation_split' is used here as the
# fraction of data kept for TRAINING — confirm the config semantics.
train_size = int(N * config['train']['validation_split'])
print(train_size)
valid_imgs = train_imgs[train_size:]
train_imgs = train_imgs[:train_size]

yolo.train(train_imgs=train_imgs,
           valid_imgs=valid_imgs,
           nb_epochs=config['train']['nb_epochs'],
           learning_rate=config['train']['learning_rate'],
           batch_size=config['train']['batch_size'],
           object_scale=config['train']['object_scale'],
           no_object_scale=config['train']['no_object_scale'],
           coord_scale=config['train']['coord_scale'],
           class_scale=config['train']['class_scale'],
           saved_weights_name=config['train']['saved_weights_name'],
           debug=config['train']['debug'])
def _main_(args):
    """Train YOLO's last layer from scratch (with warm-up) on a config-driven dataset.

    BUG FIX: ``config['model']['labels']`` was assigned ``train_labels.keys()``
    (a dict_keys view); it now stores a real list, matching the sibling
    variants in this file.
    """
    config_path = args.conf
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Parse the annotations
    ###############################
    # train_imgs is a list of dicts with keys:
    #   object   : list of objects in the image, each a dict with the object
    #              name and box coordinates (xmin, xmax, ymin, ymax)
    #   filename : complete path of the image
    #   width    : original pixel width
    #   height   : original pixel height
    # train_labels contains the per-class annotation counts.
    train_imgs, train_labels = parse_annotation(
        config['train']['train_annot_folder'],
        config['train']['train_image_folder'],
        config['model']['labels'])

    # Use the configured validation set if present; otherwise split the
    # training set 80/20.
    if os.path.exists(config['valid']['valid_annot_folder']):
        valid_imgs, valid_labels = parse_annotation(
            config['valid']['valid_annot_folder'],
            config['valid']['valid_image_folder'],
            config['model']['labels'])
    else:
        train_valid_split = int(0.8 * len(train_imgs))
        np.random.shuffle(train_imgs)
        valid_imgs = train_imgs[train_valid_split:]
        train_imgs = train_imgs[:train_valid_split]
    print('{} train images and {} validation images'.format(
        len(train_imgs), len(valid_imgs)))

    # Parse annotations of the testing set.
    test_imgs, test_labels = parse_annotation(
        config['test']['test_annot_folder'],
        config['test']['test_image_folder'],
        config['model']['labels'])

    if len(config['model']['labels']) > 0:
        overlap_labels = set(config['model']['labels']).intersection(
            set(train_labels.keys()))
        print('Seen labels:\t', len(train_labels))
        print('Given labels:\t', len(config['model']['labels']))
        print('Overlap labels:\t', len(overlap_labels))
        print(overlap_labels)
        if len(overlap_labels) < len(config['model']['labels']):
            print(
                'Some labels have no annotations! Please revise the list of labels in the config.json file!'
            )
            return
    else:
        print('No labels are provided. Train on all seen labels.')
        print(train_labels.keys())
        config['model']['labels'] = list(train_labels.keys())

    ###############################
    #   Construct the model
    ###############################
    yolo = YOLO(feature_extractor=config['model']['backend'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ##############################################################
    #   Start training the last layer from scratch with warm up
    ##############################################################
    yolo.train(train_imgs=train_imgs,
               valid_imgs=valid_imgs,
               test_imgs=test_imgs,
               pretrained_weights='',
               nb_epochs=config['train']['nb_epochs'],
               learning_rate=config['train']['learning_rate'],
               batch_size=config['train']['batch_size'],
               object_scale=config['train']['object_scale'],
               no_object_scale=config['train']['no_object_scale'],
               coord_scale=config['train']['coord_scale'],
               class_scale=config['train']['class_scale'],
               saved_weights_name=config['train']['saved_weights_name'],
               train_last_epoch=3,
               freeze_BN=True,
               train_mode=False,
               debug=True)
def _main_(args):
    """End-to-end YOLO training entry point.

    Reads the JSON configuration named by ``args.conf``, gathers the
    annotated images, verifies the requested labels occur in the data,
    then builds the model and runs training.
    """
    with open(args.conf) as config_buffer:
        config = json.loads(config_buffer.read())

    model_cfg = config['model']
    train_cfg = config['train']
    valid_cfg = config['valid']

    # Training annotations: image descriptors plus per-label counts.
    train_imgs, train_labels = parse_annotation(
        train_cfg['train_annot_folder'],
        train_cfg['train_image_folder'], model_cfg['labels'])

    # Prefer an explicit validation set; fall back to an 80/20 split
    # of the shuffled training images.
    if os.path.exists(valid_cfg['valid_annot_folder']):
        valid_imgs, valid_labels = parse_annotation(
            valid_cfg['valid_annot_folder'],
            valid_cfg['valid_image_folder'], model_cfg['labels'])
    else:
        split_at = int(0.8 * len(train_imgs))
        np.random.shuffle(train_imgs)
        valid_imgs = train_imgs[split_at:]
        train_imgs = train_imgs[:split_at]

    # Report which of the requested labels actually occur in the data.
    overlap_labels = set(model_cfg['labels']) & set(train_labels.keys())
    print('Seen labels:\t', train_labels)
    print('Given labels:\t', model_cfg['labels'])
    print('Overlap labels:\t', overlap_labels)

    if len(overlap_labels) < len(model_cfg['labels']):
        print(
            'Some labels have no images! Please revise the list of labels in the config.json file!'
        )
        return

    # Build the detector as specified by the config.
    yolo = YOLO(architecture=model_cfg['architecture'],
                input_size=model_cfg['input_size'],
                labels=model_cfg['labels'],
                max_box_per_image=model_cfg['max_box_per_image'],
                anchors=model_cfg['anchors'])

    # Resume from pre-trained weights when the file exists.
    if os.path.exists(train_cfg['pretrained_weights']):
        print("Loading pre-trained weights in",
              train_cfg['pretrained_weights'])
        yolo.load_weights(train_cfg['pretrained_weights'])

    # Kick off the optimization loop.
    yolo.train(train_imgs=train_imgs,
               valid_imgs=valid_imgs,
               train_times=train_cfg['train_times'],
               valid_times=valid_cfg['valid_times'],
               nb_epoch=train_cfg['nb_epoch'],
               learning_rate=train_cfg['learning_rate'],
               batch_size=train_cfg['batch_size'],
               warmup_bs=train_cfg['warmup_batches'],
               object_scale=train_cfg['object_scale'],
               no_object_scale=train_cfg['no_object_scale'],
               coord_scale=train_cfg['coord_scale'],
               class_scale=train_cfg['class_scale'],
               saved_weights_name=train_cfg['saved_weights_name'],
               debug=train_cfg['debug'])
# NOTE(review): script fragment operating on names (imgs, labels, valid_imgs,
# train_times, ...) that are defined elsewhere in this file — confirm context.
train_imgs = imgs[:train_valid_split]

# NOTE(review): `labels` is used both as an iterable and as a mapping
# (`labels.keys()`), so this intersects a collection with itself when
# `labels` is a dict, and raises AttributeError when it is a list —
# looks like a bug; verify the intended operands.
overlap_labels = set(labels).intersection(set(labels.keys()))
# print("Seen labels: "+str(labels))
# print("Given labels: "+str(labels)
print("Overelap labels: " + str(overlap_labels))

# Warn (but deliberately do not abort) when some labels have no images.
if len(overlap_labels) < len(labels):
    print("Some labels have no image! Please check it.")

# Build the detector and launch training with positional arguments.
yolo = YOLO(architecture=architecture,
            input_size=input_size,
            labels=labels,
            max_box_per_img=max_box_per_img,
            anchors=anchors)
yolo.train(train_imgs, valid_imgs, train_times, valid_times, nb_epoch,
           learning_rate, batch_size, warmup_batches, object_scale,
           no_object_scale, coord_scale, class_scale,
           saved_weights_name=saved_weights_name)
def _main_(args):
    """Train YOLO from the JSON config named by ``args.conf``.

    Pipeline: parse annotations -> shuffle and split train/valid ->
    sanity-check labels -> build the model -> optionally load
    pretrained weights -> train.
    """
    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Parse the annotations
    ###############################
    # train_imgs is a list of dicts, one per image:
    #   img['filename']: str, full path of the image
    #   img['width']   : int, image width
    #   img['height']  : int, image height
    #   img['object']  : list of annotated objects, each a dict with
    #       obj['name']: str, the object's label
    #       obj['xmin'] / obj['xmax'] / obj['ymin'] / obj['ymax']: int
    # train_labels is a dict mapping each label to its occurrence count.
    train_imgs, train_labels = parse_annotation(
        config['train']['train_annot_folder'],
        config['train']['train_image_folder'], config['model']['labels'])

    # Shuffle before a possible split so the 80/20 partition is random.
    np.random.shuffle(train_imgs)

    # parse annotations of the validation set, if any, otherwise split
    # the training set
    if os.path.exists(config['valid']['valid_annot_folder']):
        valid_imgs, valid_labels = parse_annotation(
            config['valid']['valid_annot_folder'],
            config['valid']['valid_image_folder'], config['model']['labels'])
    else:
        train_valid_split = int(0.8 * len(train_imgs))
        valid_imgs = train_imgs[train_valid_split:]
        train_imgs = train_imgs[:train_valid_split]

    if len(config['model']['labels']) > 0:
        overlap_labels = set(config['model']['labels']).intersection(
            set(train_labels.keys()))

        print('Seen labels:\t', train_labels)
        print('Given labels:\t', config['model']['labels'])
        print('Overlap labels:\t', overlap_labels)

        # Abort when some requested label never appears in the data.
        if len(overlap_labels) < len(config['model']['labels']):
            print(
                'Some labels have no annotations! Please revise the list of labels in the config.json file!'
            )
            return
    else:
        print('No labels are provided. Train on all seen labels.')
        print(f"labels:{train_labels}")
        # BUGFIX: materialize the dict_keys view into a list; a view does
        # not support indexing and silently tracks later dict mutation.
        config['model']['labels'] = list(train_labels.keys())

    ###############################
    #   Construct the model
    ###############################
    # Only the feature-extractor (convolutional) part is selected by the
    # configured backend; the detection head is built on top of it.
    # When the configured label list is empty, all seen labels are used
    # (handled above). The number of anchors must be consistent with
    # max_box_per_image to be meaningful.
    yolo = YOLO(backend=config['model']['backend'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ###############################
    #   Load the pretrained weights (if any)
    ###############################
    # NOTE: the weights loaded here must come from a previous run of this
    # script — the feature extractor is wrapped in its own layer, so
    # generic weight files will not fit this architecture.
    if os.path.exists(config['train']['pretrained_weights']):
        print("Loading pre-trained weights in",
              config['train']['pretrained_weights'])
        yolo.load_weights(config['train']['pretrained_weights'])

    ###############################
    #   Start the training process
    ###############################
    yolo.train(train_imgs=train_imgs,
               valid_imgs=valid_imgs,
               train_times=config['train']['train_times'],
               valid_times=config['valid']['valid_times'],
               nb_epochs=config['train']['nb_epochs'],
               learning_rate=config['train']['learning_rate'],
               batch_size=config['train']['batch_size'],
               warmup_epochs=config['train']['warmup_epochs'],
               object_scale=config['train']['object_scale'],
               no_object_scale=config['train']['no_object_scale'],
               coord_scale=config['train']['coord_scale'],
               class_scale=config['train']['class_scale'],
               saved_weights_name=config['train']['saved_weights_name'],
               debug=config['train']['debug'])
def _main_(args):
    """Train YOLO from a JSON config, supporting CSV or XML annotations.

    Annotation folders whose path contains 'csv' are parsed with
    ``parse_csv_annotations``; everything else goes through
    ``parse_annotation``. ``args.conf`` names the JSON configuration.
    """
    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Parse the annotations
    ###############################
    # parse annotations of the training set
    if 'csv' in config['train']['train_annot_folder']:
        train_imgs, train_labels = parse_csv_annotations(
            config['train']['train_annot_folder'],
            config['train']['train_image_folder'], config['model']['labels'])
    else:
        train_imgs, train_labels = parse_annotation(
            config['train']['train_annot_folder'],
            config['train']['train_image_folder'], config['model']['labels'])

    # parse annotations of the validation set, if any, otherwise split
    # the training set 80/20
    if os.path.exists(config['valid']['valid_annot_folder']):
        if 'csv' in config['valid']['valid_annot_folder']:
            valid_imgs, valid_labels = parse_csv_annotations(
                config['valid']['valid_annot_folder'],
                config['valid']['valid_image_folder'],
                config['model']['labels'])
        else:
            valid_imgs, valid_labels = parse_annotation(
                config['valid']['valid_annot_folder'],
                config['valid']['valid_image_folder'],
                config['model']['labels'])
    else:
        train_valid_split = int(0.8 * len(train_imgs))
        np.random.shuffle(train_imgs)
        valid_imgs = train_imgs[train_valid_split:]
        train_imgs = train_imgs[:train_valid_split]

    if len(config['model']['labels']) > 0:
        overlap_labels = set(config['model']['labels']).intersection(
            set(train_labels.keys()))

        print('Seen labels:\t', train_labels)
        print('Given labels:\t', config['model']['labels'])
        print('Overlap labels:\t', overlap_labels)

        # Abort when a requested label has no annotated examples.
        if len(overlap_labels) < len(config['model']['labels']):
            print(
                'Some labels have no annotations! Please revise the list of labels in the config.json file!'
            )
            return
    else:
        print('No labels are provided. Train on all seen labels.')
        # BUGFIX: materialize the dict_keys view into a list; a view does
        # not support indexing and silently tracks later dict mutation.
        config['model']['labels'] = list(train_labels.keys())

    ###############################
    #   Construct the model
    ###############################
    yolo = YOLO(backend=config['model']['backend'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ###############################
    #   Load the pretrained weights (if any)
    ###############################
    if os.path.exists(config['train']['pretrained_weights']):
        print("Loading pre-trained weights in",
              config['train']['pretrained_weights'])
        yolo.load_weights(config['train']['pretrained_weights'])

    ###############################
    #   Start the training process
    ###############################
    yolo.train(train_imgs=train_imgs,
               valid_imgs=valid_imgs,
               train_times=config['train']['train_times'],
               valid_times=config['valid']['valid_times'],
               nb_epochs=config['train']['nb_epochs'],
               learning_rate=config['train']['learning_rate'],
               batch_size=config['train']['batch_size'],
               warmup_epochs=config['train']['warmup_epochs'],
               object_scale=config['train']['object_scale'],
               no_object_scale=config['train']['no_object_scale'],
               coord_scale=config['train']['coord_scale'],
               class_scale=config['train']['class_scale'],
               saved_weights_name=config['train']['saved_weights_name'],
               debug=config['train']['debug'])
def trainer(config_path):
    """Train YOLO from the JSON config at ``config_path``.

    The 'train' section must name a log directory ('logdir'); the config
    file is copied there for reproducibility before training starts.

    Raises:
        Exception: if 'logdir' is missing from the 'train' section.
    """
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    if 'logdir' in config['train']:
        logdir = config['train']['logdir']
    else:
        # BUGFIX: the exception was constructed but never raised, which
        # let execution fall through to an undefined `logdir` (NameError).
        raise Exception("В конфиге должна быть указана папка для логов!")

    # Resolve the log directory relative to the config file's location.
    logdir = os.path.dirname(config_path) + '/' + logdir
    os.makedirs(logdir, exist_ok=True)
    # BUGFIX: use os.path.join so the copy target is correct even when
    # the configured logdir has no trailing slash.
    shutil.copy(config_path, os.path.join(logdir, 'config.json'))

    ###############################
    #   Parse the annotations
    ###############################
    # parse annotations of the training set
    train_imgs, train_labels = parse_annotation(
        config['train']['train_annot_folder'],
        config['train']['train_image_folder'],
        config['model']['labels'])

    # parse annotations of the validation set, if any, otherwise split
    # the (shuffled) training set 80/20
    if os.path.exists(config['valid']['valid_annot_folder']):
        valid_imgs, valid_labels = parse_annotation(
            config['valid']['valid_annot_folder'],
            config['valid']['valid_image_folder'],
            config['model']['labels'])
    else:
        train_valid_split = int(0.8 * len(train_imgs))
        np.random.shuffle(train_imgs)
        valid_imgs = train_imgs[train_valid_split:]
        train_imgs = train_imgs[:train_valid_split]

    if len(config['model']['labels']) > 0:
        overlap_labels = set(config['model']['labels']).intersection(
            set(train_labels.keys()))

        print('Seen labels:\t', train_labels)
        print('Given labels:\t', config['model']['labels'])
        print('Overlap labels:\t', overlap_labels)

        # Abort when a requested label has no annotated examples.
        if len(overlap_labels) < len(config['model']['labels']):
            print('Some labels have no annotations! Please revise the list of labels in the config.json file!')
            return
    else:
        print('No labels are provided. Train on all seen labels.')
        # BUGFIX: materialize the dict_keys view into a list; a view does
        # not support indexing and silently tracks later dict mutation.
        config['model']['labels'] = list(train_labels.keys())

    ###############################
    #   Construct the model
    ###############################
    yolo = YOLO(architecture=config['model']['architecture'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ###############################
    #   Load the pretrained weights (if any)
    ###############################
    if os.path.exists(config['train']['pretrained_weights']):
        print("Loading pre-trained weights in {}".format(
            config['train']['pretrained_weights']))
        yolo.load_weights(config['train']['pretrained_weights'])

    ###############################
    #   Start the training process
    ###############################
    # Periodic weight-saving frequency; 0 disables intermediate saves.
    freq = config['train']['weights_saving_freq'] if 'weights_saving_freq' in config['train'] else 0

    yolo.train(train_imgs=train_imgs,
               valid_imgs=valid_imgs,
               train_times=config['train']['train_times'],
               valid_times=config['valid']['valid_times'],
               nb_epoch=config['train']['nb_epoch'],
               learning_rate=config['train']['learning_rate'],
               batch_size=config['train']['batch_size'],
               warmup_epochs=config['train']['warmup_epochs'],
               object_scale=config['train']['object_scale'],
               no_object_scale=config['train']['no_object_scale'],
               coord_scale=config['train']['coord_scale'],
               class_scale=config['train']['class_scale'],
               saved_weights_name=config['train']['saved_weights_name'],
               saving_freq=freq,
               debug=config['train']['debug'],
               logdir=logdir)