# Evaluation entry point: build the test batch generator, restore an SSD
# model, and prepare output directories for detection visualizations.
# NOTE(review): flattened fragment — the final ImageVisualizer(...) call
# continues past the end of this chunk.
if __name__ == '__main__':
    # Per-architecture settings (anchor scales, feature-map sizes, ...).
    with open('./config.yml') as f:
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted files — prefer yaml.safe_load.
        cfg = yaml.load(f)
    try:
        config = cfg[args.arch.upper()]
    except AttributeError:
        # AttributeError fires when args.arch is not a string (no .upper()).
        # NOTE(review): a missing key raises KeyError, which is NOT caught here.
        raise ValueError('Unknown architecture: {}'.format(args.arch))

    default_boxes = generate_default_boxes(config)

    # Test mode: no validation generator is returned.
    batch_generator, info = create_batch_generator(
        args.data_dir, args.data_year, default_boxes,
        config['image_size'], BATCH_SIZE, args.num_examples, mode='test')

    try:
        ssd = create_ssd(NUM_CLASSES, args.arch,
                         args.pretrained_type,
                         args.checkpoint_dir,
                         args.checkpoint_path)
    except Exception as e:
        # Any failure while building/restoring the model aborts the run.
        print(e)
        print('The program is exiting...')
        sys.exit()

    os.makedirs('outputs/images', exist_ok=True)
    os.makedirs('outputs/detects', exist_ok=True)

    visualizer = ImageVisualizer(info['idx_to_name'],
# Flattened training-script fragment for the MobileNetV1-lite SSD300 variant,
# with hard-coded experiment configuration instead of CLI arguments.
# NOTE(review): the trailing PiecewiseConstantDecay(...) call continues past
# the end of this chunk.
from anchor import generate_default_boxes
from losses import create_losses
from settings import *

# Fixed experiment configuration (architecture + checkpoint locations).
ARCH = 'pre_ssd300-mobilenetv1'
CHECKPOINT_DIR = 'checkpoint/pre_mobilenetv1_lite'
CHECKPOINT_PATH = 'checkpoint/pre_mobilenetv1_lite/ssd_epoch_200.h5'

os.makedirs(CHECKPOINT_DIR, exist_ok=True)

# Anchor (default) boxes for this architecture, from the settings table.
default_boxes = generate_default_boxes(INFO[ARCH])

# Pin everything below to the first GPU.
with tf.device('/device:GPU:0'):
    batch_generator, val_generator, info = create_batch_generator(
        DATA_DIR, DATA_YEAR, default_boxes, SIZE,
        BATCH_SIZE, NUM_BATCHES, mode='train',
        augmentation=['flip'])  # the patching algorithm is currently causing bottleneck sometimes

    # Dummy forward-pass input; presumably used to build the model's
    # variables before loading weights — TODO confirm downstream usage.
    dummy = tf.random.normal((1, 300, 300, 3))

    ssd = create_pre_ssd_mobilenetv1_lite(weights=None)
    pretrained_type = 'specified'
    checkpoint_path = CHECKPOINT_PATH
    net = init_ssd(ssd, pretrained_type, checkpoint_path)

    # SSD loss with hard-negative mining at the given negative:positive ratio.
    criterion = create_losses(NEG_RATIO, NUM_CLASSES)

    steps_per_epoch = info['length'] // BATCH_SIZE

    lr_fn = PiecewiseConstantDecay(
# Evaluation entry point for a Keras-saved SSD model on local sample data.
# NOTE(review): flattened fragment — the final ImageVisualizer(...) call
# continues past the end of this chunk.
if __name__ == '__main__':
    with open('./config.yml') as f:
        # NOTE(review): yaml.load without a Loader is deprecated; prefer yaml.safe_load.
        cfg = yaml.load(f)
    try:
        config = cfg[args.arch.upper()]
    except AttributeError:
        # Fires when args.arch is not a string; a missing config key would
        # raise KeyError instead and is NOT caught here.
        raise ValueError('Unknown architecture: {}'.format(args.arch))

    default_boxes = generate_default_boxes(config)

    # Hard-coded sample dataset; ground truth comes from a pickled labels file.
    batch_generator, info = create_batch_generator(
        "./sample_data/images/", "./sample_data/gt/labels.pkl",
        default_boxes, config['image_size'],
        batch_size, args.num_examples, mode='test')

    try:
        # ssd = get_mobilenet_SSD(image_size=(300,300,3), num_classes=num_classes)
        ssd = keras.models.load_model("./models/ssd1.h5")
    except Exception as e:
        # Any failure while loading the saved model aborts the run.
        print(e)
        print('The program is exiting...')
        sys.exit()

    os.makedirs('outputs/images', exist_ok=True)
    os.makedirs('outputs/detects', exist_ok=True)

    visualizer = ImageVisualizer(info['idx_to_name'],
# Flattened training-setup fragment: CLI-driven data pipeline (augmentation,
# caching, snapshotting all from args) followed by SSD model restore.
with open('./config.yml') as f:
    # NOTE(review): yaml.load without a Loader is deprecated; prefer yaml.safe_load.
    cfg = yaml.load(f)
try:
    config = cfg[args.arch.upper()]
except AttributeError:
    # Fires when args.arch is not a string; a missing key raises KeyError
    # and is NOT caught here.
    raise ValueError('Unknown architecture: {}'.format(args.arch))

default_boxes = generate_default_boxes(config)

# Train mode also yields a validation generator.
batch_generator, val_generator, info = create_batch_generator(
    args.data_dir, args.data_year, default_boxes,
    config['image_size'], args.batch_size, args.num_batches,
    mode='train',
    augmentation=args.augment_type,
    caching_period=args.caching_period,
    snapshot_path=args.snapshot_path
)  # the patching algorithm is currently causing bottleneck sometimes

try:
    ssd = create_ssd(NUM_CLASSES, args.arch,
                     args.pretrained_type,
                     checkpoint_dir=args.checkpoint_dir)
except Exception as e:
    # Any failure while building/restoring the model aborts the run.
    print(e)
    print('The program is exiting...')
    sys.exit()
# Flattened training-setup fragment: local ./data dataset, MobileNet-SSD
# built from scratch with the backbone frozen.
# NOTE(review): the final try: block's except clause lies past the end of
# this chunk.
with open('./config.yml') as f:
    # NOTE(review): yaml.load without a Loader is deprecated; prefer yaml.safe_load.
    cfg = yaml.load(f)
try:
    config = cfg[args.arch.upper()]
except AttributeError:
    # Fires when args.arch is not a string; a missing key raises KeyError
    # and is NOT caught here.
    raise ValueError('Unknown architecture: {}'.format(args.arch))

default_boxes = generate_default_boxes(config)

# Hard-coded local dataset; ground truth comes from a pickled labels file.
batch_generator, val_generator, info = create_batch_generator(
    "./data/images/", "./data/gt/labels.pkl",
    default_boxes, config['image_size'],
    args.batch_size, args.num_batches, mode='train',
    augmentation=[
        'flip'
    ])  # the patching algorithm is currently causing bottleneck sometimes

try:
    ssd = get_mobilenet_SSD(image_size=(224, 224, 3), num_classes=NUM_CLASSES)
    # ssd = keras.models.load_model("./models/ssd_low_batch.h5")

    # this might need to be changed
    # Freeze every backbone ("base") layer so only the SSD heads train.
    for layer in ssd.layers:
        if 'base' in layer.name:
            layer.trainable = False
# Flattened training-setup fragment: CLI-driven data pipeline with fixed
# flip augmentation, followed by SSD model restore.
with open('./config.yml') as f:
    # NOTE(review): yaml.load without a Loader is deprecated; prefer yaml.safe_load.
    cfg = yaml.load(f)
try:
    config = cfg[args.arch.upper()]
except AttributeError:
    # Fires when args.arch is not a string; a missing key raises KeyError
    # and is NOT caught here.
    raise ValueError('Unknown architecture: {}'.format(args.arch))

default_boxes = generate_default_boxes(config)

# Train mode also yields a validation generator.
batch_generator, val_generator, info = create_batch_generator(
    args.data_dir, args.data_year, default_boxes,
    config['image_size'], args.batch_size, args.num_batches,
    mode='train',
    augmentation=[
        'flip'
    ])  # the patching algorithm is currently causing bottleneck sometimes

try:
    ssd = create_ssd(NUM_CLASSES, args.arch,
                     args.pretrained_type,
                     checkpoint_dir=args.checkpoint_dir)
except Exception as e:
    # Any failure while building/restoring the model aborts the run.
    print(e)
    print('The program is exiting...')
    sys.exit()
# Training entry point for SSD300 (comments translated from Korean).
if __name__ == '__main__':
    # Create the checkpoint directory for the model if it does not exist.
    os.makedirs(args.checkpoint_dir, exist_ok=True)

    # Load the preset SSD300 settings and apply them (anchors,
    # feature-map sizes, etc.).
    with open('./config.yml') as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)
    config = cfg['SSD300']
    default_boxes = generate_default_boxes(config)

    # Fetch the dataset configured in voc_data.py in batch form.
    batch_generator, val_generator, info = create_batch_generator(
        args.data_dir, default_boxes,
        args.batch_size, args.num_batches, mode='train')

    # Set up the actual SSD model; if a model that was mid-training exists,
    # it can be picked up and reused as-is.
    try:
        ssd = create_ssd(NUM_CLASSES,
                         args.pretrained_type,
                         checkpoint_dir=args.checkpoint_dir)
    except Exception as e:
        print(e)
        print('The program is exiting...')
        sys.exit()

    # Compute the loss by applying hard negative mining.
    criterion = create_losses(args.neg_ratio, NUM_CLASSES)
# NOTE(review): flattened fragment — the first two statements are the tail of
# a prediction helper whose def starts before this chunk; indentation level
# of that tail is assumed to be one function-body level — TODO confirm.
    # Convert the score tensor to NumPy and return the detection triple.
    scores = out_scores.numpy()
    return boxes, classes, scores


# Define the model and save the results of the detected images —
# localizations, labels, and so on. (Translated from Korean.)
if __name__ == '__main__':
    with open('./config.yml') as f:
        # NOTE(review): yaml.load without a Loader is deprecated; prefer yaml.safe_load.
        cfg = yaml.load(f)
    config = cfg['SSD300']
    default_boxes = generate_default_boxes(config)

    # Test mode: no validation generator is returned.
    batch_generator, info = create_batch_generator(
        args.data_dir, default_boxes,
        BATCH_SIZE, args.num_examples, mode='test')

    try:
        ssd = create_ssd(NUM_CLASSES,
                         args.pretrained_type,
                         args.checkpoint_dir,
                         args.checkpoint_path)
    except Exception as e:
        # Any failure while building/restoring the model aborts the run.
        print(e)
        print('The program is exiting...')
        sys.exit()

    os.makedirs('outputs/images', exist_ok=True)
    os.makedirs('outputs/detects', exist_ok=True)

    visualizer = ImageVisualizer(info['idx_to_name'],
                                 save_dir='outputs/images')