Example #1
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # create the generators
    train_generator, validation_generator = create_generators(args)

    num_classes = train_generator.num_classes()
    model, prediction_model, debug_model = centernet(
        num_classes=num_classes, input_size=args.input_size, freeze_bn=False)

    # optionally resume training from a saved checkpoint
    if args.resume:
        print('Loading model, this may take a second...')
        model.load_weights(args.resume, by_name=True, skip_mismatch=True)

    # compile model
    model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-3),
                  loss={
                      'centernet_loss': lambda y_true, y_pred: y_pred
                  })
    # model.compile(optimizer=SGD(lr=1e-5, momentum=0.9, nesterov=True, decay=1e-5),loss={'centernet_loss': lambda y_true, y_pred:y_pred})

    # print the model summary
    model.summary()
    # create the callbacks
    callbacks = create_callbacks(
        model,
        prediction_model,
        validation_generator,
        args,
    )

    if not args.compute_val_loss:
        validation_generator = None

    # start training
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=args.steps,
                        initial_epoch=0,
                        epochs=args.epochs,
                        verbose=1,
                        callbacks=callbacks,
                        workers=args.workers,
                        use_multiprocessing=args.multiprocessing,
                        max_queue_size=args.max_queue_size,
                        validation_data=validation_generator)

    return 0
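
Example #1 depends on a `parse_args` helper that is not shown. As a rough, hypothetical sketch only, the options that `main()` actually reads could be declared as follows (dataset and generator options are omitted, and the defaults are invented here; the real parser in the repository may differ):

import argparse


def parse_args(args):
    # hypothetical sketch of the CLI; option names mirror the attributes used in main()
    parser = argparse.ArgumentParser(description='Train a CenterNet detector.')
    parser.add_argument('--gpu', help='GPU id to expose via CUDA_VISIBLE_DEVICES.')
    parser.add_argument('--resume', help='Path of a checkpoint to resume training from.')
    parser.add_argument('--input-size', type=int, default=512)
    parser.add_argument('--steps', type=int, default=1000, help='Steps per epoch.')
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--workers', type=int, default=1)
    parser.add_argument('--multiprocessing', action='store_true')
    parser.add_argument('--max-queue-size', type=int, default=10)
    parser.add_argument('--compute-val-loss', action='store_true')
    return parser.parse_args(args)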
Example #2
    'sofa': 17,
    'train': 18,
    'tvmonitor': 19
}

model_path = '/home/tienduchoang/Videos/models/keras-centernet/checkpoints/pretrain_model.h5'
num_classes = len(voc_classes)
classes = list(voc_classes.keys())
flip_test = True
nms = True
keep_resolution = False
score_threshold = 0.1
colors = [np.random.randint(0, 256, 3).tolist() for i in range(num_classes)]
model, prediction_model, debug_model = centernet(
    num_classes=num_classes,
    nms=nms,
    flip_test=flip_test,
    freeze_bn=True,
    score_threshold=score_threshold)
prediction_model.load_weights(model_path, by_name=True, skip_mismatch=True)

# image = generator.load_image(i)
image_path = "./data/1.jpg"
image = cv2.imread(image_path)

src_image = image.copy()

c = np.array([image.shape[1] / 2., image.shape[0] / 2.], dtype=np.float32)
s = max(image.shape[0], image.shape[1]) * 1.0

input_size = 512
tgt_w = input_size
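
Example #2 stops right after setting `tgt_w`. A hedged continuation, assuming the preprocessing mirrors Example #3 below and that each decoded row is `[x1, y1, x2, y2, score, class_id]` (both assumptions, not confirmed by the snippet), might look roughly like this; `tgt_h` is introduced purely for illustration:

tgt_h = input_size

# warp the image to the square network input with the same affine transform as Example #3
# (assumes get_affine_transform from generators.utils is imported)
trans_input = get_affine_transform(c, s, (tgt_w, tgt_h))
inputs = cv2.warpAffine(image, trans_input, (tgt_w, tgt_h), flags=cv2.INTER_LINEAR)
inputs = inputs.astype(np.float32) / 255.0  # normalization by 255 follows Example #3

# run detection and keep boxes above the score threshold (column layout is an assumption)
detections = prediction_model.predict(inputs[np.newaxis, ...])[0]
detections = detections[detections[:, 4] > score_threshold]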
Example #3
from models.resnet import centernet
import cv2
import numpy as np
from generators.utils import get_affine_transform, affine_transform
# build the training, deployment and debug models for a single class
trainmodel, deploymodel, debugmodel = centernet(1)
# load the trained weights
deploymodel.load_weights('checkpoints/2020-04-11/save_model.h5',
                         by_name=True,
                         skip_mismatch=True)

# print the model structure
debugmodel.summary()
# read the image and keep its original size
img = cv2.imread("1.jpg")
img_w = img.shape[1]
img_h = img.shape[0]
# preprocess: convert BGR -> RGB
image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# compute the center and scale, then warp to the 512x512 network input
c = np.array([image.shape[1] / 2., image.shape[0] / 2.], dtype=np.float32)
s = max(image.shape[0], image.shape[1]) * 1.0
trans_input = get_affine_transform(c, s, (512, 512))
image = cv2.warpAffine(image, trans_input, (512, 512), flags=cv2.INTER_LINEAR)
image = image.astype(np.float32)
# normalize to [0, 1] and add the batch dimension
net_input = image / 255.0
net_input = net_input[np.newaxis, :]
# forward pass
outputs = deploymodel(net_input)
# decoded detections for the first (and only) image in the batch
bboxes = outputs[0].numpy()
# demo the result
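
To visualize the output, the decoded boxes have to be mapped back from the network-input space to the original image. The sketch below assumes each row of `bboxes` is `[x1, y1, x2, y2, score, class_id]` in 512x512 input coordinates; if the repository instead decodes in the downsampled output space, the coordinates would first need to be scaled by the output stride (4 for a 512 input). The threshold is illustrative.

score_threshold = 0.3  # illustrative value

# invert the preprocessing affine transform to go back to original-image coordinates
trans_inv = cv2.invertAffineTransform(trans_input)

for x1, y1, x2, y2, score, cls in bboxes:
    if score < score_threshold:
        continue
    # map both box corners through the inverse transform
    p1 = trans_inv @ np.array([x1, y1, 1.0])
    p2 = trans_inv @ np.array([x2, y2, 1.0])
    cv2.rectangle(img, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), (0, 255, 0), 2)
    cv2.putText(img, '%d: %.2f' % (int(cls), score), (int(p1[0]), max(0, int(p1[1]) - 5)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

cv2.imwrite('result.jpg', img)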
Example #4
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # K.set_session(get_session())
    # tf.config.gpu.set_per_process_memory_growth(True)
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    print('Physical GPUs:', physical_devices)

    # if len(physical_devices) > 0:
    #     for k in range(len(physical_devices)):
    #         tf.config.experimental.set_memory_growth(physical_devices[k], True)
    #         print('memory growth:', tf.config.experimental.get_memory_growth(physical_devices[k]))
    # else:
    #     print("Not enough GPU hardware devices available")

    # create the generators
    train_generator, validation_generator = create_generators(args)

    num_classes = train_generator.num_classes()
    model, prediction_model, debug_model = centernet(
        num_classes=num_classes, input_size=args.input_size, freeze_bn=True)

    # load the pretrained snapshot
    print('Loading model, this may take a second...')
    # model.load_weights(args.snapshot, by_name=True, skip_mismatch=True)
    model.load_weights(args.snapshot, by_name=True)

    # optionally freeze the backbone layers
    if args.freeze_backbone:
        for i in range(190):
            # for i in range(175):
            model.layers[i].trainable = False

    # compile model
    model.compile(optimizer=Adam(lr=1e-3),
                  loss={
                      'centernet_loss': lambda y_true, y_pred: y_pred
                  })
    # model.compile(optimizer=SGD(lr=1e-5, momentum=0.9, nesterov=True, decay=1e-5),
    #               loss={'centernet_loss': lambda y_true, y_pred: y_pred})

    # print model summary
    # print(model.summary())

    # create the callbacks
    callbacks = create_callbacks(
        model,
        prediction_model,
        validation_generator,
        args,
    )

    if not args.compute_val_loss:
        validation_generator = None

    # start training
    return model.fit_generator(generator=train_generator,
                               steps_per_epoch=args.steps,
                               initial_epoch=0,
                               epochs=args.epochs,
                               verbose=1,
                               callbacks=callbacks,
                               workers=args.workers,
                               use_multiprocessing=args.multiprocessing,
                               max_queue_size=args.max_queue_size,
                               validation_data=validation_generator)
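
Example #4 leaves the GPU memory-growth configuration commented out. A minimal working form of that block, assuming TensorFlow 2.x, would be:

physical_devices = tf.config.experimental.list_physical_devices('GPU')
if physical_devices:
    for device in physical_devices:
        # allocate GPU memory on demand instead of reserving it all up front;
        # this must run before the GPUs are first used
        tf.config.experimental.set_memory_growth(device, True)
        print('memory growth:', tf.config.experimental.get_memory_growth(device))
else:
    print('Not enough GPU hardware devices available')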
Example #5
    total_loss = hm_loss + wh_loss + reg_loss
    return total_loss


if __name__ == '__main__':
    from generators.pascal import PascalVocGenerator
    from models.resnet import centernet
    import numpy as np

    generator = PascalVocGenerator('datasets/VOC0712',
                                   'train',
                                   skip_difficult=True,
                                   shuffle_groups=False,
                                   batch_size=1)

    model, prediction_model, debug_model = centernet(num_classes=20)
    debug_model.load_weights(
        'checkpoints/2019-11-06/pascal_31_1.0370_10.7204.h5',
        by_name=True,
        skip_mismatch=True)
    sess = tf.Session()
    for inputs, targets in generator:
        image_input, hm_true, wh_true, reg_true, reg_mask, indices = inputs
        hm_pred, wh_pred, reg_pred = debug_model.predict(image_input)
        np.save('debug/1106/hm_true', hm_true)
        np.save('debug/1106/wh_true', wh_true)
        np.save('debug/1106/reg_true', reg_true)
        np.save('debug/1106/reg_mask', reg_mask)
        np.save('debug/1106/indices', indices)
        np.save('debug/1106/hm_pred', hm_pred)
        np.save('debug/1106/wh_pred', wh_pred)
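
Example #5 only dumps the tensors to disk. A small follow-up sketch (not part of the repository) for inspecting those dumps, e.g. comparing the predicted and ground-truth heatmaps, could be:

import numpy as np

# np.save appends the .npy extension, so load with it
hm_true = np.load('debug/1106/hm_true.npy')
hm_pred = np.load('debug/1106/hm_pred.npy')

print('hm_true:', hm_true.shape, 'hm_pred:', hm_pred.shape)
print('max abs difference:', np.abs(hm_true - hm_pred).max())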