Example #1
def main(args, train_set, class_num, pre_ckpt, model_def, depth_multiplier,
         is_augmenter, image_size, output_size, batch_size, rand_seed,
         max_nrof_epochs, init_learning_rate, learning_rate_decay_factor,
         obj_weight, noobj_weight, wh_weight, obj_thresh, iou_thresh,
         validation_split, log_dir, is_prune, initial_sparsity, final_sparsity,
         end_epoch, frequency):
    # Build path
    log_dir = (Path(log_dir) /
               datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
               )  # type: Path
    ckpt_weights = log_dir / 'yolo_weights.h5'
    ckpt = log_dir / 'yolo_model.h5'
    if not log_dir.exists():
        log_dir.mkdir(parents=True)
    write_arguments_to_file(args, str(log_dir / 'args.txt'))

    # Build utils

    h = Helper(f'data/{train_set}_img_ann.npy', class_num,
               f'data/{train_set}_anchor.npy',
               np.reshape(np.array(image_size), (-1, 2)),
               np.reshape(np.array(output_size), (-1, 2)), validation_split)
    h.set_dataset(batch_size, rand_seed, is_training=(is_augmenter == 'True'))

    # Build network
    # NOTE: this branch is disabled (`if False`), so the model is always
    # built through convert.make_model below.
    if False:
        network = eval(model_def)  # type: yolo_mobilev2
        yolo_model, yolo_model_wrapper = network(
            [image_size[0], image_size[1], 3],
            len(h.anchors[0]),
            class_num,
            alpha=depth_multiplier)
    else:
        yolo_model, yolo_model_wrapper, output_size = \
            convert.make_model(model_def, model_def + '.weights',
                               f'data/{train_set}_anchor.npy',
                               h.train_epoch_step * end_epoch,
                               initial_sparsity,
                               final_sparsity,
                               frequency)
        tf.keras.models.save_model(yolo_model,
                                   f'pre_prun{model_def}',
                                   include_optimizer=False)
    if pre_ckpt is not None and pre_ckpt not in ('None', ''):
        if str(pre_ckpt).endswith('.h5'):
            yolo_model_wrapper.load_weights(str(pre_ckpt))
            print(INFO, f' Load CKPT {pre_ckpt}')
        else:
            print(ERROR, ' Pre CKPT path is invalid')

    # prune model
    pruning_params = {
        'pruning_schedule':
        sparsity.PolynomialDecay(initial_sparsity=.50,
                                 final_sparsity=.90,
                                 begin_step=0,
                                 end_step=h.train_epoch_step * end_epoch,
                                 frequency=frequency)
    }
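    # NOTE: pruning_params above is defined but never used in this snippet;
    # the pruning wrapper appears to be applied inside convert.make_model.
    # PolynomialDecay ramps layer sparsity from 50% to 90% over end_step
    # training steps, recomputing the pruning masks every `frequency` steps.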

    train_model = yolo_model_wrapper

    train_model.compile(
        keras.optimizers.Adam(lr=init_learning_rate,
                              decay=learning_rate_decay_factor),
        loss=[
            create_loss_fn(h, obj_thresh, iou_thresh, obj_weight, noobj_weight,
                           wh_weight, layer)
            for layer in range(
                len(train_model.output)
                if isinstance(train_model.output, list) else 1)
        ],
        metrics=[
            Yolo_Precision(obj_thresh, name='p'),
            Yolo_Recall(obj_thresh, name='r')
        ])
    """ NOTE fix the dataset output shape """
    shapes = (train_model.input.shape, tuple(h.output_shapes))
    h.train_dataset = h.train_dataset.apply(assert_element_shape(shapes))
    h.test_dataset = h.test_dataset.apply(assert_element_shape(shapes))
    """ Callbacks """
    if is_prune == 'True':
        cbs = [
            sparsity.UpdatePruningStep(),
            sparsity.PruningSummaries(log_dir=str(log_dir), profile_batch=0)
        ]
    else:
        cbs = [TensorBoard(str(log_dir), update_freq='batch', profile_batch=3)]

    # Training
    try:
        train_model.fit(h.train_dataset,
                        epochs=max_nrof_epochs,
                        steps_per_epoch=h.train_epoch_step,
                        callbacks=cbs,
                        validation_data=h.test_dataset,
                        validation_steps=int(h.test_epoch_step *
                                             h.validation_split))
    except KeyboardInterrupt:
        train_model.summary()
    if is_prune == 'True':
        final_model = tmot.sparsity.keras.strip_pruning(train_model)
        final_model.summary()
        model_name = 'sparse1.h5'
        yolo_model = tmot.sparsity.keras.strip_pruning(yolo_model)
        tf.keras.models.save_model(yolo_model,
                                   model_name,
                                   include_optimizer=False)
        tf.keras.models.save_model(yolo_model,
                                   f'{model_def}.tf',
                                   include_optimizer=False)
    else:
        keras.models.save_model(yolo_model, str(ckpt))
        print()
        print(INFO, f' Save Model as {str(ckpt)}')
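All of these examples pipe their tf.data pipelines through assert_element_shape, which is not shown here. Below is a minimal sketch of what such a transformation looks like, modeled on TF 1.x's tf.contrib.data.assert_element_shape; the helper these snippets actually import may differ.

import tensorflow as tf

def assert_element_shape(expected_shapes):
    """Return a tf.data transformation that pins (and checks) element shapes."""
    def _set_shapes(*elements):
        flat_tensors = tf.nest.flatten(elements)
        flat_shapes = tf.nest.flatten(expected_shapes)
        if len(flat_tensors) != len(flat_shapes):
            raise ValueError('dataset element structure does not match expected_shapes')
        for tensor, shape in zip(flat_tensors, flat_shapes):
            tensor.set_shape(shape)  # raises if the static shape is incompatible
        return tf.nest.pack_sequence_as(elements, flat_tensors)

    def _apply_fn(dataset):
        return dataset.map(_set_shapes)

    return _apply_fn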
Example #2
    num_train = len(lines) - int(len(lines) * val_split)
    num_val = int(len(lines) * val_split)
    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is usually enough to obtain a reasonably good model.

    model.compile(optimizer=Adam(lr=1e-3), loss={
        # use custom yolo_loss Lambda layer.
        'yolo_loss': lambda y_true, y_pred: y_pred})

    print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
    train_set = create_dataset(lines[:num_train], batch_size, input_shape, anchors, num_classes)
    val_set = create_dataset(lines[num_train:], batch_size, input_shape, anchors, num_classes)

    # labels are a single dummy tensor of shape [batch_size]
    shapes = (tuple(ins.shape for ins in model.input),
              tf.TensorShape([batch_size]))

    train_set = train_set.apply(assert_element_shape(shapes))
    val_set = val_set.apply(assert_element_shape(shapes))

    try:
        model.fit(train_set,
                  epochs=10,
                  validation_data=val_set, validation_steps=40,
                  steps_per_epoch=max(1, num_train // batch_size),
                  callbacks=[logging, checkpoint],
                  verbose=0)
    except KeyboardInterrupt:
        pass

    # train_set = YOLOSequence(lines[:num_train], batch_size, input_shape, anchors, num_classes)
    # model.fit_generator(train_set,
    #                     epochs=20,
    #                     steps_per_epoch=max(1, num_train // batch_size),
    #                     callbacks=[logging, checkpoint],
    #                     use_multiprocessing=True)
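Examples 2 and 4 compile the model with a pass-through loss (lambda y_true, y_pred: y_pred) because the YOLO loss is already computed inside the graph by a Lambda layer named yolo_loss. Here is a minimal, self-contained sketch of that pattern; the tiny model and the mean-squared "loss" are illustrative stand-ins, not this repo's actual network or loss.

import tensorflow as tf
from tensorflow import keras

img_in = keras.Input((224, 320, 3))
y_true_in = keras.Input((7, 10, 3, 6))  # hypothetical label placeholder

# A toy backbone producing a (7, 10, 3, 6) "prediction" grid.
feat = keras.layers.Conv2D(3 * 6, 3, padding='same')(img_in)
feat = keras.layers.AveragePooling2D(32)(feat)
feat = keras.layers.Reshape((7, 10, 3, 6))(feat)

# The "loss layer": any function of (prediction, label) -> per-sample scalar.
loss_tensor = keras.layers.Lambda(
    lambda t: tf.reduce_mean(tf.square(t[0] - t[1]), axis=[1, 2, 3, 4]),
    name='yolo_loss')([feat, y_true_in])

train_model = keras.Model([img_in, y_true_in], loss_tensor)
# The compiled loss just forwards y_pred, which is already the loss value;
# the dataset supplies a dummy zero label of shape [batch_size].
train_model.compile(optimizer=keras.optimizers.Adam(1e-3),
                    loss={'yolo_loss': lambda y_true, y_pred: y_pred})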
Example #3
def runTrainingDetection(uuid,
                         datasetDir,
                         numOfClass,
                         obj_thresh=0.7,
                         iou_thresh=0.5,
                         obj_weight=1.0,
                         noobj_weight=1.0,
                         wh_weight=1.0,
                         max_nrof_epochs=50,
                         batch_size=96,
                         validation_split=0.2):
    config = tf.ConfigProto()
    sess = tf.Session(config=config)
    keras.backend.set_session(sess)

    datasetList = [os.path.join(datasetDir, f) for f in os.listdir(datasetDir)]
    image_list = []

    #img_ann_f = open(os.path.join(datasetDir, 'dataset_img_ann.txt'), 'w+')

    for fileName in datasetList:
        if '.jpg' in fileName:
            #        print('/home/m5stack/VTrainingService/' + fileName, file=img_ann_f)
            image_list.append('/home/m5stack/VTrainingService/' + fileName)

    #img_ann_f.close()

    image_path_list = np.array(image_list)
    # alternative: np.loadtxt(os.path.join(datasetDir, 'dataset_img_ann.txt'), dtype=str)

    ann_list = list(image_path_list)
    ann_list = [re.sub(r'JPEGImages', 'labels', s) for s in ann_list]
    ann_list = [re.sub(r'.jpg', '.txt', s) for s in ann_list]

    lines = np.array([
        np.array([
            image_path_list[i],
            np.loadtxt(ann_list[i], dtype=float, ndmin=2),
            np.array(skimage.io.imread(image_path_list[i]).shape[0:2])
        ]) for i in range(len(ann_list))
    ])

    np.save(os.path.join(datasetDir, 'dataset_img_ann.npy'), lines)

    #print('dataset npu>>>', os.path.join(datasetDir, 'dataset_img_ann.npy'))

    h = Helper(os.path.join(datasetDir, 'dataset_img_ann.npy'), numOfClass,
               'voc_anchor.npy', np.reshape(np.array((224, 320)), (-1, 2)),
               np.reshape(np.array((7, 10, 14, 20)),
                          (-1, 2)), validation_split)
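    # The (224, 320) input with output grids (7, 10) and (14, 20) corresponds
    # to the two YOLO detection scales at strides 32 and 16.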

    h.set_dataset(batch_size, 6)

    network = eval('yolo_mobilev1')  # type: yolo_mobilev1
    yolo_model, train_model = network([224, 320, 3],
                                      len(h.anchors[0]),
                                      numOfClass,
                                      alpha=0.5)

    train_model.compile(
        RAdam(),
        loss=[
            create_loss_fn(h, obj_thresh, iou_thresh, obj_weight, noobj_weight,
                           wh_weight, layer)
            for layer in range(
                len(train_model.output)
                if isinstance(train_model.output, list) else 1)
        ],
        metrics=[
            Yolo_Precision(obj_thresh, name='p'),
            Yolo_Recall(obj_thresh, name='r')
        ])

    shapes = (train_model.input.shape, tuple(h.output_shapes))
    h.train_dataset = h.train_dataset.apply(assert_element_shape(shapes))
    h.test_dataset = h.test_dataset.apply(assert_element_shape(shapes))

    #print('train', h.train_dataset, '\n\r\n\rtest', h.test_dataset)

    try:
        train_model.fit(h.train_dataset,
                        epochs=max_nrof_epochs,
                        steps_per_epoch=10,
                        validation_data=h.test_dataset,
                        validation_steps=1)
    except Exception as e:
        return (-45, f'Unexpected error found during training, err: {e}')

    keras.models.save_model(
        yolo_model, f'{localSSDLoc}trained_h5_file/{uuid}_mbnet5_yolov3.h5')

    converter = tf.lite.TFLiteConverter.from_keras_model_file(
        f'{localSSDLoc}trained_h5_file/{uuid}_mbnet5_yolov3.h5',
        custom_objects={
            'RAdam': RAdam,
            'loss_softmax_cross_entropy_with_logits_v2':
                loss_softmax_cross_entropy_with_logits_v2
        })
    tflite_model = converter.convert()
    with open(f'{localSSDLoc}trained_tflite_file/{uuid}_mbnet5_yolov3_quant.tflite',
              'wb') as tflite_f:
        tflite_f.write(tflite_model)

    subprocess.run([
        f'{nncaseLoc}/ncc',
        f'{localSSDLoc}trained_tflite_file/{uuid}_mbnet5_yolov3_quant.tflite',
        f'{localSSDLoc}trained_kmodel_file/{uuid}_mbnet5_yolov3.kmodel', '-i',
        'tflite', '-o', 'k210model', '--dataset', datasetDir
    ])

    if os.path.isfile(
            f'{localSSDLoc}trained_kmodel_file/{uuid}_mbnet5_yolov3.kmodel'):
        return (
            0, f'{localSSDLoc}trained_kmodel_file/{uuid}_mbnet5_yolov3.kmodel')
    else:
        return (-16,
                'Unexpected error found while generating the Kendryte k210model.')
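The final step shells out to nncase's ncc compiler to turn the quantized TFLite model into a K210 kmodel; --dataset points at sample images that nncase uses to calibrate quantization. A hedged variant of that call with basic error propagation, reusing the example's own variables (uuid, localSSDLoc, nncaseLoc, datasetDir) and the same flags:

import subprocess

tflite_path = f'{localSSDLoc}trained_tflite_file/{uuid}_mbnet5_yolov3_quant.tflite'
kmodel_path = f'{localSSDLoc}trained_kmodel_file/{uuid}_mbnet5_yolov3.kmodel'
proc = subprocess.run(
    [f'{nncaseLoc}/ncc', tflite_path, kmodel_path,
     '-i', 'tflite', '-o', 'k210model', '--dataset', datasetDir],
    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if proc.returncode != 0:
    # surface the compiler log instead of only checking whether the file exists
    print(proc.stderr.decode())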
Example #4
def main(annotation_path, classes_path, anchors_path, alpha, weights_path, learning_rate, epochs, augment):
    # annotation_path = 'train.txt'
    # classes_path = 'model_data/voc_classes.txt'
    # anchors_path = 'model_data/tiny_yolo_anchors.txt'
    log_dir = Path('logs')
    log_dir = log_dir / datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (224, 320)  # multiple of 32, hw
    batch_size = 16

    """ Set the Model """
    # model = create_tiny_model(input_shape, anchors, num_classes, weights_path='model_data/tiny_yolo_weights.h5')
    # model = create_model(input_shape, anchors, num_classes, weights_path='model_data/yolo_weights.h5')  # make sure you know what you freeze
    model, model_body = create_mobile_yolo(input_shape, anchors, num_classes, alpha, weights_path)  # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(str(log_dir / 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5'),
                                 monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
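    # NOTE: reduce_lr and early_stopping above are created but never passed to
    # fit() below; add them to the callbacks list if you want them active.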

    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    tf.set_random_seed(10101)
    num_train = len(lines) - int(len(lines) * val_split)
    num_val = int(len(lines) * val_split)
    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is usually enough to obtain a reasonably good model.

    model.compile(optimizer=Adam(lr=learning_rate), loss={
        # use custom yolo_loss Lambda layer.
        'yolo_loss': lambda y_true, y_pred: y_pred})

    print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
    train_set = create_dataset(lines[:num_train], batch_size, input_shape, anchors, num_classes, augment == 'True')
    val_set = create_dataset(lines[num_train:], batch_size, input_shape, anchors, num_classes, random=False)

    # labels are a single dummy tensor of shape [batch_size]
    shapes = (tuple(ins.shape for ins in model.input),
              tf.TensorShape([batch_size]))

    train_set = train_set.apply(assert_element_shape(shapes))
    val_set = val_set.apply(assert_element_shape(shapes))

    try:
        model.fit(train_set,
                  epochs=epochs,
                  validation_data=val_set, validation_steps=40,
                  steps_per_epoch=max(1, num_train // batch_size),
                  callbacks=[logging, checkpoint],
                  verbose=1)
    except KeyboardInterrupt:
        pass

    # train_set = YOLOSequence(lines[:num_train], batch_size, input_shape, anchors, num_classes)
    # model.fit_generator(train_set,
    #                     epochs=20,
    #                     steps_per_epoch=max(1, num_train // batch_size),
    #                     callbacks=[logging, checkpoint],
    #                     use_multiprocessing=True)
    save_model(model, str(log_dir / 'yolo_model.h5'))
    save_model(model_body, str(log_dir / 'yolo_model_body.h5'))
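get_classes and get_anchors are the usual keras-yolo3-style helpers for reading the class-name and anchor files. Typical implementations look like the following; the repo's own versions may differ slightly.

import numpy as np

def get_classes(classes_path):
    """Read one class name per line."""
    with open(classes_path) as f:
        class_names = [c.strip() for c in f.readlines()]
    return class_names

def get_anchors(anchors_path):
    """Read comma-separated anchor values and reshape to (N, 2)."""
    with open(anchors_path) as f:
        anchors = [float(x) for x in f.readline().split(',')]
    return np.array(anchors).reshape(-1, 2)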