Example #1
import numpy as np
from tqdm import tqdm

from keras.layers import Input, Reshape
from keras.models import Model, load_model

from utils.train_tool import get_classes, get_anchors
from model.small_mobilenet import yolo_body
# data_generator_wrapper_eval, numpy_box_iou and compute_ap are
# project-local helpers (see the sketch after this example)


def _main():
    #weights_path = 'model_data/trained_weights_final_mobilenet.h5'
    weights_path = 'model_data/small_mobilenets2_trained_weights_final.h5'
    #weights_path = 'logs/squeezenet_000/squeezenet_trained_weights_final.h5'

    train_path = '2007_train.txt'
    val_path = '2007_val.txt'
    test_path = '2007_test.txt'
    #log_dir = 'logs/logits_only_000/'
    classes_path = 'class/voc_classes.txt'
    anchors_path = 'anchors/yolo_anchors.txt'

    class_names = get_classes(classes_path)
    anchors = get_anchors(anchors_path)

    num_classes = len(class_names)
    num_anchors = len(anchors)  #9

    shape_size = 416
    input_shape = (shape_size, shape_size)  # multiple of 32, hw

    num_layers = num_anchors // 3  #9//3

    #with open(train_path) as f:
    #    train_lines = f.readlines()
    #train_lines = train_lines[:200]

    #with open(val_path) as f:
    #    val_lines = f.readlines()
    #val_lines = val_lines[:150]

    with open(test_path) as f:
        test_lines = f.readlines()
    #test_lines = test_lines[:2]

    #num_train = int(len(train_lines))
    #num_val = int(len(val_lines))
    num_test = int(len(test_lines))

    #declare model

    image_input = Input(shape=(shape_size, shape_size, 3))

    # model_path is not set in this snippet, so loading a full model fails
    # and the except branch builds the body and loads the weights instead
    try:
        eval_model = load_model(model_path, compile=False)
    except (NameError, OSError):
        eval_model = yolo_body(image_input, num_anchors // 3,
                               num_classes)  #9//3
        eval_model.load_weights(weights_path)

    yolo_out = []
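    # output grids for a 416 input: 416 // 32 = 13, scaled by 1, 2, 4
    # across the heads, i.e. 13x13, 26x26 and 52x52 cells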
    fmap = shape_size // 32
    mapsize = [1, 2, 4]

    if num_layers == 3:
        ly_out = [-3, -2, -1]
    elif num_layers == 2:
        ly_out = [-2, -1]
    else:
        ly_out = [-1]

    # reshape each raw head output to (grid, grid, 3 anchors, classes + 5)
    for ly in range(num_layers):
        yolo_layer = Reshape(
            (fmap * mapsize[ly], fmap * mapsize[ly], 3,
             (num_classes + 5)))(eval_model.layers[ly_out[ly]].output)

        yolo_out.append(yolo_layer)

    eval_model = Model(inputs=eval_model.input, outputs=yolo_out)
    eval_model._make_predict_function()

    batch_size = 1

    all_detections = [[] for _ in range(num_classes)]
    all_annotations = [[] for _ in range(num_classes)]

    count_detections = [[0 for _ in range(num_classes)]
                        for _ in range(num_layers)]
    total_object = 0

    datagen = data_generator_wrapper_eval(test_lines, batch_size, input_shape,
                                          anchors, num_classes, eval_model)

    print("{} test data".format(num_test))
    for n in tqdm(range(num_test)):  #num_test
        img, flogits, mlogits = next(datagen)

        for l in range(num_layers):
            #print( "layer" + str(l) )
            arrp = flogits[l]
            box = np.where(arrp[..., 4] > 0)
            box = np.transpose(box)

            for i in range(len(box)):
                #print("obj" + str(i) )
                #print( tuple(box[i]) )
                #detection_label =  np.argmax( flogits[l][tuple(box[i])][5:])
                annotation_label = np.argmax(flogits[l][tuple(box[i])][5:])

                #print( "{} ({}) {} == ({}) {} ".format(l, detection_label, class_names[  detection_label ] ,annotation_label, class_names[  annotation_label ] ) )

                all_detections[annotation_label].append(mlogits[l][tuple(
                    box[i])])
                all_annotations[annotation_label].append(flogits[l][tuple(
                    box[i])])

                count_detections[l][annotation_label] += 1
                total_object += 1

    print(len(all_detections))
    print(len(all_annotations))
    print(count_detections)
    print(total_object)

    conf_thres = 0.5
    iou_thres = 0.45

    average_precisions = {}

    for label in tqdm(range(num_classes)):

        false_positives = np.zeros((0, ))
        true_positives = np.zeros((0, ))
        scores = np.zeros((0, ))

        num_detect = len(all_detections[label])
        if num_detect == 0:
            # no objects of this class in the test split
            average_precisions[label] = 0.0
            continue
        for det in range(num_detect):

            detect_box = all_detections[label][det][..., 0:4]
            detect_conf = all_detections[label][det][..., 4]
            detect_label = np.argmax(all_detections[label][det][..., 5:])

            annot_box = all_annotations[label][det][..., 0:4]
            annot_conf = all_annotations[label][det][..., 4]

            iou = numpy_box_iou(detect_box, annot_box)

            scores = np.append(scores, detect_conf)

            if (iou > iou_thres and detect_conf > conf_thres
                    and (label == detect_label)):
                #print( best_iou[tuple(box[i])] )
                #print("pos")
                false_positives = np.append(false_positives, 0)
                true_positives = np.append(true_positives, 1)
            else:
                #print("neg")
                false_positives = np.append(false_positives, 1)
                true_positives = np.append(true_positives, 0)

        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]
        #print(true_positives)

        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)
        #print(true_positives)

        recall = true_positives / num_detect
        #print( recall )
        precision = true_positives / np.maximum(
            true_positives + false_positives,
            np.finfo(np.float64).eps)
        #print( precision )

        average_precision = compute_ap(recall, precision)
        average_precisions[label] = average_precision

    print("loaded weights {}".format(weights_path))

    #print(average_precisions)

    for label, average_precision in average_precisions.items():
        print(class_names[label] + ': {:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))
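
numpy_box_iou and compute_ap are project-local helpers that the example calls but does not define. A minimal sketch of what they typically look like, assuming center-format (x, y, w, h) boxes and VOC-style all-point interpolated AP; the project's actual box encoding may differ:

import numpy as np

def numpy_box_iou(box_a, box_b):
    # IoU of two (x, y, w, h) boxes (assumed encoding)
    a_min = box_a[..., :2] - box_a[..., 2:4] / 2.0
    a_max = box_a[..., :2] + box_a[..., 2:4] / 2.0
    b_min = box_b[..., :2] - box_b[..., 2:4] / 2.0
    b_max = box_b[..., :2] + box_b[..., 2:4] / 2.0
    inter_wh = np.maximum(np.minimum(a_max, b_max) - np.maximum(a_min, b_min), 0.0)
    inter = inter_wh[..., 0] * inter_wh[..., 1]
    union = box_a[..., 2] * box_a[..., 3] + box_b[..., 2] * box_b[..., 3] - inter
    return inter / np.maximum(union, np.finfo(np.float64).eps)

def compute_ap(recall, precision):
    # VOC-style AP: area under the precision-recall curve, using the
    # running maximum of precision (the "precision envelope")
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([0.0], precision, [0.0]))
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])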
Example #2
def _main():
    epoch_end_first = 1  #30
    epoch_end_final = 2  #60
    model_name = 'distillation_small_mobilenets2'
    log_dir = 'logs/000/'
    model_path = 'model_data/fake_trained_weights_final_mobilenet.h5'
    #teacher_path ="logs/new_yolo_000/last_loss16.9831-val_loss16.9831.h5"
    teacher_path = "model_data/trained_weights_final.h5"

    train_path = '2007_train.txt'
    val_path = '2007_val.txt'
    # test_path = '2007_test.txt'
    classes_path = 'class/voc_classes.txt'
    anchors_path = 'anchors/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416, 416)  # multiple of 32, hw
    num_anchors = len(anchors)
    image_input = Input(shape=(None, None, 3))

    is_tiny_version = len(anchors) == 6  # default setting

    if is_tiny_version:
        model = create_tiny_model(
            input_shape,
            anchors,
            num_classes,
            freeze_body=2,
            weights_path='model_data/tiny_yolo_weights.h5')
    else:
        model, student, teacher = create_model(
            input_shape,
            anchors,
            num_classes,
            load_pretrained=False,
            freeze_body=2,
            weights_path=model_path,
            teacher_weights_path=teacher_path
        )  # make sure you know what you freeze

    #student.summary()
    #student.save_weights("s.h5")

    logging = TensorBoard(log_dir=log_dir)
    checkpointStudent = DistillCheckpointCallback(student, model_name, log_dir)
    checkpointTeacher = DistillCheckpointCallback(teacher, "yolo", log_dir)
    #checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5', monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=3,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)

    with open(train_path) as f:
        train_lines = f.readlines()
    train_lines = train_lines[:1]

    with open(val_path) as f:
        val_lines = f.readlines()
    val_lines = val_lines[:1]
    # with open(test_path) as f:
    #     test_lines = f.readlines()

    num_train = int(len(train_lines))
    num_val = int(len(val_lines))

    meanAP = AveragePrecision(
        data_generator_wrapper(val_lines, 1, input_shape, anchors,
                               num_classes), num_val, input_shape,
        len(anchors) // 3, anchors, num_classes, log_dir)
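    # meanAP can be appended to the callbacks lists below to log
    # validation mAP per epoch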

    # Train with the frozen layers first, to get a stable loss.
    # Adjust the number of epochs to your dataset. This step is usually enough to obtain a reasonable model.
    if True:
        model.compile(
            optimizer=Adam(lr=1e-3),
            loss={
                # use custom yolo_loss Lambda layer.
                'yolo_custom_loss': lambda y_true, y_pred: y_pred
            })

        batch_size = 1  #4#24#32

        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        history = model.fit_generator(
            data_generator_wrapper(train_lines, batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(val_lines, batch_size,
                                                   input_shape, anchors,
                                                   num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=epoch_end_first,
            initial_epoch=0,
            callbacks=[logging, checkpointStudent, checkpointTeacher])

        last_loss = history.history['loss'][-1]
        last_val_loss = history.history['val_loss'][-1]

        hist = "loss{0:.4f}-val_loss{1:.4f}".format(last_loss, last_val_loss)

        model.save(hist + "model_checkpoint.h5")
        student.save_weights(log_dir + "last_" + hist + ".h5")
        student.save_weights(log_dir + model_name +
                             '_trained_weights_stage_1.h5')
        teacher.save_weights(log_dir + "teacher" + model_name +
                             '_trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if False:
        for i in range(len(student.layers)):
            student.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4),
                      loss={
                          'yolo_custom_loss': lambda y_true, y_pred: y_pred
                      })  # recompile to apply the change
        print('Unfreeze all of the layers.')

        batch_size = 4  # 32; note that more GPU memory is required after unfreezing the body

        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        history = model.fit_generator(
            data_generator_wrapper(train_lines, batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(val_lines, batch_size,
                                                   input_shape, anchors,
                                                   num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=epoch_end_final,
            initial_epoch=epoch_end_first,
            callbacks=[
                logging, reduce_lr, checkpointStudent, checkpointTeacher
            ])  #, early_stopping
        model.save_weights(log_dir + model_name + '_trained_weights_final.h5')

        last_loss = history.history['loss'][-1]
        last_val_loss = history.history['val_loss'][-1]

        hist = "loss{0:.4f}-val_loss{0:.4f}".format(last_loss, last_val_loss)

        student.save_weights(log_dir + "last_" + hist + ".h5")
        student.save_weights(log_dir + model_name +
                             '_trained_weights_final.h5')
        teacher.save_weights(log_dir + "teacher" + model_name +
                             '_trained_weights_final.h5')
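
DistillCheckpointCallback is a project-local callback that is not shown here. A minimal sketch, assuming it simply snapshots the wrapped model's weights into log_dir at the end of each epoch:

from keras.callbacks import Callback

class DistillCheckpointCallback(Callback):
    # sketch only: the real project class may record more state
    def __init__(self, target_model, name, log_dir):
        super(DistillCheckpointCallback, self).__init__()
        self.target_model = target_model
        self.name = name
        self.log_dir = log_dir

    def on_epoch_end(self, epoch, logs=None):
        path = '{}{}_ep{:03d}.h5'.format(self.log_dir, self.name, epoch + 1)
        self.target_model.save_weights(path)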
Example #3
from keras.models import Model, load_model
from keras.layers import Input
from utils.train_tool import get_classes, get_anchors
from model.small_mobilenet import yolo_body
from model.yolo3 import tiny_yolo_body

import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

#model_path = 'model_data/small_mobilenet_trained_weights_final.h5'
model_path = 'model_data/tiny_yolo.h5'
classes_path = 'class/coco_classes.txt'
anchors_path = 'anchors/yolo_anchors.txt'

class_names = get_classes(classes_path)
anchors = get_anchors(anchors_path)

num_classes = len(class_names)
num_anchors = len(anchors)

#yolo_model =  yolo_body(Input(shape=(None,None,3)), 3, num_classes)
#yolo_model.load_weights(model_path)
yolo_model = load_model(model_path)

yolo_model.summary()

yolo3 = yolo_model.layers[-3].output
yolo2 = yolo_model.layers[-2].output
#yolo1 = yolo_model.layers[-1].output

#new_model = Model( inputs= yolo_model.input , outputs=[yolo3,yolo2] )
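
The commented-out line above hints at the goal: a model that exposes the two head tensors directly. A hedged completion (the layer indices depend on the architecture actually loaded):

import numpy as np

new_model = Model(inputs=yolo_model.input, outputs=[yolo3, yolo2])
dummy = np.zeros((1, 416, 416, 3), dtype='float32')  # any multiple-of-32 size
print([out.shape for out in new_model.predict(dummy)])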
Example #4
def _main():
    GPUs = 2  # GPU count for multi_gpu_model; adjust to your machine
    epoch_end_first = 20  #30
    epoch_end_final = 2  #60
    model_name = 'xx'
    log_dir = 'logs/000/'
    model_path = 'model_data/new_small_mobilenets2_trained_weights_final.h5'

    train_path = '2007_train.txt'
    val_path = '2007_val.txt'
    # test_path = '2007_test.txt'
    classes_path = 'class/voc_classes.txt'
    anchors_path = 'anchors/yolo_anchors.txt'

    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416, 416)  # multiple of 32, hw
    num_anchors = len(anchors)
    image_input = Input(shape=(None, None, 3))

    with tf.device('/cpu:0'):
        template_model = create_model(
            input_shape,
            anchors,
            num_classes,
            load_pretrained=False,
            freeze_body=1,
            weights_path="")  # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)

    with open(train_path) as f:
        train_lines = f.readlines()
    train_lines = train_lines[:1]

    with open(val_path) as f:
        val_lines = f.readlines()
    val_lines = val_lines[:1]

    # with open(test_path) as f:
    #     test_lines = f.readlines()

    num_train = int(len(train_lines))
    num_val = int(len(val_lines))

    model = multi_gpu_model(template_model, gpus=GPUs)
    print('Using multi_gpu_model for training.')

    modelsave_checkpoint = ModelSaveCheckpoint(model=template_model,
                                               folder_path=log_dir)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.3,
                                  patience=3,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)

    # Train with the frozen layers first, to get a stable loss.
    # Adjust the number of epochs to your dataset. This step is usually enough to obtain a reasonable model.
    if True:
        batch_size = 32 * GPUs
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))

        model.compile(optimizer=Adam(lr=1e-3),
                      loss={
                          'yolo_loss': lambda y_true, y_pred: y_pred
                      })

        model.fit_generator(data_generator_wrapper(train_lines, batch_size,
                                                   input_shape, anchors,
                                                   num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrapper(
                                val_lines, batch_size, input_shape, anchors,
                                num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=epoch_end_first,
                            initial_epoch=0,
                            callbacks=[logging, modelsave_checkpoint])
        # save the model config and weights via the CPU template so the GPU replicas share weights
        with tf.device('/cpu:0'):
            template_model.save(log_dir + 'frozen_weight.h5')

    if True:
        batch_size = 32 * GPUs
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))

        # unfreeze on the template: the multi_gpu_model wrapper shares its layers
        for i in range(len(template_model.layers)):
            template_model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4),
                      loss={
                          'yolo_loss': lambda y_true, y_pred: y_pred
                      })
        print('Unfreeze all of the layers.')

        model.fit_generator(
            data_generator_wrapper(train_lines, batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(val_lines, batch_size,
                                                   input_shape, anchors,
                                                   num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=epoch_end_final,
            initial_epoch=epoch_end_first,
            callbacks=[logging, reduce_lr,
                       modelsave_checkpoint])  #early_stopping,
        # save the model config and weights via the CPU template so the GPU replicas share weights
        with tf.device('/cpu:0'):
            template_model.save(log_dir + 'final_weight.h5')
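
ModelSaveCheckpoint is also project-local. A minimal sketch, assuming it saves the single-GPU template model (not the multi_gpu_model wrapper) so the checkpoints stay loadable on one device:

from keras.callbacks import Callback

class ModelSaveCheckpoint(Callback):
    # sketch only: snapshots the CPU-resident template after every epoch
    def __init__(self, model, folder_path):
        super(ModelSaveCheckpoint, self).__init__()
        self.template = model
        self.folder_path = folder_path

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.template.save_weights('{}ep{:03d}-val_loss{:.3f}.h5'.format(
            self.folder_path, epoch + 1, logs.get('val_loss', 0.0)))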
Example #5
def _main():
    epoch_end_first = 50
    epoch_end_final = 100
    model_name = 'eld_small_mobilenets2'
    log_dir = 'logs/000/'
    model_path = 'model_data/eld_small_mobilenets2_trained_weights_final.h5'

    annotation_path = 'elderly.txt'
    classes_path = 'class/elderly_classes.txt'
    anchors_path = 'anchors/elderly_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416,416) # multiple of 32, hw
    num_anchors = len(anchors)
    image_input = Input(shape=(None, None, 3))
    
    is_tiny_version = len(anchors)==6 # default setting
    if is_tiny_version:
        model = create_tiny_model(input_shape, anchors, num_classes,
            freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
    else:
        model = create_model(input_shape, anchors, num_classes,load_pretrained=False,
            freeze_body=2, weights_path=model_path) # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
     

    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)

    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val

    train_lines = lines[:num_train]
    val_lines = lines[num_train:]

    num_train = int(len(train_lines))
    num_val = int(len(val_lines))

    #print('Train on {} samples, val on {} samples.'.format(num_train, num_val))
    #print('Train on {} samples, val on {} samples.'.format( len(train_lines), len(val_lines)))
    #print(train_lines)
    #print(val_lines)

    #meanAP = AveragePrecision(data_generator_wrapper(val_lines , 1 , input_shape, anchors, num_classes) , num_val , input_shape , len(anchors)//3 , anchors ,num_classes,log_dir)

    # Train with the frozen layers first, to get a stable loss.
    # Adjust the number of epochs to your dataset. This step is usually enough to obtain a reasonable model.
    if True:
        model.compile(optimizer=Adam(lr=1e-3), loss={
            # use custom yolo_loss Lambda layer.
             'yolo_loss' : lambda y_true, y_pred: y_pred})

        batch_size = 1  # 32

        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        history = model.fit_generator(data_generator_wrapper(train_lines, batch_size, input_shape, anchors, num_classes),
                steps_per_epoch=max(1, num_train//batch_size),
                validation_data=data_generator_wrapper(val_lines, batch_size, input_shape, anchors, num_classes),
                validation_steps=max(1, num_val//batch_size),
                epochs=epoch_end_first,
                initial_epoch=0,
                callbacks=[logging, checkpoint])#, meanAP

      
        last_loss = history.history['loss'][-1]
        last_val_loss = history.history['val_loss'][-1]

        hist = "loss{0:.4f}-val_loss{1:.4f}".format(last_loss,last_val_loss)

        model.save_weights(log_dir + "last_"+ hist + ".h5")

        model.save_weights(log_dir + model_name+'_trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if False:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4), loss={ 
            'yolo_loss' : lambda y_true, y_pred: y_pred}) # recompile to apply the change
        print('Unfreeze all of the layers.')

        batch_size = 20  # 32; note that more GPU memory is required after unfreezing the body


        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        history = model.fit_generator(data_generator_wrapper(train_lines, batch_size, input_shape, anchors, num_classes),
            steps_per_epoch=max(1, num_train//batch_size),
            validation_data=data_generator_wrapper(val_lines, batch_size, input_shape, anchors, num_classes),
            validation_steps=max(1, num_val//batch_size),
            epochs=epoch_end_final,
            initial_epoch=epoch_end_first,
            callbacks=[logging, checkpoint, reduce_lr ])#, meanAP, early_stopping

        last_loss = history.history['loss'][-1]
        last_val_loss = history.history['val_loss'][-1]

        hist = "loss{0:.4f}-val_loss{0:.4f}".format(last_loss,last_val_loss)

        model.save_weights(log_dir + "last_"+ hist + ".h5")

        model.save_weights(log_dir + model_name + '_trained_weights_final.h5')
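
A note on the loss={'yolo_loss': lambda y_true, y_pred: y_pred} pattern used throughout these examples: the real YOLO loss is computed inside the graph by a Lambda layer named yolo_loss, so the model's output already is the loss value and Keras only applies an identity function to it; the data generator feeds zeros as dummy targets. A self-contained sketch of the same trick on a toy regression model (all names here are illustrative, not from the project):

import numpy as np
from keras.layers import Input, Dense, Lambda
from keras.models import Model
import keras.backend as K

x_in = Input(shape=(4,))
y_in = Input(shape=(1,))  # the true target is fed as an *input*
pred = Dense(1)(x_in)
# the objective lives inside the graph, in a named Lambda layer
loss_out = Lambda(lambda t: K.mean(K.square(t[0] - t[1]), axis=-1, keepdims=True),
                  name='yolo_loss')([y_in, pred])

model = Model([x_in, y_in], loss_out)
# identity "loss": just pass the Lambda's output through
model.compile(optimizer='adam', loss={'yolo_loss': lambda y_true, y_pred: y_pred})

x = np.random.rand(8, 4)
y = np.random.rand(8, 1)
model.fit([x, y], np.zeros((8, 1)), epochs=1)  # dummy zeros as y_true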