# NOTE: assumed imports for these keras-yolo3-style training scripts.
# Project-local helpers (get_classes, get_anchors, create_model,
# create_tiny_model, data_generator_wrapper, ModelSaveCheckpoint,
# DistillCheckpointCallback, AveragePrecision) and the GPUs constant are
# defined elsewhere in the original repository.
import numpy as np
import tensorflow as tf
from keras.layers import Input
from keras.optimizers import Adam
from keras.callbacks import (TensorBoard, ModelCheckpoint, ReduceLROnPlateau,
                             EarlyStopping)
from keras.utils import multi_gpu_model


def _main():
    epoch_end_first = 20  # epochs for the frozen-layer stage (originally 30)
    epoch_end_final = 60  # final epoch for fine-tuning; must exceed epoch_end_first
    model_name = 'xx'
    log_dir = 'logs/000/'
    model_path = 'model_data/new_small_mobilenets2_trained_weights_final.h5'

    train_path = '2007_train.txt'
    val_path = '2007_val.txt'
    # test_path = '2007_test.txt'
    classes_path = 'class/voc_classes.txt'
    anchors_path = 'anchors/yolo_anchors.txt'

    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416, 416)  # multiple of 32, hw
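    # YOLOv3 downsamples the input by a stride of 32, so both dimensions must be multiples of 32.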
    num_anchors = len(anchors)
    image_input = Input(shape=(None, None, 3))

    with tf.device('/cpu:0'):
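        # Keep the weight-holding template model on the CPU so multi_gpu_model()
        # below can replicate it across the GPUs while all replicas share the
        # same underlying weights.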
        template_model = create_model(
            input_shape,
            anchors,
            num_classes,
            load_pretrained=False,
            freeze_body=1,
            weights_path="")  # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)

    with open(train_path) as f:
        train_lines = f.readlines()
    train_lines = train_lines[:1]  # NOTE: debug truncation to a single sample; remove for full training

    with open(val_path) as f:
        val_lines = f.readlines()
    val_lines = val_lines[:1]  # NOTE: debug truncation to a single sample; remove for full training

    # with open(test_path) as f:
    #     test_lines = f.readlines()

    num_train = int(len(train_lines))
    num_val = int(len(val_lines))

    # multi_gpu_model() splits each batch across the GPUs, so each GPU sees
    # batch_size / GPUs samples per step.
    model = multi_gpu_model(template_model, gpus=GPUs)
    print('Using multi_gpu_model for training.')

    modelsave_checkpoint = ModelSaveCheckpoint(model=template_model,
                                               folder_path=log_dir)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.3,
                                  patience=3,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)

    # Train with frozen layers first, to get a stable loss.
    # Adjust the number of epochs to your dataset. This step is enough to obtain a reasonably good model.
    if True:
        batch_size = 32 * GPUs
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))

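        # The 'yolo_loss' Lambda layer inside the model computes the full YOLO
        # loss, so the compiled loss below simply passes that output through.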
        model.compile(optimizer=Adam(lr=1e-3),
                      loss={
                          'yolo_loss': lambda y_true, y_pred: y_pred
                      })

        model.fit_generator(data_generator_wrapper(train_lines, batch_size,
                                                   input_shape, anchors,
                                                   num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrapper(
                                val_lines, batch_size, input_shape, anchors,
                                num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=epoch_end_first,
                            initial_epoch=0,
                            callbacks=[logging, modelsave_checkpoint])
        # Save the template model (config + weights) on the CPU device; the multi-GPU replicas share these weights.
        with tf.device('/cpu:0'):
            template_model.save(log_dir + 'frozen_weight.h5')

    if True:
        batch_size = 32 * GPUs
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))

        for i in range(len(model.layers)):
            model.layers[i].trainable = True
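        # Recompile so that the change to the trainable flags takes effect.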
        model.compile(optimizer=Adam(lr=1e-4),
                      loss={
                          'yolo_loss': lambda y_true, y_pred: y_pred
                      })
        print('Unfreeze all of the layers.')

        model.fit_generator(
            data_generator_wrapper(train_lines, batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(val_lines, batch_size,
                                                   input_shape, anchors,
                                                   num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=epoch_end_final,
            initial_epoch=epoch_end_first,
            callbacks=[logging, reduce_lr,
                       modelsave_checkpoint])  #early_stopping,
        # Save the template model (config + weights) on the CPU device; the multi-GPU replicas share these weights.
        with tf.device('/cpu:0'):
            template_model.save(log_dir + 'final_weight.h5')
Example #2
def _main():
    epoch_end_first = 1  #30
    epoch_end_final = 2  #60
    model_name = 'distillation_small_mobilenets2'
    log_dir = 'logs/000/'
    model_path = 'model_data/fake_trained_weights_final_mobilenet.h5'
    #teacher_path ="logs/new_yolo_000/last_loss16.9831-val_loss16.9831.h5"
    teacher_path = "model_data/trained_weights_final.h5"

    train_path = '2007_train.txt'
    val_path = '2007_val.txt'
    # test_path = '2007_test.txt'
    classes_path = 'class/voc_classes.txt'
    anchors_path = 'anchors/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416, 416)  # multiple of 32, hw
    num_anchors = len(anchors)
    image_input = Input(shape=(None, None, 3))

    is_tiny_version = len(anchors) == 6  # default setting
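    # Six anchors correspond to tiny YOLOv3 (two detection scales); nine to the full model (three scales).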

    if is_tiny_version:
        model = create_tiny_model(
            input_shape,
            anchors,
            num_classes,
            freeze_body=2,
            weights_path='model_data/tiny_yolo_weights.h5')
    else:
        model, student, teacher = create_model(
            input_shape,
            anchors,
            num_classes,
            load_pretrained=False,
            freeze_body=2,
            weights_path=model_path,
            teacher_weights_path=teacher_path
        )  # make sure you know what you freeze

    #student.summary()
    #student.save_weights("s.h5")

    logging = TensorBoard(log_dir=log_dir)
    checkpointStudent = DistillCheckpointCallback(student, model_name, log_dir)
    checkpointTeacher = DistillCheckpointCallback(teacher, "yolo", log_dir)
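    # Project-local callbacks that (presumably) checkpoint the student and teacher sub-models separately.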
    #checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5', monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=3,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)

    with open(train_path) as f:
        train_lines = f.readlines()
    train_lines = train_lines[:1]  # NOTE: debug truncation to a single sample; remove for full training

    with open(val_path) as f:
        val_lines = f.readlines()
    val_lines = val_lines[:1]  # NOTE: debug truncation to a single sample; remove for full training
    # with open(test_path) as f:
    #     test_lines = f.readlines()

    num_train = int(len(train_lines))
    num_val = int(len(val_lines))

    meanAP = AveragePrecision(
        data_generator_wrapper(val_lines, 1, input_shape, anchors,
                               num_classes), num_val, input_shape,
        len(anchors) // 3, anchors, num_classes, log_dir)
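    # NOTE: this mAP callback is constructed but is not included in the
    # callbacks= lists below; add it there to evaluate mAP during training.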

    # Train with frozen layers first, to get a stable loss.
    # Adjust the number of epochs to your dataset. This step is enough to obtain a reasonably good model.
    if True:
        model.compile(
            optimizer=Adam(lr=1e-3),
            loss={
                # use custom yolo_loss Lambda layer.
                'yolo_custom_loss': lambda y_true, y_pred: y_pred
            })

        batch_size = 1  # alternatives: 4, 24, 32

        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        history = model.fit_generator(
            data_generator_wrapper(train_lines, batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(val_lines, batch_size,
                                                   input_shape, anchors,
                                                   num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=epoch_end_first,
            initial_epoch=0,
            callbacks=[logging, checkpointStudent, checkpointTeacher])

        last_loss = history.history['loss'][-1]
        last_val_loss = history.history['val_loss'][-1]

        hist = "loss{0:.4f}-val_loss{1:.4f}".format(last_loss, last_val_loss)

        model.save(hist + "model_checkpoint.h5")
        student.save_weights(log_dir + "last_" + hist + ".h5")
        student.save_weights(log_dir + model_name +
                             '_trained_weights_stage_1.h5')
        teacher.save_weights(log_dir + "teacher" + model_name +
                             '_trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if False:
        for i in range(len(student.layers)):
            student.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4),
                      loss={
                          'yolo_custom_loss': lambda y_true, y_pred: y_pred
                      })  # recompile to apply the change
        print('Unfreeze all of the layers.')

        batch_size = 4  # 32; note that more GPU memory is required after unfreezing the body

        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        history = model.fit_generator(
            data_generator_wrapper(train_lines, batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(val_lines, batch_size,
                                                   input_shape, anchors,
                                                   num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=epoch_end_final,
            initial_epoch=epoch_end_first,
            callbacks=[
                logging, reduce_lr, checkpointStudent, checkpointTeacher
            ])  #, early_stopping
        model.save_weights(log_dir + model_name + '_trained_weights_final.h5')

        last_loss = history.history['loss'][-1]
        last_val_loss = history.history['val_loss'][-1]

        hist = "loss{0:.4f}-val_loss{0:.4f}".format(last_loss, last_val_loss)

        student.save_weights(log_dir + "last_" + hist + ".h5")
        student.save_weights(log_dir + model_name +
                             '_trained_weights_final.h5')
        teacher.save_weights(log_dir + "teacher" + model_name +
                             '_trained_weights_final.h5')


def _main():
    epoch_end_first = 50
    epoch_end_final = 100
    model_name = 'eld_small_mobilenets2'
    log_dir = 'logs/000/'
    model_path = 'model_data/eld_small_mobilenets2_trained_weights_final.h5'

    annotation_path = 'elderly.txt'
    classes_path = 'class/elderly_classes.txt'
    anchors_path = 'anchors/elderly_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416,416) # multiple of 32, hw
    num_anchors = len(anchors)
    image_input = Input(shape=(None, None, 3))
    
    is_tiny_version = len(anchors)==6 # default setting
    if is_tiny_version:
        model = create_tiny_model(input_shape, anchors, num_classes,
            freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
    else:
        model = create_model(input_shape, anchors, num_classes,load_pretrained=False,
            freeze_body=2, weights_path=model_path) # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
     

    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
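    # The fixed seed makes the shuffle (and hence the train/val split) reproducible;
    # reseeding with None restores nondeterminism afterwards.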

    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val

    train_lines = lines[:num_train]
    val_lines = lines[num_train:]

    num_train = int(len(train_lines))
    num_val = int(len(val_lines))

    #print('Train on {} samples, val on {} samples.'.format(num_train, num_val))
    #print('Train on {} samples, val on {} samples.'.format( len(train_lines), len(val_lines)))
    #print(train_lines)
    #print(val_lines)

    #meanAP = AveragePrecision(data_generator_wrapper(val_lines , 1 , input_shape, anchors, num_classes) , num_val , input_shape , len(anchors)//3 , anchors ,num_classes,log_dir)

    # Train with frozen layers first, to get a stable loss.
    # Adjust the number of epochs to your dataset. This step is enough to obtain a reasonably good model.
    if True:
        model.compile(optimizer=Adam(lr=1e-3), loss={
            # use custom yolo_loss Lambda layer.
             'yolo_loss' : lambda y_true, y_pred: y_pred})

        batch_size = 1  # 32

        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        history = model.fit_generator(data_generator_wrapper(train_lines, batch_size, input_shape, anchors, num_classes),
                steps_per_epoch=max(1, num_train//batch_size),
                validation_data=data_generator_wrapper(val_lines, batch_size, input_shape, anchors, num_classes),
                validation_steps=max(1, num_val//batch_size),
                epochs=epoch_end_first,
                initial_epoch=0,
                callbacks=[logging, checkpoint])#, meanAP

      
        last_loss = history.history['loss'][-1]
        last_val_loss = history.history['val_loss'][-1]

        hist = "loss{0:.4f}-val_loss{1:.4f}".format(last_loss,last_val_loss)

        model.save_weights(log_dir + "last_"+ hist + ".h5")

        model.save_weights(log_dir + model_name+'_trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if False:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4), loss={ 
            'yolo_loss' : lambda y_true, y_pred: y_pred}) # recompile to apply the change
        print('Unfreeze all of the layers.')

        batch_size = 20  # 32; note that more GPU memory is required after unfreezing the body


        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        history = model.fit_generator(data_generator_wrapper(train_lines, batch_size, input_shape, anchors, num_classes),
            steps_per_epoch=max(1, num_train//batch_size),
            validation_data=data_generator_wrapper(val_lines, batch_size, input_shape, anchors, num_classes),
            validation_steps=max(1, num_val//batch_size),
            epochs=epoch_end_final,
            initial_epoch=epoch_end_first,
            callbacks=[logging, checkpoint, reduce_lr ])#, meanAP, early_stopping

        last_loss = history.history['loss'][-1]
        last_val_loss = history.history['val_loss'][-1]

        hist = "loss{0:.4f}-val_loss{0:.4f}".format(last_loss,last_val_loss)

        model.save_weights(log_dir + "last_"+ hist + ".h5")

        model.save_weights(log_dir + model_name + '_trained_weights_final.h5')