Example #1
        new_model = build_model()
        if pretrained_path is not None:
            new_model.load_weights(pretrained_path)

    adam = keras.optimizers.Adam(lr=1e-4,
                                 beta_1=0.9,
                                 beta_2=0.99,
                                 epsilon=1e-08,
                                 decay=5e-6)
    # sgd = keras.optimizers.SGD(lr=1e-5, decay=1e-6, momentum=0.9, nesterov=True)
    new_model.compile(optimizer=adam,
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
    # new_model.compile(optimizer=adam, loss=[focal_loss(alpha=.25, gamma=2)], metrics=['accuracy'])

    new_model.summary()

    # Final callbacks
    callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]

    # Start Fine-tuning
    new_model.fit_generator(train_gen(),
                            steps_per_epoch=num_train_samples // batch_size,
                            validation_data=valid_gen(),
                            validation_steps=num_valid_samples // batch_size,
                            epochs=epochs,
                            verbose=1,
                            callbacks=callbacks,
                            use_multiprocessing=True,
                            workers=int(get_available_cpus() * 0.80))
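
The commented-out compile call above refers to a focal_loss helper that is not shown in this snippet. A minimal sketch of such a factory for one-hot categorical targets, written against the Keras 2.x backend (the exact formulation in the original codebase may differ):

from keras import backend as K

def focal_loss(alpha=0.25, gamma=2.0):
    """Return a categorical focal loss usable with model.compile().

    Down-weights easy, well-classified examples so training focuses on
    hard ones: FL(p_t) = -alpha * (1 - p_t)^gamma * log(p_t).
    """
    def loss(y_true, y_pred):
        # Clip predictions to avoid log(0).
        y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
        cross_entropy = -y_true * K.log(y_pred)
        weight = alpha * K.pow(1.0 - y_pred, gamma)
        return K.sum(weight * cross_entropy, axis=-1)
    return loss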
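
get_available_cpus() is a project helper rather than a Keras API; a plausible stand-in on top of the standard library (the original repository may count CPUs differently):

import multiprocessing

def get_available_cpus():
    # Number of logical CPUs visible to this process.
    return multiprocessing.cpu_count()

Capping workers below the full CPU count (80% here, half in the later examples) leaves headroom for the main training process when use_multiprocessing=True.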
Example #2
        model_checkpoint = MyCbk(model)
    else:
        model = build_encoder_decoder()
        final = build_refinement(model)
        if pretrained_path is not None:
            final.load_weights(pretrained_path)
        else:
            migrate_model(final)

    decoder_target = tf.placeholder(dtype='float32',
                                    shape=(None, None, None, None))
    final.compile(optimizer='nadam',
                  loss=overall_loss,
                  target_tensors=[decoder_target])

    final.summary()

    # Final callbacks
    callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]

    # Start Fine-tuning
    final.fit_generator(train_gen(),
                        steps_per_epoch=num_train_samples // batch_size,
                        validation_data=valid_gen(),
                        validation_steps=num_valid_samples // batch_size,
                        epochs=epochs,
                        verbose=1,
                        callbacks=callbacks,
                        use_multiprocessing=True,
                        workers=int(get_available_cpus() / 2))
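
Two things in this example live outside the snippet: the tf.placeholder/target_tensors pattern is a TF 1.x-era Keras idiom for feeding a target whose shape is only known at run time, and overall_loss is defined elsewhere in the project. For an encoder-decoder matting network it is commonly a weighted sum of an alpha-prediction term and a compositional term; a hedged, self-contained sketch, where the 0.5 weighting and both helper bodies are assumptions rather than facts about this codebase:

from keras import backend as K

def alpha_loss(y_true, y_pred):
    # Charbonnier-style alpha error (a fuller sketch follows Example #3).
    return K.mean(K.sqrt(K.square(y_pred - y_true) + 1e-12))

def comp_loss(y_true, y_pred):
    # Assumed placeholder for the compositional term; the real version
    # re-composites the image with predicted vs. ground-truth alpha.
    return K.mean(K.abs(y_pred - y_true))

def overall_loss(y_true, y_pred):
    # Assumed 50/50 weighting of the two matting losses.
    return 0.5 * alpha_loss(y_true, y_pred) + 0.5 * comp_loss(y_true, y_pred)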
Example #3
    #     count = 0
    #     for i in encoder_decoder.layers:
    #         count += 1
    #     print(count)
    #     for i in refinement.layers:
    #         count += 1
    #     print(count)

    # sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    refinement.compile(optimizer='nadam', loss=alpha_prediction_loss)

    refinement.summary()

    # Summarize then go!
    num_cpu = get_available_cpus()
    workers = int(round(num_cpu / 2))

    # Final callbacks
    callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]

    # Start Fine-tuning
    refinement.fit_generator(train_gen(),
                             steps_per_epoch=num_train_samples // batch_size,
                             validation_data=valid_gen(),
                             validation_steps=num_valid_samples // batch_size,
                             epochs=epochs,
                             verbose=1,
                             callbacks=callbacks,
                             use_multiprocessing=False,
                             workers=workers)
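
alpha_prediction_loss measures the per-pixel error between predicted and ground-truth alpha mattes. A minimal sketch of the usual Charbonnier-style formulation from the matting literature (the original code may additionally mask the loss to the trimap's unknown region):

from keras import backend as K

def alpha_prediction_loss(y_true, y_pred):
    # Smooth L1-like distance between predicted and ground-truth alpha;
    # the tiny epsilon keeps the square root differentiable at zero.
    epsilon_sqr = 1e-12
    return K.mean(K.sqrt(K.square(y_pred - y_true) + epsilon_sqr))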
Example #4
        # rewrite the callback: saving through the original model and not the multi-gpu model.
        model_checkpoint = MyCbk(model)
    else:
        new_model = build_model()
        if pretrained_path is not None:
            new_model.load_weights(pretrained_path)

    sgd = keras.optimizers.SGD(lr=1e-5,
                               momentum=0.9,
                               nesterov=True,
                               decay=1e-6)
    # adam = keras.optimizers.Adam(lr=0.001)
    new_model.compile(optimizer=sgd, loss=triplet_loss)

    new_model.summary()

    # Final callbacks
    callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]

    # Start Fine-tuning
    new_model.fit_generator(DataGenSequence('train'),
                            steps_per_epoch=num_train_samples // batch_size,
                            validation_data=DataGenSequence('valid'),
                            validation_steps=num_lfw_valid_samples // batch_size,
                            epochs=epochs,
                            verbose=1,
                            callbacks=callbacks,
                            use_multiprocessing=True,
                            workers=get_available_cpus() // 2)
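
The comment at the top of this example states MyCbk's purpose: when training on a multi_gpu_model wrapper, checkpoints should be written through the original single-GPU model so the saved weights reload cleanly later. A minimal sketch of such a callback (the filename pattern is an assumption):

import keras

class MyCbk(keras.callbacks.Callback):
    def __init__(self, model):
        super(MyCbk, self).__init__()
        # Keep a handle to the original, pre-multi_gpu_model network.
        self.model_to_save = model

    def on_epoch_end(self, epoch, logs=None):
        # Save through the wrapped model, not the multi-GPU replica.
        self.model_to_save.save('model.%02d.hdf5' % epoch)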
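
triplet_loss is likewise defined elsewhere. A common FaceNet-style formulation, sketched under the assumption that the model emits anchor, positive and negative embeddings concatenated in equal thirds along the last axis:

from keras import backend as K

def triplet_loss(y_true, y_pred, margin=0.2):
    # Split the concatenated embeddings back into the three branches.
    dim = K.int_shape(y_pred)[-1] // 3
    anchor = y_pred[:, :dim]
    positive = y_pred[:, dim:2 * dim]
    negative = y_pred[:, 2 * dim:]
    pos_dist = K.sum(K.square(anchor - positive), axis=-1)
    neg_dist = K.sum(K.square(anchor - negative), axis=-1)
    # Hinge on the distance gap: pull positives closer than negatives
    # by at least the margin.
    return K.mean(K.maximum(pos_dist - neg_dist + margin, 0.0))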
Example #5
    else:
        new_model = build_model()
        if pretrained_path is not None:
            new_model.load_weights(pretrained_path)

    # finetune the whole network together.
    for layer in new_model.layers:
        layer.trainable = True

    sgd = keras.optimizers.SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    new_model.compile(optimizer=sgd, loss=alpha_prediction_loss)

    new_model.summary()

    # Summarize then go!
    num_cpu = get_available_cpus()
    workers = int(round(num_cpu / 2))

    # Final callbacks
    callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]

    # Start Fine-tuning
    new_model.fit_generator(train_gen(),
                            steps_per_epoch=num_train_samples // batch_size,
                            validation_data=valid_gen(),
                            validation_steps=num_valid_samples // batch_size,
                            epochs=epochs,
                            verbose=1,
                            callbacks=callbacks,
                            use_multiprocessing=True,
                            workers=workers)
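
train_gen() and valid_gen(), used throughout these examples, only need to be infinite generators yielding (batch_x, batch_y) tuples, which is all fit_generator requires. A minimal sketch over in-memory arrays (assumed inputs, not the project's actual data pipeline):

import numpy as np

def make_batch_gen(x, y, batch_size):
    # Loop forever over shuffled mini-batches, as fit_generator expects.
    num_samples = len(x)
    while True:
        indices = np.random.permutation(num_samples)
        for start in range(0, num_samples - batch_size + 1, batch_size):
            batch = indices[start:start + batch_size]
            yield x[batch], y[batch]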