Example #1
def train_multiple_networks():
    '''
    Trains a list of CNNs.
    '''

    metadata = data_preparation.load_metadata()
    metadata, labels = data_preparation.preprocess_metadata(metadata)
    train, valid = data_preparation.stratify_train_test_split(metadata)

    # For these image sizes a physical batch of 256 fits in memory, so plain
    # Adam suffices; if BATCH_SIZE is configured below 256, fall back to
    # gradient accumulation to reach the same effective batch size.
    optimizer = 'adam'
    if params.BATCH_SIZE < 256:
        optimizer = gradient_accumulation.AdamAccumulate(
            lr=params.LEARNING_RATE, accum_iters=params.ACCUMULATION_STEPS)

    base_models = [
        [MobileNet, params.MOBILENET_IMG_SIZE, MobileNet_preprocess_input],
        [InceptionResNetV2, params.INCEPTIONRESNETV2_IMG_SIZE,
            InceptionResNetV2_preprocess_input],
        [VGG19, params.VGG19_IMG_SIZE, VGG19_preprocess_input],
        [InceptionV3, params.INCEPTIONV3_IMG_SIZE, InceptionV3_preprocess_input],
        [MobileNetV2, params.MOBILENETV2_IMG_SIZE, MobileNetV2_preprocess_input],
        [NASNetLarge, params.NASNETLARGE_IMG_SIZE, NASNetLarge_preprocess_input],
    ]

    # Uncomment to also train the simple (non-attention) variant of each model:
    # for [_Model, input_shape, preprocess_input] in base_models:
    #     train_model(_Model, input_shape, preprocess_input,
    #                 train, valid, labels,
    #                 create_simple_model, optimizer, 'simple')

    for [_Model, input_shape, preprocess_input] in base_models:
        train_model(_Model, input_shape, preprocess_input,
                    train, valid, labels,
                    create_attention_model, optimizer, 'attention')
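
# gradient_accumulation.AdamAccumulate is not shown in this file. As a hedged
# illustration only, here is a minimal standalone sketch of the
# gradient-accumulation idea it presumably implements (sum gradients over
# accum_iters mini-batches, then apply their average), written as a plain
# TensorFlow 2.x loop rather than as a Keras optimizer subclass.
import tensorflow as tf

def fit_with_accumulation(model, loss_fn, optimizer, dataset, accum_iters):
    # One running-sum buffer per trainable variable, initialized to zero.
    accum = [tf.zeros_like(v) for v in model.trainable_variables]
    for step, (x, y) in enumerate(dataset):
        with tf.GradientTape() as tape:
            loss = loss_fn(y, model(x, training=True))
        grads = tape.gradient(loss, model.trainable_variables)
        accum = [a + g for a, g in zip(accum, grads)]
        # Apply the averaged gradients once every accum_iters mini-batches,
        # emulating a batch accum_iters times larger than fits in memory.
        if (step + 1) % accum_iters == 0:
            optimizer.apply_gradients(
                zip([a / accum_iters for a in accum],
                    model.trainable_variables))
            accum = [tf.zeros_like(v) for v in model.trainable_variables]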
Example #2
def loop_in_combinations(callback, image_size=None, transfer_learning=True, use_preprocess_input=False):
    '''
    Invokes callback for every combination of base model and custom head.
    '''

    metadata = data_preparation.load_metadata()
    metadata, labels = data_preparation.preprocess_metadata(metadata)
    train, valid = data_preparation.stratify_train_test_split(metadata)

    # Use plain Adam by default; if params specifies a different default
    # optimizer, fall back to gradient accumulation instead.
    optimizer = 'adam'
    if params.DEFAULT_OPTIMIZER != optimizer:
        optimizer = gradient_accumulation.AdamAccumulate(
            lr=params.LEARNING_RATE, accum_iters=params.ACCUMULATION_STEPS)

    unfrozen = '' if transfer_learning else 'unfrozen_'
    custom_layers = [
        [create_attention_model, unfrozen+'latest_attention'],
        [create_simple_model, unfrozen+'latest_simple'],
    ]

    for [custom_layer, name_prefix] in custom_layers:
        # base_models is assumed to be defined at module scope
        # (e.g. the list shown in Example #1).
        for [_Model, input_shape, preprocess_input] in base_models:
            _image_size = image_size
            if _image_size is None:
                _image_size = input_shape
            _preprocess_input = preprocess_input
            if not use_preprocess_input:
                _preprocess_input = None
            callback(_Model, _image_size, transfer_learning, _preprocess_input,
                     train, valid, labels,
                     custom_layer, optimizer, name_prefix)
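
# Hypothetical usage sketch: loop_in_combinations forwards exactly ten
# positional arguments to its callback, so any callback must accept this
# signature. The callback name below is illustrative, not from the repo.
def log_only_callback(_Model, image_size, transfer_learning, preprocess_input,
                      train, valid, labels, custom_layer, optimizer,
                      name_prefix):
    # Dry run: report which combination would be trained.
    print(f'{name_prefix}: {_Model.__name__} @ {image_size}')

loop_in_combinations(log_only_callback, transfer_learning=False)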
Example #3
def predict():
    '''
    Predicts scores for the input images.
    '''

    metadata = data_preparation.load_metadata(os.path.join(params.DATA_FOLDER, 'demo', 'pictures360new'))
    
    # Labels are hard-coded here instead of being derived from the metadata:
    # metadata, labels = data_preparation.preprocess_metadata(metadata)
    labels = ['scenic', 'non-scenic']

    # Gradient accumulation only matters during training, so plain Adam is
    # always used here; the accumulation branch is intentionally disabled.
    optimizer = 'adam'

    base_models = [
        [VGG19, params.VGG19_IMG_SIZE, VGG19_preprocess_input],
    ]
    for [_Model, input_shape, preprocess_input] in base_models:
        predict_model(_Model, input_shape, preprocess_input,
                      metadata, labels,
                      create_attention_model, optimizer, 'attention')
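
# predict_model is defined elsewhere in the repo. Purely as a hedged sketch of
# what such a prediction pass could look like: the weight-file naming, the
# single-integer input_shape, and the metadata 'path' column assumed below are
# illustrative guesses, not confirmed by this file.
import numpy as np
import tensorflow as tf

def predict_model_sketch(_Model, input_shape, preprocess_input,
                         metadata, labels, create_model, optimizer,
                         name_prefix):
    # optimizer is unused at inference; kept only to mirror the call above.
    model = create_model(_Model, input_shape)  # assumed factory signature
    model.load_weights(f'{name_prefix}_{_Model.__name__}.h5')  # assumed naming
    # Assumed: metadata carries a 'path' column with image file locations.
    batch = np.stack([
        tf.keras.preprocessing.image.img_to_array(
            tf.keras.preprocessing.image.load_img(
                path, target_size=(input_shape, input_shape)))
        for path in metadata['path']])
    if preprocess_input is not None:
        batch = preprocess_input(batch)
    scores = model.predict(batch)
    # Map each softmax row to its label, e.g. 'scenic' vs 'non-scenic'.
    return [labels[int(np.argmax(row))] for row in scores]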