Example 1
import json

from imgclas import config  # module-level imports assumed for this excerpt


def update_with_query_conf(user_args):
    """
    Update the default YAML configuration with the user's input args from the API query
    """
    # Update the default conf with the user input
    CONF = config.CONF
    for group, val in sorted(CONF.items()):
        for g_key, g_val in sorted(val.items()):
            if g_key in user_args:
                g_val['value'] = json.loads(user_args[g_key])

    # Check and save the configuration
    config.check_conf(conf=CONF)
    config.conf_dict = config.get_conf_dict(conf=CONF)
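A minimal usage sketch follows (the 'num_classes' and 'batch_size' keys are illustrative assumptions, not necessarily keys of the repo's actual conf groups); note that every value arrives as a JSON-encoded string, which is why the function decodes it with json.loads:

# Hypothetical API query args: every value is a JSON-encoded string
user_args = {'num_classes': '1000', 'batch_size': '32'}
update_with_query_conf(user_args)
# config.conf_dict now holds the decoded overrides, e.g. 1000 as an int, not a str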
Example 2
from imgclas import config  # module-level import assumed for this excerpt


def update_with_saved_conf(saved_conf):
    """
    Update the default YAML configuration with the configuration saved from training
    """
    # Update the default conf with the user input
    CONF = config.CONF
    for group, val in sorted(CONF.items()):
        if group in saved_conf.keys():
            for g_key, g_val in sorted(val.items()):
                if g_key in saved_conf[group].keys():
                    g_val['value'] = saved_conf[group][g_key]

    # Check and save the configuration
    config.check_conf(conf=CONF)
    config.conf_dict = config.get_conf_dict(conf=CONF)
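Unlike Example 1, saved_conf carries already-decoded values, nested by configuration group. A hypothetical shape (group and key names are illustrative assumptions):

# Hypothetical configuration restored from a previous training run
saved_conf = {'model': {'num_classes': 1000},
              'training': {'batch_size': 32}}
update_with_saved_conf(saved_conf)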
Example 3
import os
import json

import numpy as np
import keras
from keras import applications

from imgclas import config, paths, utils  # module-level imports assumed for this excerpt

# model_modes (model name -> preprocessing mode) and save_conf are helpers defined
# elsewhere in this same module (Example 4 calls the latter as model_utils.save_conf)


def save_default_imagenet_model():
    """
    Create a model in models_dir with default ImageNet training
    """
    CONF = config.get_conf_dict()
    TIMESTAMP = 'default_imagenet'

    # Clear default conf and create custom conf
    for k, v in CONF.items():
        if k in ['general', 'augmentation']:
            continue
        for i, j in v.items():
            CONF[k][i] = None
    CONF['augmentation']['train_mode'] = None

    CONF['model']['modelname'] = 'Xception'
    CONF['model']['image_size'] = 224
    CONF['model']['preprocess_mode'] = model_modes[CONF['model']['modelname']]
    CONF['model']['num_classes'] = 1000
    CONF['dataset']['mean_RGB'] = [123.675, 116.28, 103.53]
    CONF['dataset']['std_RGB'] = [58.395, 57.12, 57.375]

    paths.timestamp = TIMESTAMP
    paths.CONF = CONF

    # Create classes.txt for ImageNet
    fpath = keras.utils.get_file(
        'imagenet_class_index.json',
        'https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json',
        cache_subdir='models',
        file_hash='c2c37ea517e94d9795004a39431a14cb')
    with open(fpath) as f:
        classes = json.load(f)
    classes = np.array(list(classes.values()))[:, 1]

    # Create the model
    architecture = getattr(applications, CONF['model']['modelname'])
    img_width, img_height = CONF['model']['image_size'], CONF['model']['image_size']
    model = architecture(weights='imagenet', include_top=True, input_shape=(img_width, img_height, 3))
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

    # Save everything
    utils.create_dir_tree()
    np.savetxt(os.path.join(paths.get_ts_splits_dir(), 'classes.txt'), classes, fmt='%s', delimiter='\n')
    save_conf(CONF)
    model.save(os.path.join(paths.get_checkpoints_dir(), 'final_model.h5'),
               include_optimizer=False)
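A side note on the classes step above: imagenet_class_index.json maps each class index to a [WordNet ID, human-readable name] pair, so the [:, 1] slice keeps only the names. In miniature:

import numpy as np

# Same structure as imagenet_class_index.json, truncated to two entries
classes = {'0': ['n01440764', 'tench'], '1': ['n01443537', 'goldfish']}
names = np.array(list(classes.values()))[:, 1]
print(names)  # -> ['tench' 'goldfish']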
Example 4
    # Tail of train_fn(TIMESTAMP, CONF): persist the training statistics,
    # the configuration and the final model
    stats = {'epoch': history.epoch,
             'training time (s)': round(time.time()-t0, 2),
             'timestamp': TIMESTAMP}
    stats.update(history.history)
    stats = json_friendly(stats)
    stats_dir = paths.get_stats_dir()
    with open(os.path.join(stats_dir, 'stats.json'), 'w') as outfile:
        json.dump(stats, outfile, sort_keys=True, indent=4)

    print('Saving the configuration ...')
    model_utils.save_conf(CONF)

    print('Saving the model to h5...')
    fpath = os.path.join(paths.get_checkpoints_dir(), 'final_model.h5')
    model.save(fpath,
               include_optimizer=False)

    # print('Saving the model to protobuf...')
    # fpath = os.path.join(paths.get_checkpoints_dir(), 'final_model.proto')
    # model_utils.save_to_pb(model, fpath)

    print('Finished')


if __name__ == '__main__':

    CONF = config.get_conf_dict()
    timestamp = datetime.now().strftime('%Y-%m-%d_%H%M%S')

    train_fn(TIMESTAMP=timestamp, CONF=CONF)
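json_friendly is not shown in this excerpt. A minimal sketch of what such a helper plausibly does (an assumption, not the repo's actual code): convert the numpy scalars and arrays that end up in history.history, which json.dump cannot serialize, into plain Python types:

import numpy as np

def json_friendly(d):
    # Hypothetical helper: return a copy of d with numpy types converted
    # to JSON-serializable Python types
    out = {}
    for k, v in d.items():
        if isinstance(v, np.ndarray):
            out[k] = v.tolist()
        elif isinstance(v, (np.integer, np.floating)):
            out[k] = v.item()
        elif isinstance(v, list):
            out[k] = [x.item() if isinstance(x, (np.integer, np.floating)) else x
                      for x in v]
        else:
            out[k] = v
    return out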
Example 5
# Import everything we need, as in Ignacio's Jupyter notebook 1.0
import os

import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm

import imgclas
from imgclas import paths, config
from imgclas.data_utils import load_image, load_data_splits, augment, load_class_names

# Start preparing all the data

CONF = config.get_conf_dict()  # dict with the whole configuration from the YAML
splits_dir = paths.get_splits_dir()  # base + data + dataset_files
# Load the training data
X_train, y_train = load_data_splits(splits_dir=splits_dir,
                                    im_dir=CONF['general']['images_directory'],
                                    split_name='train')

# Load the validation data
if CONF['training']['use_validation'] and ('val.txt' in os.listdir(splits_dir)):
    X_val, y_val = load_data_splits(splits_dir=splits_dir,
                                    im_dir=CONF['general']['images_directory'],
                                    split_name='val')
# load_data_splits checks that the file it is given exists (train.txt, val.txt, etc.). Then, with
# numpy.genfromtxt, it reads an array whose first column holds the image paths and whose second
# column holds the labels. Finally, it returns one numpy array with the absolute paths to the images
# of the requested split (train, or whichever you passed), and another with the labels as int32,
# indicating which class each image belongs to.
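Putting those comments into code, a minimal sketch of what load_data_splits presumably does (assumed behavior and split-file layout, not the actual imgclas source):

import os

import numpy as np

def load_data_splits_sketch(splits_dir, im_dir, split_name='train'):
    # Check that the requested split file exists (train.txt, val.txt, ...)
    split_path = os.path.join(splits_dir, split_name + '.txt')
    if not os.path.isfile(split_path):
        raise ValueError('Missing split file: {}'.format(split_path))
    # First column: image paths relative to im_dir; second column: labels
    split = np.genfromtxt(split_path, dtype='str', delimiter=' ')
    X = np.array([os.path.join(im_dir, p) for p in split[:, 0]])
    y = split[:, 1].astype(np.int32)
    return X, y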