Esempio n. 1
0
def get_fc7(graph_filename, load_filename):
    """Run every utterance listed in *load_filename* through the frozen
    graph and collect its fc7 activations.

    Returns a (features, labels) pair where features[i] holds the fc7
    output for the i-th path; labels come straight from utils.load_paths.
    """
    graph = load_graph(graph_filename)

    # Tensor handles inside the imported ("prefix/"-scoped) graph.
    input_tensor = graph.get_tensor_by_name('prefix/input:0')
    prob = graph.get_tensor_by_name('prefix/test/prob:0')
    fc7 = graph.get_tensor_by_name('prefix/fc7/fc7:0')
    keep_prob = graph.get_tensor_by_name('prefix/Placeholder_1:0')

    paths, labels = utils.load_paths(load_filename, './')
    total = len(paths)
    features = []
    with tf.Session(graph=graph) as sess:
        for count, utterance_path in enumerate(paths, start=1):
            path.DataDir.percent_bar(count, total)
            batch = np.asarray(utils.load_inputs(utterance_path),
                               dtype=np.float32)
            # keep_prob = 1.0 -> dropout disabled at inference time.
            features.append(
                sess.run(fc7,
                         feed_dict={input_tensor: batch, keep_prob: 1.}))

    return features, labels
Esempio n. 2
0
def main(date, hour):
    """End-to-end forecast pipeline: pull GEFS files, compute forecast
    mean/spread, build the model climatology, and write the resulting
    netCDF products into paths['output'].

    Parameters
    ----------
    date, hour : str or None
        Forecast init date/hour; the literal string 'n' means "latest"
        and is translated to None for pull_gefs_files.
    """
    if date == 'n':
        date = None
    if hour == 'n':
        hour = None
    # Run relative to this script's directory so relative entries in the
    # path config resolve consistently.
    abspath = os.path.abspath(__file__)
    dname = os.path.dirname(abspath)
    os.chdir(dname)
    work_dir = os.getcwd()  # renamed from `dir` to stop shadowing the builtin
    paths = ut.load_paths(work_dir)
    paths['output'] = os.path.abspath(paths['output'])
    paths['data_store'] = os.path.abspath(paths['data_store'])
    pull_gefs_files(date=date, hour=hour)
    fmean, fsprd = run_fcsts(paths=paths)
    # Re-purpose `date` as the actual valid time taken from the forecast data.
    date = pd.to_datetime(fmean['valid_time'][0].values)
    # Single timestamp shared by every output filename (was repeated inline
    # four times).
    stamp = f'{date.year}{date.month:02}{date.day:02}_{date.hour:02}z'
    logging.info('mcli started')
    mc_mean, mc_std = run_mcli()
    mc_std = mc_std.dropna(dim='lat')
    mc_mean = mc_mean.dropna(dim='lat')
    fmean, fsprd = align_fmean_fsprd(fmean, fsprd, mc_mean)
    fmean.to_netcdf(f'{paths["output"]}/slp_mean_{stamp}.nc')
    fsprd.to_netcdf(f'{paths["output"]}/slp_sprd_{stamp}.nc')
    logging.info('fmean and spread saved, mcli finished')
    logging.info('percentile started')
    percentile = combine_fcast_and_mcli(fmean, mc_mean)
    gc.collect()  # climatology arrays are large; free them eagerly
    logging.info('percentile complete')
    subset_sprd = transforms.subset_sprd(percentile, mc_std)
    logging.info('spread subset complete')
    hsa_final, ssa_perc = transforms.hsa(fsprd, subset_sprd)
    ssa_perc.to_netcdf(f'{paths["output"]}/ssa_perc_{stamp}.nc')
    hsa_final.to_netcdf(f'{paths["output"]}/hsa_{stamp}.nc')
    logging.info('hsa and ssa percentile file created')
Esempio n. 3
0
def grid_search(model_generator, param_grid, feature_set):
    """Exhaustive hyper-parameter search over *param_grid* for the model
    built by *model_generator*, evaluated on *feature_set*."""
    print('\nGrid Search')

    # Load the pickled features/labels for this feature set.
    print('loading', feature_set)
    paths = u.load_paths('PATHS.yaml')  # get paths from file
    data_dir = paths['extracted_data']
    train_x, val_x, test_x = u.load_data(data_dir +
                                         'features_%s.p' % feature_set)
    train_y, val_y, test_y = u.load_data(data_dir +
                                         'labels_%s.p' % feature_set)

    # The validation split is not needed for CV-based search; fold it
    # into the held-out test data instead.
    test_x = np.vstack((val_x, test_x))
    test_y = np.vstack((val_y, test_y))

    # Grid search with the default 3-fold cross validation.
    classifier = KerasClassifier(model_generator,
                                 n_dim=train_x.shape[1:],
                                 n_labels=test_y.shape[1])
    validator = GridSearchCV(classifier,
                             param_grid=param_grid,
                             scoring='neg_log_loss',
                             verbose=3,
                             n_jobs=1)
    validator.fit(train_x, train_y)

    # Report best parameters / scores on the combined test data.
    search_results(validator, test_x, test_y)
Esempio n. 4
0
def random_search(model_generator, param_dist, n_iter, feature_set):
    """Randomized hyper-parameter search: sample *n_iter* settings from
    *param_dist* for the model built by *model_generator* and evaluate
    on *feature_set*."""
    # Fixed garbled banner ('\nRandomized {[Search witn %d iterations\n').
    print('\nRandomized Search with %d iterations\n' % n_iter)
    # load data
    print('loading', feature_set)
    paths = u.load_paths('PATHS.yaml')  # get paths from file
    train_x, val_x, test_x = u.load_data(paths['extracted_data'] +
                                         'features_%s.p' % feature_set)
    train_y, val_y, test_y = u.load_data(paths['extracted_data'] +
                                         'labels_%s.p' % feature_set)
    # dont use validation set for the search, add it to the test data
    test_x = np.vstack((val_x, test_x))
    test_y = np.vstack((val_y, test_y))

    # randomized search train with 3 fold validation
    classifier = KerasClassifier(model_generator,
                                 n_dim=train_x.shape[1:],
                                 n_labels=test_y.shape[1])
    validator = RandomizedSearchCV(classifier,
                                   param_distributions=param_dist,
                                   n_iter=n_iter,
                                   verbose=3)
    validator.fit(train_x, train_y)

    # report results on the combined test data
    search_results(validator, test_x, test_y)
def multiplier(IN_PATH, OUT_PATH, single=False, subfolders=True):
    """Apply a fixed battery of image transformations to IN_PATH,
    writing every variant under OUT_PATH.

    single: IN_PATH is one image file rather than a directory.
    subfolders: mirror the input sub-directory layout in the output.
    """
    # List of image file paths to process.
    images_path = [IN_PATH] if single else load_paths(IN_PATH)

    # Create the output directory if it does not already exist.
    makedirs(OUT_PATH, exist_ok=True)

    print('> Multiplication')
    print('- Dossier entree : "%s"' % IN_PATH)
    print('- Dossier sortie : "%s"' % OUT_PATH)

    keep_subfolders = not single or subfolders

    # Geometric copies, then colour-loss variants, then pixel-loss
    # variants — same operations and order as before.
    for operation in (copy, flip, rotation_90, rotation_180,
                      grayscale, invert, crop, blue):
        transform(images_path, OUT_PATH, operation, keep_subfolders)


#multiplier('../donnees-projet/Data', './sortie/multiplication')
Esempio n. 6
0
 def __init__(self,
              stat: str,
              variable: str,
              paths: dict = None,
              group: bool = False):
     """Configure a loader for one GEFS statistic.

     stat: 'mean' (stored as 'geavg') or 'sprd'/'std' (stored as
     'gespr'); any other value raises ValueError.
     variable: field name, normalised via self.convert_variable.
     paths: path mapping; fetched with ut.load_paths() when omitted.
     group: when True, eagerly load everything via self.load_all().
     """
     self.variable = self.convert_variable(variable)
     if stat == 'mean':
         stat = 'geavg'
     elif stat in ('sprd', 'std'):
         stat = 'gespr'
     else:
         raise ValueError('Stat must be mean or sprd/std')
     self.stat = stat
     # PEP 8: `is None` / plain truthiness instead of `== None` / `== True`.
     if paths is None:
         self.paths = ut.load_paths()
     else:
         self.paths = paths
     if group:
         self.load_all()
Esempio n. 7
0
def get_data(feature_set):
    """Load *feature_set* and package it as a dict with 'train', 'test'
    and 'n_classes' entries, labels as class indices (not one-hot)."""
    paths = u.load_paths('PATHS.yaml')  # get paths from file
    base = paths['extracted_data']
    train_x, val_x, test_x = u.load_data(base +
                                         'features_%s.p' % feature_set)
    train_y, val_y, test_y = u.load_data(base +
                                         'labels_%s.p' % feature_set)

    # No separate validation split is needed here; merge it into test.
    test_x = np.vstack((val_x, test_x))
    test_y = np.vstack((val_y, test_y))

    # Convert one-hot label matrices back to class indices.
    train_y = u.inv_one_hot_encode(train_y)
    test_y = u.inv_one_hot_encode(test_y)

    if feature_set == u.FEATURE_SET_SPECS_NORM:
        # Flatten each 128 x 128 spectrogram image into a vector.
        length = train_x.shape[1] * train_x.shape[2]
        train_x = train_x.reshape(train_x.shape[0], length)
        test_x = test_x.reshape(test_x.shape[0], length)

    data = {
        'train': {'X': train_x, 'y': train_y},
        'test': {'X': test_x, 'y': test_y},
        'n_classes': len(np.unique(train_y)),
    }

    print("dataset has %i training samples and %i test samples." %
          (len(data['train']['X']), len(data['test']['X'])))

    return data
Esempio n. 8
0
def train_model(model_generator, feature_set):
    """Build, train and evaluate the Keras model produced by
    *model_generator* on the pickled *feature_set*.

    Side effects: writes TensorBoard logs, a loss-curve png, the trained
    model as .h5, and a confusion-matrix plot. Returns None.
    """

    # TODO get data function that returns dict (options for one hot or not, val or not)
    # load dataset
    paths = u.load_paths('PATHS.yaml')  # get paths from file
    train_x, val_x, test_x = u.load_data(paths['extracted_data'] +
                                         'features_%s.p' % feature_set)
    train_y, val_y, test_y = u.load_data(paths['extracted_data'] +
                                         'labels_%s.p' % feature_set)

    # model_generator returns (name, compiled model, training config).
    model_name, model, training = model_generator(n_dim=train_x.shape[1:],
                                                  n_labels=test_y.shape[1])
    run_id = '%s_%s' % (model_name, datetime.datetime.now().isoformat())
    print('\nTrain and Evaluate: %s' % model_name)

    # callbacks: early stopping on validation loss + TensorBoard logging
    earlystop = EarlyStopping(monitor='val_loss',
                              patience=training.early_stop_patience,
                              verbose=1,
                              mode='auto')
    log_dir = os.path.join(paths['tensorboard_logs'], run_id)
    tensorboard = TensorBoard(log_dir=log_dir,
                              histogram_freq=3,
                              write_graph=True)
    # NOTE(review): nb_epoch / predict_proba / probas_to_classes are the
    # Keras 1.x API spellings — confirm against the installed version.
    t0 = time.time()
    history = model.fit(train_x,
                        train_y,
                        validation_data=(val_x, val_y),
                        callbacks=[earlystop, tensorboard],
                        nb_epoch=training.n_epoch,
                        batch_size=training.batch_size)
    training_time = time.time() - t0

    # test: predicted class = argmax over predicted class probabilities
    y_prob = model.predict_proba(test_x, verbose=0)
    y_pred = np_utils.probas_to_classes(y_prob)
    y_true = np.argmax(test_y, 1)

    # evaluate the model's accuracy
    t0 = time.time()
    score, accuracy = model.evaluate(test_x,
                                     test_y,
                                     batch_size=training.batch_size)
    testing_time = time.time() - t0
    cm = confusion_matrix(y_true, y_pred, labels=None)
    # p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average='micro')
    # roc = roc_auc_score(test_y, y_prob)
    # print("F-Score:", round(f, 2))  # similar value to the accuracy score, but useful for cross-checking
    # print("ROC:", round(roc, 3))

    # print results
    print("\nclassifier: %s" % model_name)
    print("training time: %0.4fs" % training_time)
    print("testing time: %0.4fs" % testing_time)
    print("accuracy: %0.4f" % accuracy)
    print("confusion matrix:\n%s" % cm)
    # print model.summary()

    # plot and save results
    fname = paths['model_save'] + model_name + '_accuracy_%0.2f' % accuracy
    u.plot_keras_loss(fname, history)  # saves plot png
    model.save(fname + '.h5')
    cm_path = './confusion_plots/%s' % model_name
    cm_title = '%s (Accuracy: %0.2f)' % (model_name, accuracy)
    u.plot_confusion_matrix(cm, cm_path, title=cm_title)
Esempio n. 9
0
# Build the database of known faces from the reference images.
known_names, known_faces_encoding = utils.create_database(
    '../Images/database/group1')

# Encode the probe images we want to identify, then add a catch-all
# 'unknown' label for probes that match nothing.
unknown_names, unknown_faces_encoding = utils.create_database(
    '../Images/test_face/group1')
known_names.append('unknown')

# Match every probe encoding against the known encodings.
index = []
for unknown_face_encoding in unknown_faces_encoding:
    index.append(
        utils.recognize_face(unknown_face_encoding, known_faces_encoding))
names = [known_names[i] for i in index]
utils.compare_result(unknown_names, names)

# Observed results:
# - group 1 only: 1 error out of 18 (accuracy 0.94)
# - group 2 only: 0 errors out of 8 (accuracy 1.00)
# - group 3 (all photos): 1 error out of 26 (accuracy 0.96)

filepath = '../Images/test_mouth/'
predictor_path = './shape_predictor_68_face_landmarks.dat'

# Locate the mouth in every image found under filepath.
paths = utils.load_paths(filepath)
image_paths = [os.path.join(filepath, f) for f in paths]

for image_path in image_paths:
    mouth_detection.mouth_location_image(image_path, predictor_path)
Esempio n. 10
0
                # NOTE(review): this chunk begins mid-branch — the `if`
                # matching the `elif` below, and the enclosing loop over
                # speakers (index i), are outside this view.
                save_features_Up(features_Vp, weights, train_utterance_file[i])
                print('save speaker %s train features utterance' %
                      DataDir.val_speaker[i])

                # Repeat the projection + save for the test split.
                test_features = np.load(test_features_file[i])
                features_Vp = get_features_Vp(test_features)
                save_features_Up(features_Vp, weights, test_utterance_file[i])
                print('save speaker %s test features utterance' %
                      DataDir.val_speaker[i])

        # solve weights
        elif sys.argv[1] == '-w':
            print('solve_w')
            train_features = np.load(train_features_file)
            features_Vp = get_features_Vp(train_features)
            paths, labels = utils.load_paths(train_filename, './')
            labels = np.asarray(labels)
            print(features_Vp.shape, labels.shape)
            weights, features_Up = solve_weights(features_Vp, labels)
            # NOTE(review): the solved weights are immediately replaced
            # with a constant array — looks like a debug override left
            # in; confirm whether solve_weights' output should be kept.
            weights = np.array([1])
            save_features_Up(features_Up, weights, train_utterance_file)

            # Same pipeline for the test split.
            test_features = np.load(test_features_file)
            features_Vp = get_features_Vp(test_features)
            paths, labels = utils.load_paths(test_filename, './')
            labels = np.asarray(labels)
            print(features_Vp.shape, labels.shape)
            weights, features_Up = solve_weights(features_Vp, labels)
            weights = np.array([1])
            save_features_Up(features_Up, weights, test_utterance_file)
Esempio n. 11
0
# -*- coding: utf-8 -*-
import sys
sys.path.insert(1, "../traitement-images")

from os import path, makedirs
from PIL import Image as pImage
from PIL import ImageOps
from utils import load_paths, add_suffix
import itertools
from filtres import *

# Source images (200x200 crops) and the experiment output directory.
IN_PATH = './200x200'
#IN_PATH = './test'
OUT_PATH = './dataset_exp'

# Every image path found under IN_PATH.
images_origin = load_paths(IN_PATH)


def transform(outpath, paths, fnc):
    """Apply *fnc* to every image in *paths*, writing results under
    *outpath*.

    NOTE(review): this definition appears truncated in this chunk —
    `result` is built but never returned, and the loop body stops right
    after computing `transformed_path` without saving anything.
    """
    result = []

    for image_path in paths:
        image = pImage.open(image_path)
        print('"%s" => %s' % (image_path, fnc.__name__))
        transformed = fnc(image)

        # Strip the two innermost directory levels to get the input root.
        in_path = path.dirname(path.dirname(image_path))

        transformed_path = add_suffix(in_path, outpath, image_path,
                                      '_' + fnc.__name__)
import utils as u

paths = u.load_paths('PATHS.yaml')  # get paths from file
AUDIO_DIR = paths['audio_data']      # root directory of the audio folds
SAVE_DIR = paths['extracted_data']   # where pickled features/labels go
# Fold split: 8 training folds, one validation fold, one test fold,
# plus a small sample fold for quick smoke tests.
TRAIN = [
    'fold1', 'fold2', 'fold3', 'fold4', 'fold5', 'fold6', 'fold7', 'fold8'
]
VAL = ['fold9']
TEST = ['fold10']
SAMPLE = ['fold_sample']


def dump_features(name, extractor):
    """Extract train/val/test features with *extractor* and pickle them
    to SAVE_DIR as 'features_<name>' and 'labels_<name>'."""
    print('Extracting %s' % name)
    fnames = ['features_' + name, 'labels_' + name]

    # Extract each split in the same order as before: train, val, test.
    splits = [extractor(AUDIO_DIR, folds) for folds in (TRAIN, VAL, TEST)]
    feature_sets = tuple(features for features, _ in splits)
    label_sets = tuple(labels for _, labels in splits)

    u.dump_data(SAVE_DIR, [feature_sets, label_sets], fnames)


def dump_sample(name, extractor):
    """Extract the sample fold with *extractor* and pickle it to
    SAVE_DIR as 'features_<name>' and 'labels_<name>'."""
    print('Extracting %s' % name)
    fnames = ['features_' + name, 'labels_' + name]
    samp_features, samp_labels = extractor(AUDIO_DIR, SAMPLE)
    # NOTE(review): unlike dump_features, the entries here are bare
    # arrays, not tuples — `(x)` is just x. Confirm u.dump_data accepts
    # this shape as intended.
    data = [samp_features, samp_labels]
    u.dump_data(SAVE_DIR, data, fnames)