# Import the required cbrain functions
#from cbrain.imports import *
import os 
os.environ["CUDA_VISIBLE_DEVICES"]="0"
from imports import *
from data_generator import *
from models import *
import time
from utils import limit_mem


t0 = time.time()

# If you are running on a GPU, call this first;
# otherwise TensorFlow will grab all of your GPU memory up front.
limit_mem()
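
# `limit_mem` itself is not shown in this excerpt. As a rough, hypothetical sketch
# (an assumption, not the actual cbrain implementation), a TF1-era helper like this
# usually just switches the Keras session to on-demand GPU memory allocation:
def limit_mem_sketch():
    import tensorflow as tf
    import keras.backend as K
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # grab GPU memory as needed, not all at once
    K.set_session(tf.Session(config=config))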

DATADIR = 'Preprocessed_Data/SPCAM5_12_Months/'

train_gen = DataGenerator(
    data_dir=DATADIR, 
    feature_fn='full_physics_essentials_train_month01_shuffle_features.nc',
    target_fn='full_physics_essentials_train_month01_shuffle_targets.nc',
    batch_size=512,
    norm_fn='full_physics_essentials_train_month01_norm.nc',
    fsub='feature_means', 
    fdiv='feature_stds', 
    tmult='target_conv',
    shuffle=True,
)
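
# The excerpt stops after building the generator. A minimal sketch of how a
# generator like `train_gen` is typically consumed by a Keras model (everything
# below is an assumption: the layer widths are placeholders, and `train_gen` is
# assumed to yield (features, targets) batches that `fit_generator` accepts):
from keras.models import Sequential
from keras.layers import Dense

sketch_model = Sequential([
    Dense(256, activation='relu', input_shape=(94,)),  # placeholder input width
    Dense(256, activation='relu'),
    Dense(65),                                          # placeholder output width
])
sketch_model.compile(optimizer='adam', loss='mse')
# sketch_model.fit_generator(train_gen, epochs=10)  # generator interface assumed, see note above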
Example #2
    parser.add_argument('--predict-test-file',
                        dest='predict_test_file')
    parser.add_argument('--nn', required=True)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--n-est', type=int, default=10, dest='n_est')
    parser.add_argument('--lrate', type=float, default=1e-3)
    parser.add_argument('--early-stop', type=int, dest='n_stop')
    parser.add_argument('--batch-size', type=int, dest='batch_size')
    parser.add_argument('--log-file', required=True, dest='log_file')

    args = parser.parse_args()

    logging.basicConfig(format='%(asctime)s   %(levelname)s   %(message)s',
                        level=logging.DEBUG,
                        filename=args.log_file,
                        datefmt='%Y-%m-%d %H:%M:%S')

    start = time.time()
    limit_mem(args.gpu)
    train_predict(train_file=args.train_file,
                  test_file=args.test_file,
                  model_file=args.model_file,
                  predict_valid_file=args.predict_valid_file,
                  predict_test_file=args.predict_test_file,
                  nn=args.nn,
                  n_est=args.n_est,
                  lrate=args.lrate,
                  n_stop=args.n_stop,
                  batch_size=args.batch_size)
    logging.info('finished ({:.2f} min elapsed)'.format(
        (time.time() - start) / 60))
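
# Here `limit_mem` is called with a GPU index. A hypothetical sketch of such a
# helper (an assumption, not the implementation this script imports) extends the
# allow-growth idea with device pinning:
def limit_mem_gpu_sketch(gpu=0):
    import os
    import tensorflow as tf
    from keras import backend as K
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)  # expose only the chosen GPU
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True         # allocate memory on demand
    K.set_session(tf.Session(config=config))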
Example #3
def train_vgg16(X_train, X_angle, target_train, X_test, X_test_angle, K):
    print("Running vgg16")
    folds = list(
        StratifiedKFold(n_splits=K, shuffle=True,
                        random_state=16).split(X_train, target_train))
    y_test_pred_log = 0
    y_train_pred_log = 0
    y_valid_pred_log = 0.0 * target_train
    for j, (train_idx, test_idx) in enumerate(folds):
        print('\n===================FOLD=', j + 1)
        limit_mem()
        X_train_cv = X_train[train_idx]
        y_train_cv = target_train[train_idx]
        X_holdout = X_train[test_idx]
        Y_holdout = target_train[test_idx]

        # Angle features for this fold
        X_angle_cv = X_angle[train_idx]
        X_angle_hold = X_angle[test_idx]

        # Define file path and get callbacks
        file_path = "%s_aug_model_weights.hdf5" % j
        callbacks = get_callbacks(filepath=file_path, patience=10)
        gen_flow = gen_flow_for_two_inputs(X_train_cv, X_angle_cv, y_train_cv)
        galaxyModel = Vgg16Model()
        galaxyModel.fit_generator(gen_flow,
                                  steps_per_epoch=24,
                                  epochs=100,
                                  shuffle=True,
                                  verbose=1,
                                  validation_data=([X_holdout,
                                                    X_angle_hold], Y_holdout),
                                  callbacks=callbacks)

        # Reload the best weights saved during training
        galaxyModel.load_weights(filepath=file_path)
        # Training score for this fold
        score = galaxyModel.evaluate([X_train_cv, X_angle_cv],
                                     y_train_cv,
                                     verbose=0)
        print('Train loss:', score[0])
        print('Train accuracy:', score[1])
        # Score on this fold's holdout set
        score = galaxyModel.evaluate([X_holdout, X_angle_hold],
                                     Y_holdout,
                                     verbose=0)
        print('Test loss:', score[0])
        print('Test accuracy:', score[1])

        # Out-of-fold predictions for this fold's holdout samples
        pred_valid = galaxyModel.predict([X_holdout, X_angle_hold])
        y_valid_pred_log[test_idx] = pred_valid.reshape(pred_valid.shape[0])

        # Accumulate predictions on the test set
        temp_test = galaxyModel.predict([X_test, X_test_angle])
        y_test_pred_log += temp_test.reshape(temp_test.shape[0])

        # Accumulate predictions on the full training set
        temp_train = galaxyModel.predict([X_train, X_angle])
        y_train_pred_log += temp_train.reshape(temp_train.shape[0])

    y_test_pred_log = y_test_pred_log / K
    y_train_pred_log = y_train_pred_log / K

    print('\n Train Log Loss = ',
          log_loss(target_train, y_train_pred_log))
    print(' Out-of-fold Validation Log Loss = ',
          log_loss(target_train, y_valid_pred_log))
    return y_test_pred_log
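
# `get_callbacks` and `gen_flow_for_two_inputs` are defined elsewhere in the
# original script. As a hypothetical sketch (an assumption, not the author's
# helper), `get_callbacks` commonly bundles early stopping with checkpointing:
def get_callbacks_sketch(filepath, patience=10):
    from keras.callbacks import EarlyStopping, ModelCheckpoint
    early_stop = EarlyStopping(monitor='val_loss', patience=patience, mode='min')
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss',
                                 save_best_only=True, mode='min')
    return [early_stop, checkpoint]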
Example #4
import bcolz
import matplotlib.pyplot as plt
from scipy.ndimage import filters

from keras import backend as K
from keras import metrics
from keras.models import Model, Input
from keras.layers import Lambda
from keras.applications.vgg16 import VGG16

import utils  # local helper module providing limit_mem()



%matplotlib inline
plt.rcParams['figure.figsize'] = (3, 3)

# %% Limit GPU memory usage
################################################################################
utils.limit_mem()

# %% Set paths
################################################################################
path = '/home/riley/Work/fast_ai/2/data/imgnet_smpl/'

# %% Load data
################################################################################
n_imgs = 1000
arr_lr = bcolz.open(path+'trn_resized_72.bc')[:n_imgs]
arr_hr = bcolz.open(path+'trn_resized_288.bc')[:n_imgs]

# %% Define super-res model
################################################################################
inp = Input(arr_lr.shape[1:])
x = conv_block(inp, filters=64, kernel_size=(9, 9), strides=(1, 1))
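
# `conv_block` is a helper from the original notebook and is not shown here. A
# hypothetical sketch of a typical block used in these super-resolution examples
# (an assumption, not the original helper): Conv2D followed by BatchNorm and ReLU.
from keras.layers import Conv2D, BatchNormalization, Activation

def conv_block_sketch(x, filters, kernel_size=(3, 3), strides=(1, 1), act=True):
    x = Conv2D(filters, kernel_size, strides=strides, padding='same')(x)
    x = BatchNormalization()(x)
    return Activation('relu')(x) if act else x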