def train_model(gParameters, models, X_train, Y_train, X_test, Y_test, fold, verbose=False):
    base_run_id = gParameters['run_id']
    for epoch in range(gParameters['epochs']):
        for k in range(len(models)):
            model = models[k]
            gParameters['run_id'] = base_run_id + ".{}.{}.{}".format(fold, epoch, k)
            candleRemoteMonitor = candle.CandleRemoteMonitor(params=gParameters)
            timeoutMonitor = candle.TerminateOnTimeOut(gParameters['timeout'])
            model.fit({'input': X_train[k]}, {'out_' + str(k): Y_train[k]},
                      epochs=1,
                      verbose=verbose,
                      callbacks=[candleRemoteMonitor, timeoutMonitor],
                      batch_size=gParameters['batch_size'],
                      validation_data=(X_test[k], Y_test[k]))
    return models
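
# Illustration only: train_model tags every fit call with a run_id of the form
# "<base>.<fold>.<epoch>.<model_index>", so per-task metrics reported by
# CandleRemoteMonitor can be traced back to a specific fold/epoch/model. A
# standalone sketch of that naming scheme (values are hypothetical, not from
# the benchmark):
def _example_run_ids(base_run_id="EXP000.RUN000", folds=2, epochs=2, n_models=2):
    ids = []
    for fold in range(folds):
        for epoch in range(epochs):
            for k in range(n_models):
                ids.append(base_run_id + ".{}.{}.{}".format(fold, epoch, k))
    return ids
# _example_run_ids()[:3] -> ['EXP000.RUN000.0.0.0', 'EXP000.RUN000.0.0.1', 'EXP000.RUN000.0.1.0']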
def run(gParameters):
    print('Params:', gParameters)

    file_train = gParameters['train_data']
    file_test = gParameters['test_data']
    url = gParameters['data_url']

    train_file = candle.get_file(file_train, url + file_train, cache_subdir='Pilot1')
    test_file = candle.get_file(file_test, url + file_test, cache_subdir='Pilot1')

    X_train, Y_train, X_test, Y_test = load_data(train_file, test_file, gParameters)

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)
    print('Y_train shape:', Y_train.shape)
    print('Y_test shape:', Y_test.shape)

    x_train_len = X_train.shape[1]

    # this reshaping is critical for the Conv1D to work
    X_train = np.expand_dims(X_train, axis=2)
    X_test = np.expand_dims(X_test, axis=2)

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    # Have to add the K.clear_session() call below; otherwise, as ALW found on
    # 2020-11-15, Supervisor jobs using canonically CANDLE-compliant model
    # scripts die as soon as a particular task is used a second time:
    # EXCEPTION:
    # InvalidArgumentError() ...
    # File "<string>", line 23, in <module>
    # File "/gpfs/alpine/med106/world-shared/candle/2020-11-11/checkouts/Supervisor/workflows/common/python/model_runner.py", line 241, in run_model
    #     result, history = run(hyper_parameter_map, obj_return)
    # File "/gpfs/alpine/med106/world-shared/candle/2020-11-11/checkouts/Supervisor/workflows/common/python/model_runner.py", line 169, in run
    #     history = pkg.run(params)
    # File "/gpfs/alpine/med106/world-shared/candle/2020-11-11/checkouts/Benchmarks/Pilot1/NT3/nt3_candle_wrappers_baseline_keras2.py", line 211, in run
    #     callbacks=[csv_logger, reduce_lr, candleRemoteMonitor, timeoutMonitor])
    # File "/gpfs/alpine/world-shared/med106/sw/condaenv-200408/lib/python3.6/site-packages/keras/engine/training.py", line 1178, in fit
    #     validation_freq=validation_freq)
    # File "/gpfs/alpine/world-shared/med106/sw/condaenv-200408/lib/python3.6/site-packages/keras/engine/training_arrays.py", line 204, in fit_loop
    #     outs = fit_function(ins_batch)
    # File "/gpfs/alpine/world-shared/med106/sw/condaenv-200408/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2979, in __call__
    #     return self._call(inputs)
    # File "/gpfs/alpine/world-shared/med106/sw/condaenv-200408/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2933, in _call
    #     session)
    # File "/gpfs/alpine/world-shared/med106/sw/condaenv-200408/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2885, in _make_callable
    #     callable_fn = session._make_callable_from_options(callable_opts)
    # File "/gpfs/alpine/world-shared/med106/sw/condaenv-200408/lib/python3.6/site-packages/tensorflow_core/python/client/session.py", line 1505, in _make_callable_from_options
    #     return BaseSession._Callable(self, callable_options)
    # File "/gpfs/alpine/world-shared/med106/sw/condaenv-200408/lib/python3.6/site-packages/tensorflow_core/python/client/session.py", line 1460, in __init__
    #     session._session, options_ptr)
    K.clear_session()

    model = Sequential()

    layer_list = list(range(0, len(gParameters['conv']), 3))
    for _, i in enumerate(layer_list):
        filters = gParameters['conv'][i]
        filter_len = gParameters['conv'][i + 1]
        stride = gParameters['conv'][i + 2]
        print(int(i / 3), filters, filter_len, stride)
        if gParameters['pool']:
            pool_list = gParameters['pool']
            if type(pool_list) != list:
                pool_list = list(pool_list)
        if filters <= 0 or filter_len <= 0 or stride <= 0:
            break
        if 'locally_connected' in gParameters:
            model.add(LocallyConnected1D(filters, filter_len, strides=stride,
                                         padding='valid',
                                         input_shape=(x_train_len, 1)))
        else:
            # input layer
            if i == 0:
                model.add(Conv1D(filters=filters, kernel_size=filter_len,
                                 strides=stride, padding='valid',
                                 input_shape=(x_train_len, 1)))
            else:
                model.add(Conv1D(filters=filters, kernel_size=filter_len,
                                 strides=stride, padding='valid'))
        model.add(Activation(gParameters['activation']))
        if gParameters['pool']:
            model.add(MaxPooling1D(pool_size=pool_list[int(i / 3)]))

    model.add(Flatten())

    for layer in gParameters['dense']:
        if layer:
            model.add(Dense(layer))
            model.add(Activation(gParameters['activation']))
            if gParameters['dropout']:
                model.add(Dropout(gParameters['dropout']))

    model.add(Dense(gParameters['classes']))
    model.add(Activation(gParameters['out_activation']))

    # Reference case
    # model.add(Conv1D(filters=128, kernel_size=20, strides=1, padding='valid', input_shape=(P, 1)))
    # model.add(Activation('relu'))
    # model.add(MaxPooling1D(pool_size=1))
    # model.add(Conv1D(filters=128, kernel_size=10, strides=1, padding='valid'))
    # model.add(Activation('relu'))
    # model.add(MaxPooling1D(pool_size=10))
    # model.add(Flatten())
    # model.add(Dense(200))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.1))
    # model.add(Dense(20))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.1))
    # model.add(Dense(CLASSES))
    # model.add(Activation('softmax'))

    kerasDefaults = candle.keras_default_config()

    # Define optimizer
    optimizer = candle.build_optimizer(gParameters['optimizer'],
                                       gParameters['learning_rate'],
                                       kerasDefaults)

    model.summary()
    model.compile(loss=gParameters['loss'],
                  optimizer=optimizer,
                  metrics=[gParameters['metrics']])

    output_dir = gParameters['output_dir']
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # calculate trainable and non-trainable params
    gParameters.update(candle.compute_trainable_params(model))

    # set up a bunch of callbacks to do work during model training..
    model_name = gParameters['model_name']
    path = '{}/{}.autosave.model.h5'.format(output_dir, model_name)
    # checkpointer = ModelCheckpoint(filepath=path, verbose=1, save_weights_only=False, save_best_only=True)
    csv_logger = CSVLogger('{}/training.log'.format(output_dir))
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10,
                                  verbose=1, mode='auto', epsilon=0.0001,
                                  cooldown=0, min_lr=0)
    candleRemoteMonitor = candle.CandleRemoteMonitor(params=gParameters)
    timeoutMonitor = candle.TerminateOnTimeOut(gParameters['timeout'])

    history = model.fit(X_train, Y_train,
                        batch_size=gParameters['batch_size'],
                        epochs=gParameters['epochs'],
                        verbose=1,
                        validation_data=(X_test, Y_test),
                        callbacks=[csv_logger, reduce_lr, candleRemoteMonitor, timeoutMonitor])

    score = model.evaluate(X_test, Y_test, verbose=0)

    # the serialization/reload block below is disabled in this wrappers variant
    if False:
        print('Test score:', score[0])
        print('Test accuracy:', score[1])

        # serialize model to JSON
        model_json = model.to_json()
        with open("{}/{}.model.json".format(output_dir, model_name), "w") as json_file:
            json_file.write(model_json)

        # serialize model to YAML
        model_yaml = model.to_yaml()
        with open("{}/{}.model.yaml".format(output_dir, model_name), "w") as yaml_file:
            yaml_file.write(model_yaml)

        # serialize weights to HDF5
        model.save_weights("{}/{}.weights.h5".format(output_dir, model_name))
        print("Saved model to disk")

        # load json and create model
        json_file = open('{}/{}.model.json'.format(output_dir, model_name), 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model_json = model_from_json(loaded_model_json)

        # load yaml and create model
        yaml_file = open('{}/{}.model.yaml'.format(output_dir, model_name), 'r')
        loaded_model_yaml = yaml_file.read()
        yaml_file.close()
        loaded_model_yaml = model_from_yaml(loaded_model_yaml)

        # load weights into new model
        loaded_model_json.load_weights('{}/{}.weights.h5'.format(output_dir, model_name))
        print("Loaded json model from disk")

        # evaluate json loaded model on test data
        loaded_model_json.compile(loss=gParameters['loss'],
                                  optimizer=gParameters['optimizer'],
                                  metrics=[gParameters['metrics']])
        score_json = loaded_model_json.evaluate(X_test, Y_test, verbose=0)

        print('json Test score:', score_json[0])
        print('json Test accuracy:', score_json[1])
        print("json %s: %.2f%%" % (loaded_model_json.metrics_names[1], score_json[1] * 100))

        # load weights into new model
        loaded_model_yaml.load_weights('{}/{}.weights.h5'.format(output_dir, model_name))
        print("Loaded yaml model from disk")

        # evaluate loaded model on test data
        loaded_model_yaml.compile(loss=gParameters['loss'],
                                  optimizer=gParameters['optimizer'],
                                  metrics=[gParameters['metrics']])
        score_yaml = loaded_model_yaml.evaluate(X_test, Y_test, verbose=0)

        print('yaml Test score:', score_yaml[0])
        print('yaml Test accuracy:', score_yaml[1])
        print("yaml %s: %.2f%%" % (loaded_model_yaml.metrics_names[1], score_yaml[1] * 100))

    return history
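
# A minimal sketch of how a CANDLE-compliant run() like the one above is
# typically driven. initialize_parameters() is assumed to come from the
# benchmark's companion script (hypothetical here, so the sketch is left
# commented out); run() is the function defined above.
# def main():
#     gParameters = initialize_parameters()
#     run(gParameters)
#
# if __name__ == '__main__':
#     main()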
def run(params):
    args = Struct(**params)
    set_seed(args.rng_seed)
    ext = extension_from_parameters(args)
    verify_path(args.save_path)
    prefix = args.save_path + ext
    logfile = args.logfile if args.logfile else prefix + '.log'
    set_up_logger(logfile, args.verbose)
    logger.info('Params: {}'.format(params))

    if (len(args.gpus) > 0):
        import tensorflow as tf
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = ",".join(map(str, args.gpus))
        K.set_session(tf.Session(config=config))

    loader = CombinedDataLoader(seed=args.rng_seed)
    loader.load(cache=args.cache,
                ncols=args.feature_subsample,
                agg_dose=args.agg_dose,
                cell_features=args.cell_features,
                drug_features=args.drug_features,
                drug_median_response_min=args.drug_median_response_min,
                drug_median_response_max=args.drug_median_response_max,
                use_landmark_genes=args.use_landmark_genes,
                use_filtered_genes=args.use_filtered_genes,
                cell_feature_subset_path=args.cell_feature_subset_path or args.feature_subset_path,
                drug_feature_subset_path=args.drug_feature_subset_path or args.feature_subset_path,
                preprocess_rnaseq=args.preprocess_rnaseq,
                single=args.single,
                train_sources=args.train_sources,
                test_sources=args.test_sources,
                embed_feature_source=not args.no_feature_source,
                encode_response_source=not args.no_response_source,
                )

    target = args.agg_dose or 'Growth'
    val_split = args.validation_split
    train_split = 1 - val_split

    if args.export_csv:
        fname = args.export_csv
        loader.partition_data(cv_folds=args.cv, train_split=train_split,
                              val_split=val_split,
                              cell_types=args.cell_types,
                              by_cell=args.by_cell, by_drug=args.by_drug,
                              cell_subset_path=args.cell_subset_path,
                              drug_subset_path=args.drug_subset_path)
        train_gen = CombinedDataGenerator(loader, batch_size=args.batch_size, shuffle=args.shuffle)
        val_gen = CombinedDataGenerator(loader, partition='val', batch_size=args.batch_size, shuffle=args.shuffle)

        x_train_list, y_train = train_gen.get_slice(size=train_gen.size, dataframe=True, single=args.single)
        x_val_list, y_val = val_gen.get_slice(size=val_gen.size, dataframe=True, single=args.single)
        df_train = pd.concat([y_train] + x_train_list, axis=1)
        df_val = pd.concat([y_val] + x_val_list, axis=1)
        df = pd.concat([df_train, df_val]).reset_index(drop=True)
        if args.growth_bins > 1:
            df = uno_data.discretize(df, 'Growth', bins=args.growth_bins)
        df.to_csv(fname, sep='\t', index=False, float_format="%.3g")
        return

    if args.export_data:
        fname = args.export_data
        loader.partition_data(cv_folds=args.cv, train_split=train_split,
                              val_split=val_split,
                              cell_types=args.cell_types,
                              by_cell=args.by_cell, by_drug=args.by_drug,
                              cell_subset_path=args.cell_subset_path,
                              drug_subset_path=args.drug_subset_path)
        train_gen = CombinedDataGenerator(loader, batch_size=args.batch_size, shuffle=args.shuffle)
        val_gen = CombinedDataGenerator(loader, partition='val', batch_size=args.batch_size, shuffle=args.shuffle)
        store = pd.HDFStore(fname, complevel=9, complib='blosc:snappy')
        config_min_itemsize = {'Sample': 30, 'Drug1': 10}
        if not args.single:
            config_min_itemsize['Drug2'] = 10
        for partition in ['train', 'val']:
            gen = train_gen if partition == 'train' else val_gen
            for i in range(gen.steps):
                x_list, y = gen.get_slice(size=args.batch_size, dataframe=True, single=args.single)
                for j, input_feature in enumerate(x_list):
                    input_feature.columns = [''] * len(input_feature.columns)
                    store.append('x_{}_{}'.format(partition, j), input_feature.astype('float32'),
                                 format='table', data_column=True)
                store.append('y_{}'.format(partition), y.astype({target: 'float32'}),
                             format='table', data_column=True,
                             min_itemsize=config_min_itemsize)
                logger.info('Generating {} dataset. {} / {}'.format(partition, i, gen.steps))
        store.close()
        logger.info('Completed generating {}'.format(fname))
        return

    loader.partition_data(cv_folds=args.cv, train_split=train_split,
                          val_split=val_split,
                          cell_types=args.cell_types,
                          by_cell=args.by_cell, by_drug=args.by_drug,
                          cell_subset_path=args.cell_subset_path,
                          drug_subset_path=args.drug_subset_path)

    model = build_model(loader, args)
    logger.info('Combined model:')
    model.summary(print_fn=logger.info)
    # plot_model(model, to_file=prefix+'.model.png', show_shapes=True)

    if args.cp:
        model_json = model.to_json()
        with open(prefix + '.model.json', 'w') as f:
            print(model_json, file=f)

    def warmup_scheduler(epoch):
        lr = args.learning_rate or base_lr * args.batch_size / 100
        if epoch <= 5:
            K.set_value(model.optimizer.lr, (base_lr * (5 - epoch) + lr * epoch) / 5)
        logger.debug('Epoch {}: lr={:.5g}'.format(epoch, K.get_value(model.optimizer.lr)))
        return K.get_value(model.optimizer.lr)

    df_pred_list = []

    cv_ext = ''
    cv = args.cv if args.cv > 1 else 1

    for fold in range(cv):
        if args.cv > 1:
            logger.info('Cross validation fold {}/{}:'.format(fold + 1, cv))
            cv_ext = '.cv{}'.format(fold + 1)

        template_model = build_model(loader, args, silent=True)
        if args.initial_weights:
            logger.info("Loading weights from {}".format(args.initial_weights))
            template_model.load_weights(args.initial_weights)

        if len(args.gpus) > 1:
            from keras.utils import multi_gpu_model
            gpu_count = len(args.gpus)
            logger.info("Multi GPU with {} gpus".format(gpu_count))
            model = multi_gpu_model(template_model, cpu_merge=False, gpus=gpu_count)
        else:
            model = template_model

        optimizer = optimizers.deserialize({'class_name': args.optimizer, 'config': {}})
        base_lr = args.base_lr or K.get_value(optimizer.lr)
        if args.learning_rate:
            K.set_value(optimizer.lr, args.learning_rate)

        model.compile(loss=args.loss, optimizer=optimizer, metrics=[mae, r2])

        # calculate trainable and non-trainable params
        params.update(candle.compute_trainable_params(model))

        candle_monitor = candle.CandleRemoteMonitor(params=params)
        timeout_monitor = candle.TerminateOnTimeOut(params['timeout'])

        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=0.00001)
        warmup_lr = LearningRateScheduler(warmup_scheduler)
        checkpointer = MultiGPUCheckpoint(prefix + cv_ext + '.model.h5', save_best_only=True)
        tensorboard = TensorBoard(log_dir="tb/{}{}{}".format(args.tb_prefix, ext, cv_ext))
        history_logger = LoggingCallback(logger.debug)

        callbacks = [candle_monitor, timeout_monitor, history_logger]
        if args.reduce_lr:
            callbacks.append(reduce_lr)
        if args.warmup_lr:
            callbacks.append(warmup_lr)
        if args.cp:
            callbacks.append(checkpointer)
        if args.tb:
            callbacks.append(tensorboard)
        if args.save_weights:
            callbacks.append(SimpleWeightSaver(args.save_path + '/' + args.save_weights))

        if args.use_exported_data is not None:
            train_gen = DataFeeder(filename=args.use_exported_data, batch_size=args.batch_size,
                                   shuffle=args.shuffle, single=args.single, agg_dose=args.agg_dose)
            val_gen = DataFeeder(partition='val', filename=args.use_exported_data,
                                 batch_size=args.batch_size, shuffle=args.shuffle,
                                 single=args.single, agg_dose=args.agg_dose)
        else:
            train_gen = CombinedDataGenerator(loader, fold=fold, batch_size=args.batch_size,
                                              shuffle=args.shuffle, single=args.single)
            val_gen = CombinedDataGenerator(loader, partition='val', fold=fold,
                                            batch_size=args.batch_size, shuffle=args.shuffle,
                                            single=args.single)

        df_val = val_gen.get_response(copy=True)
        y_val = df_val[target].values
        y_shuf = np.random.permutation(y_val)
        log_evaluation(evaluate_prediction(y_val, y_shuf),
                       description='Between random pairs in y_val:')

        if args.no_gen:
            x_train_list, y_train = train_gen.get_slice(size=train_gen.size, single=args.single)
            x_val_list, y_val = val_gen.get_slice(size=val_gen.size, single=args.single)
            history = model.fit(x_train_list, y_train,
                                batch_size=args.batch_size,
                                epochs=args.epochs,
                                callbacks=callbacks,
                                validation_data=(x_val_list, y_val))
        else:
            logger.info('Data points per epoch: train = %d, val = %d', train_gen.size, val_gen.size)
            logger.info('Steps per epoch: train = %d, val = %d', train_gen.steps, val_gen.steps)
            history = model.fit_generator(train_gen, train_gen.steps,
                                          epochs=args.epochs,
                                          callbacks=callbacks,
                                          validation_data=val_gen,
                                          validation_steps=val_gen.steps)

        if args.no_gen:
            y_val_pred = model.predict(x_val_list, batch_size=args.batch_size)
        else:
            val_gen.reset()
            y_val_pred = model.predict_generator(val_gen, val_gen.steps + 1)
            y_val_pred = y_val_pred[:val_gen.size]

        y_val_pred = y_val_pred.flatten()

        scores = evaluate_prediction(y_val, y_val_pred)
        log_evaluation(scores)

        # df_val = df_val.assign(PredictedGrowth=y_val_pred, GrowthError=y_val_pred - y_val)
        df_val['Predicted' + target] = y_val_pred
        df_val[target + 'Error'] = y_val_pred - y_val
        df_pred_list.append(df_val)

        if hasattr(history, 'loss'):
            plot_history(prefix, history, 'loss')
        if hasattr(history, 'r2'):
            plot_history(prefix, history, 'r2')

    pred_fname = prefix + '.predicted.tsv'
    df_pred = pd.concat(df_pred_list)
    if args.agg_dose:
        if args.single:
            df_pred.sort_values(['Sample', 'Drug1', target], inplace=True)
        else:
            df_pred.sort_values(['Source', 'Sample', 'Drug1', 'Drug2', target], inplace=True)
    else:
        if args.single:
            df_pred.sort_values(['Sample', 'Drug1', 'Dose1', 'Growth'], inplace=True)
        else:
            df_pred.sort_values(['Sample', 'Drug1', 'Drug2', 'Dose1', 'Dose2', 'Growth'], inplace=True)
    df_pred.to_csv(pred_fname, sep='\t', index=False, float_format='%.4g')

    if args.cv > 1:
        scores = evaluate_prediction(df_pred[target], df_pred['Predicted' + target])
        log_evaluation(scores, description='Combining cross validation folds:')

    for test_source in loader.test_sep_sources:
        test_gen = CombinedDataGenerator(loader, partition='test',
                                         batch_size=args.batch_size, source=test_source)
        df_test = test_gen.get_response(copy=True)
        y_test = df_test[target].values
        n_test = len(y_test)
        if n_test == 0:
            continue
        if args.no_gen:
            x_test_list, y_test = test_gen.get_slice(size=test_gen.size, single=args.single)
            y_test_pred = model.predict(x_test_list, batch_size=args.batch_size)
        else:
            y_test_pred = model.predict_generator(test_gen.flow(single=args.single), test_gen.steps)
            y_test_pred = y_test_pred[:test_gen.size]
        y_test_pred = y_test_pred.flatten()
        scores = evaluate_prediction(y_test, y_test_pred)
        log_evaluation(scores, description='Testing on data from {} ({})'.format(test_source, n_test))

    if K.backend() == 'tensorflow':
        K.clear_session()

    logger.handlers = []

    return history
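
# Illustration only: warmup_scheduler above linearly blends from base_lr to the
# target lr over the first 5 epochs via (base_lr * (5 - epoch) + lr * epoch) / 5.
# A standalone sketch with hypothetical values (not benchmark defaults):
def _example_warmup(base_lr=0.001, lr=0.01, epochs=6):
    # epoch 0 -> base_lr, epoch 5 -> lr, linear in between
    return [(base_lr * (5 - e) + lr * e) / 5 for e in range(epochs)]
# _example_warmup() -> [0.001, 0.0028, 0.0046, 0.0064, 0.0082, 0.01]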
def run(GP):
    # set the seed
    if GP['rng_seed']:
        np.random.seed(GP['rng_seed'])
    else:
        np.random.seed(np.random.randint(10000))

    # Set paths
    if not os.path.isdir(GP['home_dir']):
        print('Keras home directory not set')
        sys.exit(0)
    sys.path.append(GP['home_dir'])

    # Set up logging
    args = candle.ArgumentStruct(**GP)
    # set_seed(args.rng_seed)
    # ext = extension_from_parameters(args)
    candle.verify_path(args.save_path)
    prefix = args.save_path  # + ext
    logfile = args.logfile if args.logfile else prefix + '.log'
    candle.set_up_logger(logfile, logger, False)  # args.verbose
    logger.info('Params: {}'.format(GP))

    import p2b1 as hf
    reload(hf)

    # import keras_model_utils as KEU
    # reload(KEU)
    # reload(p2ck)
    # reload(p2ck.optimizers)
    maps = hf.autoencoder_preprocess()

    from keras.optimizers import SGD, RMSprop, Adam
    from keras.datasets import mnist
    from keras.callbacks import LearningRateScheduler, ModelCheckpoint
    from keras import callbacks
    from keras.layers.advanced_activations import ELU
    from keras.preprocessing.image import ImageDataGenerator

    # GP=hf.ReadConfig(opts.config_file)
    batch_size = GP['batch_size']
    learning_rate = GP['learning_rate']
    kerasDefaults = candle.keras_default_config()

    # #### Read Data ########
    import helper
    (data_files, fields) = p2b1.get_list_of_data_files(GP)
    # Read from local directory
    # (data_files, fields) = helper.get_local_files('/p/gscratchr/brainusr/datasets/cancer/pilot2/3k_run16_10us.35fs-DPPC.20-DIPC.60-CHOL.20.dir/')
    # (data_files, fields) = helper.get_local_files('3k_run16', '/p/lscratchf/brainusr/datasets/cancer/pilot2/')

    # Define datagenerator
    datagen = hf.ImageNoiseDataGenerator(corruption_level=GP['noise_factor'])

    # get data dimension ##
    num_samples = 0
    for f in data_files:
        # Separate different arrays from the data
        (X, nbrs, resnums) = helper.get_data_arrays(f)
        num_samples += X.shape[0]

    (X, nbrs, resnums) = helper.get_data_arrays(data_files[0])
    print('\nData chunk shape: ', X.shape)

    molecular_hidden_layers = GP['molecular_num_hidden']
    if not molecular_hidden_layers:
        X_train = hf.get_data(X, case=GP['case'])
        input_dim = X_train.shape[1]
    else:
        # computing input dimension for outer AE
        input_dim = X.shape[1] * molecular_hidden_layers[-1]
    print('\nState AE input/output dimension: ', input_dim)

    # get data dimension for molecular autoencoder
    molecular_nbrs = int(GP['molecular_nbrs'])  # np.int is deprecated; use the builtin
    num_molecules = X.shape[1]
    num_beads = X.shape[2]

    if GP['nbr_type'] == 'relative':
        # relative x, y, z positions
        num_loc_features = 3
        loc_feat_vect = ['rel_x', 'rel_y', 'rel_z']
    elif GP['nbr_type'] == 'invariant':
        # relative distance and angle
        num_loc_features = 2
        loc_feat_vect = ['rel_dist', 'rel_angle']
    else:
        print('Invalid nbr_type!!')
        exit()

    if not GP['type_bool']:
        # only consider molecular location coordinates
        num_type_features = 0
        type_feat_vect = []
    else:
        num_type_features = 5
        type_feat_vect = list(fields.keys())[3:8]

    num_features = num_loc_features + num_type_features + num_beads
    dim = np.prod([num_beads, num_features, molecular_nbrs + 1])
    bead_kernel_size = num_features
    molecular_input_dim = dim
    mol_kernel_size = num_beads

    feature_vector = loc_feat_vect + type_feat_vect + list(fields.keys())[8:]
    print('\nMolecular AE input/output dimension: ', molecular_input_dim)
    print('\nData Format:\n[Frames (%s), Molecules (%s), Beads (%s), %s (%s)]'
          % (num_samples, num_molecules, num_beads, feature_vector, num_features))

    # ## Define Model, Solver and Compile ##########
    print('\nDefine the model and compile')
    opt = candle.build_optimizer(GP['optimizer'], learning_rate, kerasDefaults)
    model_type = 'mlp'
    memo = '%s_%s' % (GP['base_memo'], model_type)

    # ####### Define Molecular Model, Solver and Compile #########
    molecular_nonlinearity = GP['molecular_nonlinearity']
    len_molecular_hidden_layers = len(molecular_hidden_layers)
    conv_bool = GP['conv_bool']
    full_conv_bool = GP['full_conv_bool']
    if conv_bool:
        molecular_model, molecular_encoder = AE_models.conv_dense_mol_auto(
            bead_k_size=bead_kernel_size,
            mol_k_size=mol_kernel_size,
            weights_path=None,
            input_shape=(1, molecular_input_dim, 1),
            nonlinearity=molecular_nonlinearity,
            hidden_layers=molecular_hidden_layers,
            l2_reg=GP['l2_reg'],
            drop=float(GP['dropout']))
    elif full_conv_bool:
        molecular_model, molecular_encoder = AE_models.full_conv_mol_auto(
            bead_k_size=bead_kernel_size,
            mol_k_size=mol_kernel_size,
            weights_path=None,
            input_shape=(1, molecular_input_dim, 1),
            nonlinearity=molecular_nonlinearity,
            hidden_layers=molecular_hidden_layers,
            l2_reg=GP['l2_reg'],
            drop=float(GP['dropout']))
    else:
        molecular_model, molecular_encoder = AE_models.dense_auto(
            weights_path=None,
            input_shape=(molecular_input_dim, ),
            nonlinearity=molecular_nonlinearity,
            hidden_layers=molecular_hidden_layers,
            l2_reg=GP['l2_reg'],
            drop=float(GP['dropout']))

    if GP['loss'] == 'mse':
        loss_func = 'mse'
    elif GP['loss'] == 'custom':
        loss_func = helper.combined_loss

    molecular_model.compile(optimizer=opt,
                            loss=loss_func,
                            metrics=['mean_squared_error', 'mean_absolute_error'])
    print('\nModel Summary: \n')
    molecular_model.summary()

    # #### set up callbacks and cooling for the molecular_model ##########
    drop = GP['dropout']
    mb_epochs = GP['epochs']
    initial_lrate = GP['learning_rate']
    epochs_drop = 1 + int(np.floor(mb_epochs / 3))

    def step_decay(epoch):
        # closure over initial_lrate, epochs_drop, and drop; a 'global'
        # declaration here would fail, since these are locals of run(),
        # not module-level names
        lrate = initial_lrate * np.power(drop, np.floor((1 + epoch) / epochs_drop))
        return lrate

    lr_scheduler = LearningRateScheduler(step_decay)
    history = callbacks.History()
    # callbacks=[history,lr_scheduler]

    history_logger = candle.LoggingCallback(logger.debug)
    candleRemoteMonitor = candle.CandleRemoteMonitor(params=GP)
    timeoutMonitor = candle.TerminateOnTimeOut(TIMEOUT)
    callbacks = [history, history_logger, candleRemoteMonitor, timeoutMonitor]

    loss = 0.

    # ### Save the Model to disk
    if GP['save_path'] is not None:
        save_path = GP['save_path']
        if not os.path.exists(save_path):
            os.makedirs(save_path)
    else:
        save_path = '.'

    model_json = molecular_model.to_json()
    with open(save_path + '/model.json', "w") as json_file:
        json_file.write(model_json)

    encoder_json = molecular_encoder.to_json()
    with open(save_path + '/encoder.json', "w") as json_file:
        json_file.write(encoder_json)

    print('Saved model to disk')

    # ### Train the Model
    if GP['train_bool']:
        ct = hf.Candle_Molecular_Train(
            molecular_model,
            molecular_encoder,
            data_files,
            mb_epochs,
            callbacks,
            batch_size=batch_size,
            nbr_type=GP['nbr_type'],
            save_path=GP['save_path'],
            len_molecular_hidden_layers=len_molecular_hidden_layers,
            molecular_nbrs=molecular_nbrs,
            conv_bool=conv_bool,
            full_conv_bool=full_conv_bool,
            type_bool=GP['type_bool'],
            sampling_density=GP['sampling_density'])
        frame_loss, frame_mse = ct.train_ac()
    else:
        frame_mse = []
        frame_loss = []

    return frame_loss, frame_mse
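
# Illustration only: the step decay above drops the learning rate by a factor
# of `drop` every `epochs_drop` epochs. A standalone sketch with hypothetical
# values (initial_lrate=0.01, drop=0.5, 9 epochs -> epochs_drop=4):
import numpy as np

def _example_step_decay(initial_lrate=0.01, drop=0.5, mb_epochs=9):
    epochs_drop = 1 + int(np.floor(mb_epochs / 3))
    return [initial_lrate * np.power(drop, np.floor((1 + e) / epochs_drop))
            for e in range(mb_epochs)]
# _example_step_decay() -> lr stays at 0.01 for epochs 0-2, halves at epoch 3,
# halves again at epoch 7, and so on.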
    # this fragment begins mid-call; the loss and optimizer arguments below are
    # assumed from the identical compile pattern used elsewhere in these scripts
    model.compile(loss=candle_params['loss'],
                  optimizer=candle_params['optimizer'],
                  metrics=[candle_params['metrics']])

    output_dir = candle_params['save']
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # calculate trainable and non-trainable params
    candle_params.update(candle.compute_trainable_params(model))

    # set up a bunch of callbacks to do work during model training..
    model_name = candle_params['model_name']
    path = '{}/{}.autosave.model.h5'.format(output_dir, model_name)
    csv_logger = CSVLogger('{}/training.log'.format(output_dir))
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10,
                                  verbose=1, mode='auto', epsilon=0.0001,
                                  cooldown=0, min_lr=0)
    candleRemoteMonitor = candle.CandleRemoteMonitor(params=candle_params)
    timeoutMonitor = candle.TerminateOnTimeOut(candle_params['timeout'])

    history2 = model.fit(X_train, Y_train,
                         batch_size=candle_params['batch_size'],
                         epochs=candle_params['epochs'],
                         verbose=1,
                         validation_data=(X_test, Y_test),
                         callbacks=[csv_logger, reduce_lr, candleRemoteMonitor, timeoutMonitor])

    score = model.evaluate(X_test, Y_test, verbose=0)
    candle_value_to_return = score[0]
    print(model.metrics_names)
    print(score)
def run(params):
    args = Struct(**params)
    set_seed(args.rng_seed)
    ext = extension_from_parameters(args)
    verify_path(args.save_path)
    prefix = args.save_path + ext
    logfile = args.logfile if args.logfile else prefix + '.log'
    set_up_logger(logfile, args.verbose)
    logger.info('Params: {}'.format(params))

    loader = ComboDataLoader(seed=args.rng_seed,
                             val_split=args.validation_split,
                             cell_features=args.cell_features,
                             drug_features=args.drug_features,
                             use_mean_growth=args.use_mean_growth,
                             response_url=args.response_url,
                             use_landmark_genes=args.use_landmark_genes,
                             preprocess_rnaseq=args.preprocess_rnaseq,
                             exclude_cells=args.exclude_cells,
                             exclude_drugs=args.exclude_drugs,
                             use_combo_score=args.use_combo_score,
                             cv_partition=args.cv_partition,
                             cv=args.cv)
    # test_loader(loader)
    # test_generator(loader)

    train_gen = ComboDataGenerator(loader, batch_size=args.batch_size).flow()
    val_gen = ComboDataGenerator(loader, partition='val', batch_size=args.batch_size).flow()

    train_steps = int(loader.n_train / args.batch_size)
    val_steps = int(loader.n_val / args.batch_size)

    model = build_model(loader, args, verbose=True)
    model.summary()
    # candle.plot_model(model, to_file=prefix+'.model.png', show_shapes=True)

    if args.cp:
        model_json = model.to_json()
        with open(prefix + '.model.json', 'w') as f:
            print(model_json, file=f)

    def warmup_scheduler(epoch):
        lr = args.learning_rate or base_lr * args.batch_size / 100
        if epoch <= 5:
            K.set_value(model.optimizer.lr, (base_lr * (5 - epoch) + lr * epoch) / 5)
        logger.debug('Epoch {}: lr={}'.format(epoch, K.get_value(model.optimizer.lr)))
        return K.get_value(model.optimizer.lr)

    df_pred_list = []

    cv_ext = ''
    cv = args.cv if args.cv > 1 else 1

    fold = 0
    while fold < cv:
        if args.cv > 1:
            logger.info('Cross validation fold {}/{}:'.format(fold + 1, cv))
            cv_ext = '.cv{}'.format(fold + 1)

        model = build_model(loader, args)

        optimizer = optimizers.deserialize({'class_name': args.optimizer, 'config': {}})
        base_lr = args.base_lr or K.get_value(optimizer.lr)
        if args.learning_rate:
            K.set_value(optimizer.lr, args.learning_rate)

        model.compile(loss=args.loss, optimizer=optimizer, metrics=[mae, r2])

        # calculate trainable and non-trainable params
        params.update(candle.compute_trainable_params(model))

        candle_monitor = candle.CandleRemoteMonitor(params=params)
        timeout_monitor = candle.TerminateOnTimeOut(params['timeout'])

        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=0.00001)
        warmup_lr = LearningRateScheduler(warmup_scheduler)
        checkpointer = ModelCheckpoint(prefix + cv_ext + '.weights.h5',
                                       save_best_only=True, save_weights_only=True)
        tensorboard = TensorBoard(log_dir="tb/tb{}{}".format(ext, cv_ext))
        history_logger = LoggingCallback(logger.debug)
        model_recorder = ModelRecorder()

        # callbacks = [history_logger, model_recorder]
        callbacks = [candle_monitor, timeout_monitor, history_logger, model_recorder]
        if args.reduce_lr:
            callbacks.append(reduce_lr)
        if args.warmup_lr:
            callbacks.append(warmup_lr)
        if args.cp:
            callbacks.append(checkpointer)
        if args.tb:
            callbacks.append(tensorboard)

        if args.gen:
            history = model.fit_generator(train_gen, train_steps,
                                          epochs=args.epochs,
                                          callbacks=callbacks,
                                          validation_data=val_gen,
                                          validation_steps=val_steps)
            fold += 1
        else:
            if args.cv > 1:
                x_train_list, y_train, x_val_list, y_val, df_train, df_val = loader.load_data_cv(fold)
            else:
                x_train_list, y_train, x_val_list, y_val, df_train, df_val = loader.load_data()

            y_shuf = np.random.permutation(y_val)
            log_evaluation(evaluate_prediction(y_val, y_shuf),
                           description='Between random pairs in y_val:')

            history = model.fit(x_train_list, y_train,
                                batch_size=args.batch_size,
                                shuffle=args.shuffle,
                                epochs=args.epochs,
                                callbacks=callbacks,
                                validation_data=(x_val_list, y_val))

        if args.cp:
            model.load_weights(prefix + cv_ext + '.weights.h5')

        if not args.gen:
            y_val_pred = model.predict(x_val_list, batch_size=args.batch_size).flatten()
            scores = evaluate_prediction(y_val, y_val_pred)
            if args.cv > 1 and scores[args.loss] > args.max_val_loss:
                logger.warn('Best val_loss {} is greater than {}; retrain the model...'
                            .format(scores[args.loss], args.max_val_loss))
                continue
            else:
                fold += 1
            log_evaluation(scores)
            df_val.is_copy = False
            df_val['GROWTH_PRED'] = y_val_pred
            df_val['GROWTH_ERROR'] = y_val_pred - y_val
            df_pred_list.append(df_val)

        if args.cp:
            # model.save(prefix+'.model.h5')
            model_recorder.best_model.save(prefix + '.model.h5')

            # test reloaded model prediction
            # new_model = keras.models.load_model(prefix+'.model.h5')
            # new_model.load_weights(prefix+cv_ext+'.weights.h5')
            # new_pred = new_model.predict(x_val_list, batch_size=args.batch_size).flatten()
            # print('y_val:', y_val[:10])
            # print('old_pred:', y_val_pred[:10])
            # print('new_pred:', new_pred[:10])

        candle.plot_history(prefix, history, 'loss')
        candle.plot_history(prefix, history, 'r2')

        if K.backend() == 'tensorflow':
            K.clear_session()

    if not args.gen:
        if args.use_combo_score:
            pred_fname = prefix + '.predicted.score.tsv'
        elif args.use_mean_growth:
            pred_fname = prefix + '.predicted.mean.growth.tsv'
        else:
            pred_fname = prefix + '.predicted.growth.tsv'
        df_pred = pd.concat(df_pred_list)
        df_pred.to_csv(pred_fname, sep='\t', index=False, float_format='%.4g')

    logger.handlers = []

    return history
def run(gParameters):
    """
    Runs the model using the specified set of parameters

    Args:
        gParameters: a python dictionary containing the parameters (e.g. epoch)
            to run the model with.
    """
    #
    if 'dense' in gParameters:
        dval = gParameters['dense']
        if type(dval) != list:
            res = list(dval)
            # try:
            #     is_str = isinstance(dval, basestring)
            # except NameError:
            #     is_str = isinstance(dval, str)
            # if is_str:
            #     res = str2lst(dval)
            gParameters['dense'] = res  # assign inside the guard so 'res' is always defined
        print(gParameters['dense'])

    if 'conv' in gParameters:
        # conv_list = p1_common.parse_conv_list(gParameters['conv'])
        # cval = gParameters['conv']
        # try:
        #     is_str = isinstance(cval, basestring)
        # except NameError:
        #     is_str = isinstance(cval, str)
        # if is_str:
        #     res = str2lst(cval)
        # gParameters['conv'] = res
        print('Conv input', gParameters['conv'])

    # print('Params:', gParameters)
    # Construct extension to save model
    ext = benchmark.extension_from_parameters(gParameters, '.keras')
    logfile = gParameters['logfile'] if gParameters['logfile'] else gParameters['output_dir'] + ext + '.log'

    fh = logging.FileHandler(logfile)
    fh.setFormatter(logging.Formatter("[%(asctime)s %(process)d] %(message)s",
                                      datefmt="%Y-%m-%d %H:%M:%S"))
    fh.setLevel(logging.DEBUG)

    sh = logging.StreamHandler()
    sh.setFormatter(logging.Formatter(''))
    sh.setLevel(logging.DEBUG if gParameters['verbose'] else logging.INFO)

    benchmark.logger.setLevel(logging.DEBUG)
    benchmark.logger.addHandler(fh)
    benchmark.logger.addHandler(sh)
    benchmark.logger.info('Params: {}'.format(gParameters))

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = candle.keras_default_config()
    seed = gParameters['rng_seed']

    # Build dataset loader object
    loader = benchmark.DataLoader(seed=seed,
                                  dtype=gParameters['data_type'],
                                  val_split=gParameters['val_split'],
                                  test_cell_split=gParameters['test_cell_split'],
                                  cell_features=gParameters['cell_features'],
                                  drug_features=gParameters['drug_features'],
                                  feature_subsample=gParameters['feature_subsample'],
                                  scaling=gParameters['scaling'],
                                  scramble=gParameters['scramble'],
                                  min_logconc=gParameters['min_logconc'],
                                  max_logconc=gParameters['max_logconc'],
                                  subsample=gParameters['subsample'],
                                  category_cutoffs=gParameters['category_cutoffs'])

    # Initialize weights and learning rule
    initializer_weights = candle.build_initializer(gParameters['initialization'],
                                                   kerasDefaults, seed)
    initializer_bias = candle.build_initializer('constant', kerasDefaults, 0.)

    activation = gParameters['activation']

    # Define model architecture
    gen_shape = None
    out_dim = 1

    model = Sequential()
    if 'dense' in gParameters:  # Build dense layers
        for layer in gParameters['dense']:
            if layer:
                model.add(Dense(layer, input_dim=loader.input_dim,
                                kernel_initializer=initializer_weights,
                                bias_initializer=initializer_bias))
                if gParameters['batch_normalization']:
                    model.add(BatchNormalization())
                model.add(Activation(gParameters['activation']))
                if gParameters['dropout']:
                    model.add(Dropout(gParameters['dropout']))
    else:  # Build convolutional layers
        gen_shape = 'add_1d'
        layer_list = list(range(0, len(gParameters['conv'])))
        lc_flag = False
        if 'locally_connected' in gParameters:
            lc_flag = True

        for l, i in enumerate(layer_list):
            if i == 0:
                add_conv_layer(model, gParameters['conv'][i],
                               input_dim=loader.input_dim, locally_connected=lc_flag)
            else:
                add_conv_layer(model, gParameters['conv'][i], locally_connected=lc_flag)
            if gParameters['batch_normalization']:
                model.add(BatchNormalization())
            model.add(Activation(gParameters['activation']))
            if gParameters['pool']:
                model.add(MaxPooling1D(pool_size=gParameters['pool']))
        model.add(Flatten())

    model.add(Dense(out_dim))

    # Define optimizer
    optimizer = candle.build_optimizer(gParameters['optimizer'],
                                       gParameters['learning_rate'],
                                       kerasDefaults)

    # Compile and display model
    model.compile(loss=gParameters['loss'], optimizer=optimizer)
    model.summary()
    benchmark.logger.debug('Model: {}'.format(model.to_json()))

    train_gen = benchmark.DataGenerator(loader, batch_size=gParameters['batch_size'],
                                        shape=gen_shape, name='train_gen',
                                        cell_noise_sigma=gParameters['cell_noise_sigma']).flow()
    val_gen = benchmark.DataGenerator(loader, partition='val',
                                      batch_size=gParameters['batch_size'],
                                      shape=gen_shape, name='val_gen').flow()
    val_gen2 = benchmark.DataGenerator(loader, partition='val',
                                       batch_size=gParameters['batch_size'],
                                       shape=gen_shape, name='val_gen2').flow()
    test_gen = benchmark.DataGenerator(loader, partition='test',
                                       batch_size=gParameters['batch_size'],
                                       shape=gen_shape, name='test_gen').flow()

    train_steps = int(loader.n_train / gParameters['batch_size'])
    val_steps = int(loader.n_val / gParameters['batch_size'])
    test_steps = int(loader.n_test / gParameters['batch_size'])

    if 'train_steps' in gParameters:
        train_steps = gParameters['train_steps']
    if 'val_steps' in gParameters:
        val_steps = gParameters['val_steps']
    if 'test_steps' in gParameters:
        test_steps = gParameters['test_steps']

    checkpointer = ModelCheckpoint(filepath=gParameters['output_dir'] + '.model' + ext + '.h5',
                                   save_best_only=True)
    progbar = MyProgbarLogger(train_steps * gParameters['batch_size'])
    loss_history = MyLossHistory(progbar=progbar,
                                 val_gen=val_gen2, test_gen=test_gen,
                                 val_steps=val_steps, test_steps=test_steps,
                                 metric=gParameters['loss'],
                                 category_cutoffs=gParameters['category_cutoffs'],
                                 ext=ext, pre=gParameters['output_dir'])

    # Seed random generator for training
    np.random.seed(seed)

    candleRemoteMonitor = candle.CandleRemoteMonitor(params=gParameters)

    history = model.fit_generator(train_gen, train_steps,
                                  epochs=gParameters['epochs'],
                                  validation_data=val_gen,
                                  validation_steps=val_steps,
                                  verbose=0,
                                  callbacks=[checkpointer, loss_history, progbar, candleRemoteMonitor],
                                  )

    benchmark.logger.removeHandler(fh)
    benchmark.logger.removeHandler(sh)

    return history
def run(params):
    args = candle.ArgumentStruct(**params)
    candle.set_seed(args.rng_seed)
    ext = uno.extension_from_parameters(args)
    candle.verify_path(args.save_path)
    prefix = args.save_path + 'uno' + ext
    logfile = args.logfile if args.logfile else prefix + '.log'
    uno.set_up_logger(logfile, logger, uno.loggerUno, args.verbose)
    logger.info('Params: {}'.format(params))

    # Exclude drugs / cells for UQ
    if 'uq_exclude_drugs_file' in params.keys():
        args.exclude_drugs = uno.read_IDs_file(args.uq_exclude_drugs_file)
        logger.info('Drugs to exclude: {}'.format(args.exclude_drugs))
    else:
        args.exclude_drugs = []
    if 'uq_exclude_cells_file' in params.keys():
        args.exclude_cells = uno.read_IDs_file(args.uq_exclude_cells_file)
        logger.info('Cells to exclude: {}'.format(args.exclude_cells))
    else:
        args.exclude_cells = []
    if 'uq_exclude_indices_file' in params.keys():
        exclude_indices_ = uno.read_IDs_file(args.uq_exclude_indices_file)
        args.exclude_indices = [int(x) for x in exclude_indices_]
        logger.info('Indices to exclude: {}'.format(args.exclude_indices))
    else:
        args.exclude_indices = []

    if (len(args.gpus) > 0):
        import tensorflow as tf
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = ",".join(map(str, args.gpus))
        K.set_session(tf.Session(config=config))

    loader = uno_combined_data_loader.CombinedDataLoader(seed=args.rng_seed)
    loader.load(cache=args.cache,
                ncols=args.feature_subsample,
                agg_dose=args.agg_dose,
                cell_features=args.cell_features,
                drug_features=args.drug_features,
                drug_median_response_min=args.drug_median_response_min,
                drug_median_response_max=args.drug_median_response_max,
                use_landmark_genes=args.use_landmark_genes,
                use_filtered_genes=args.use_filtered_genes,
                cell_feature_subset_path=args.cell_feature_subset_path or args.feature_subset_path,
                drug_feature_subset_path=args.drug_feature_subset_path or args.feature_subset_path,
                preprocess_rnaseq=args.preprocess_rnaseq,
                single=args.single,
                train_sources=args.train_sources,
                test_sources=args.test_sources,
                embed_feature_source=not args.no_feature_source,
                encode_response_source=not args.no_response_source,
                )

    target = args.agg_dose or 'Growth'
    val_split = args.val_split
    train_split = 1 - val_split

    loader.partition_data(partition_by=args.partition_by,
                          cv_folds=args.cv, train_split=train_split,
                          val_split=val_split,
                          cell_types=args.cell_types,
                          by_cell=args.by_cell, by_drug=args.by_drug,
                          cell_subset_path=args.cell_subset_path,
                          drug_subset_path=args.drug_subset_path,
                          exclude_cells=args.exclude_cells,
                          exclude_drugs=args.exclude_drugs,
                          exclude_indices=args.exclude_indices)

    model = uno_model_utils.build_model(loader, args, logger)
    logger.info('Combined model:')
    model.summary(print_fn=logger.info)
    # plot_model(model, to_file=prefix+'.model.png', show_shapes=True)

    if args.cp:
        model_json = model.to_json()
        with open(prefix + '.model.json', 'w') as f:
            print(model_json, file=f)

    def warmup_scheduler(epoch):
        lr = args.learning_rate or base_lr * args.batch_size / 100
        if epoch <= 5:
            K.set_value(model.optimizer.lr, (base_lr * (5 - epoch) + lr * epoch) / 5)
        logger.debug('Epoch {}: lr={:.5g}'.format(epoch, K.get_value(model.optimizer.lr)))
        return K.get_value(model.optimizer.lr)

    df_pred_list = []

    cv_ext = ''
    cv = args.cv if args.cv > 1 else 1

    for fold in range(cv):
        if args.cv > 1:
            logger.info('Cross validation fold {}/{}:'.format(fold + 1, cv))
            cv_ext = '.cv{}'.format(fold + 1)

        # model = uno_model_utils.build_model(loader, args, logger, silent=True)
        template_model = uno_model_utils.build_model(loader, args, logger, silent=True)
        if args.initial_weights:
            logger.info("Loading weights from {}".format(args.initial_weights))
            template_model.load_weights(args.initial_weights)

        if len(args.gpus) > 1:
            from keras.utils import multi_gpu_model
            gpu_count = len(args.gpus)
            logger.info("Multi GPU with {} gpus".format(gpu_count))
            model = multi_gpu_model(template_model, cpu_merge=False, gpus=gpu_count)
        else:
            model = template_model

        optimizer = optimizers.deserialize({'class_name': args.optimizer, 'config': {}})
        base_lr = args.base_lr or K.get_value(optimizer.lr)
        if args.learning_rate:
            K.set_value(optimizer.lr, args.learning_rate)

        if args.loss == 'heteroscedastic':
            logger.info('Training heteroscedastic model:')
            model.compile(loss=heteroscedastic_loss, optimizer=optimizer,
                          metrics=[uno_model_utils.mae_heteroscedastic,
                                   uno_model_utils.r2_heteroscedastic,
                                   uno_model_utils.meanS_heteroscesdastic])
        elif args.loss == 'quantile':
            logger.info('Training quantile model:')
            model.compile(loss=triple_quantile_loss, optimizer=optimizer,
                          metrics=[uno_model_utils.quantile50,
                                   uno_model_utils.quantile10,
                                   uno_model_utils.quantile90])
        else:
            logger.info('Training homoscedastic model:')
            model.compile(loss=args.loss, optimizer=optimizer, metrics=[candle.mae, candle.r2])

        # calculate trainable and non-trainable params
        params.update(candle.compute_trainable_params(model))

        candle_monitor = candle.CandleRemoteMonitor(params=params)
        timeout_monitor = candle.TerminateOnTimeOut(params['timeout'])

        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=0.00001)
        warmup_lr = LearningRateScheduler(warmup_scheduler)
        # checkpointer = ModelCheckpoint(prefix+cv_ext+'.weights.h5', save_best_only=True, save_weights_only=True)
        checkpointer = candle.MultiGPUCheckpoint(prefix + cv_ext + '.model.h5', save_best_only=True)
        tensorboard = TensorBoard(log_dir="tb/{}{}{}".format(args.tb_prefix, ext, cv_ext))
        history_logger = candle.LoggingCallback(logger.debug)
        # model_recorder = uno_model_utils.ModelRecorder()

        # callbacks = [history_logger, model_recorder]
        callbacks = [candle_monitor, timeout_monitor, history_logger]  # , model_recorder]
        if args.reduce_lr:
            callbacks.append(reduce_lr)
        if args.warmup_lr:
            callbacks.append(warmup_lr)
        if args.cp:
            callbacks.append(checkpointer)
        if args.tb:
            callbacks.append(tensorboard)
        if args.save_weights:
            callbacks.append(uno_model_utils.SimpleWeightSaver(args.save_path + '/' + args.save_weights))

        train_gen = uno_combined_data_generator.CombinedDataGenerator(
            loader, fold=fold, batch_size=args.batch_size, shuffle=args.shuffle)
        val_gen = uno_combined_data_generator.CombinedDataGenerator(
            loader, partition='val', fold=fold, batch_size=args.batch_size, shuffle=args.shuffle)

        df_val = val_gen.get_response(copy=True)
        y_val = df_val[target].values
        y_shuf = np.random.permutation(y_val)
        uno.log_evaluation(uno.evaluate_prediction(y_val, y_shuf), logger,
                           description='Between random pairs in y_val:')

        if args.no_gen:
            x_train_list, y_train = train_gen.get_slice(size=train_gen.size, single=args.single)
            x_val_list, y_val = val_gen.get_slice(size=val_gen.size, single=args.single)
            history = model.fit(x_train_list, y_train,
                                batch_size=args.batch_size,
                                epochs=args.epochs,
                                callbacks=callbacks,
                                validation_data=(x_val_list, y_val))
        else:
            logger.info('Data points per epoch: train = %d, val = %d', train_gen.size, val_gen.size)
            logger.info('Steps per epoch: train = %d, val = %d', train_gen.steps, val_gen.steps)
            history = model.fit_generator(train_gen, train_gen.steps,
                                          epochs=args.epochs,
                                          callbacks=callbacks,
                                          validation_data=val_gen,
                                          validation_steps=val_gen.steps)

        # if args.cp:
        #     model.load_weights(prefix+cv_ext+'.weights.h5')
        # model = model_recorder.best_model

        if args.no_gen:
            y_val_pred = model.predict(x_val_list, batch_size=args.batch_size)
        else:
            val_gen.reset()
            y_val_pred = model.predict_generator(val_gen, val_gen.steps + 1)
            y_val_pred = y_val_pred[:val_gen.size]

        if args.loss == 'heteroscedastic':
            y_val_pred_ = y_val_pred[:, 0]
            s_val_pred = y_val_pred[:, 1]

            y_val_pred = y_val_pred_.flatten()

            df_val['Predicted_' + target] = y_val_pred
            df_val[target + '_Error'] = y_val_pred - y_val
            df_val['Pred_S_' + target] = s_val_pred
        elif args.loss == 'quantile':
            y_val_pred_50q = y_val_pred[:, 0]
            y_val_pred_10q = y_val_pred[:, 1]
            y_val_pred_90q = y_val_pred[:, 2]

            y_val_pred = y_val_pred_50q.flatten()  # 50th quantile prediction

            df_val['Predicted_50q_' + target] = y_val_pred
            df_val[target + '_Error_50q'] = y_val_pred - y_val
            df_val['Predicted_10q_' + target] = y_val_pred_10q.flatten()
            df_val['Predicted_90q_' + target] = y_val_pred_90q.flatten()
        else:
            y_val_pred = y_val_pred.flatten()
            # df_val = df_val.assign(PredictedGrowth=y_val_pred, GrowthError=y_val_pred-y_val)
            df_val['Predicted' + target] = y_val_pred
            df_val[target + 'Error'] = y_val_pred - y_val

        scores = uno.evaluate_prediction(y_val, y_val_pred)
        uno.log_evaluation(scores, logger)

        df_pred_list.append(df_val)

        # if args.cp:
        #     model_recorder.best_model.save(prefix+'.model.h5')

        if hasattr(history, 'loss'):
            candle.plot_history(prefix, history, 'loss')
        if args.loss == 'heteroscedastic':
            if hasattr(history, 'r2_heteroscedastic'):
                candle.plot_history(prefix, history, 'r2_heteroscedastic')
            # the metric function's (misspelled) name determines the history key,
            # so both the check and the plot use the same identifier
            if hasattr(history, 'meanS_heteroscesdastic'):
                candle.plot_history(prefix, history, 'meanS_heteroscesdastic')
        elif args.loss == 'quantile':
            if hasattr(history, 'quantile50'):
                candle.plot_history(prefix, history, 'quantile50')
            if hasattr(history, 'quantile10'):
                candle.plot_history(prefix, history, 'quantile10')
            if hasattr(history, 'quantile90'):
                candle.plot_history(prefix, history, 'quantile90')
        else:
            if hasattr(history, 'r2'):
                candle.plot_history(prefix, history, 'r2')

    pred_fname = prefix + '.predicted.tsv'
    df_pred = pd.concat(df_pred_list)
    if args.agg_dose:
        if args.single:
            # df_pred.sort_values(['Source', 'Sample', 'Drug1', target], inplace=True)
            df_pred.sort_values(['Sample', 'Drug1', target], inplace=True)
        else:
            df_pred.sort_values(['Source', 'Sample', 'Drug1', 'Drug2', target], inplace=True)
    else:
        if args.single:
            # df_pred.sort_values(['Source', 'Sample', 'Drug1', 'Dose1', 'Growth'], inplace=True)
            df_pred.sort_values(['Sample', 'Drug1', 'Dose1', 'Growth'], inplace=True)
        else:
            # df_pred.sort_values(['Source', 'Sample', 'Drug1', 'Drug2', 'Dose1', 'Dose2', 'Growth'], inplace=True)
            df_pred.sort_values(['Sample', 'Drug1', 'Drug2', 'Dose1', 'Dose2', 'Growth'], inplace=True)
    df_pred.to_csv(pred_fname, sep='\t', index=False, float_format='%.4g')
    logger.info('Testing predictions stored in file: {}'.format(pred_fname))

    if args.cp:
        logger.info('Model stored in file: {}'.format(prefix + '.model.h5'))
        # logger.info('Model weights stored in file: {}'.format(prefix+cv_ext+'.weights.h5'))
        logger.info('Model weights stored in file: {}'.format(args.save_path + '/' + args.save_weights))

    if args.cv > 1:
        scores = uno.evaluate_prediction(df_pred[target], df_pred['Predicted' + target])
        uno.log_evaluation(scores, logger, description='Combining cross validation folds:')

    for test_source in loader.test_sep_sources:
        test_gen = uno_combined_data_generator.CombinedDataGenerator(
            loader, partition='test', batch_size=args.batch_size, source=test_source)
        df_test = test_gen.get_response(copy=True)
        y_test = df_test[target].values
        n_test = len(y_test)
        if n_test == 0:
            continue
        if args.no_gen:
            x_test_list, y_test = test_gen.get_slice(size=test_gen.size, single=args.single)
            y_test_pred = model.predict(x_test_list, batch_size=args.batch_size)
            if args.loss == 'heteroscedastic':
                y_test_pred = y_test_pred[:, 0]
            elif args.loss == 'quantile':
                y_test_pred = y_test_pred[:, 0]  # 50th quantile prediction
        else:
            y_test_pred = model.predict_generator(test_gen.flow(single=args.single), test_gen.steps)
            if args.loss == 'heteroscedastic':
                y_test_pred = y_test_pred[:test_gen.size, 0]
            elif args.loss == 'quantile':
                y_test_pred = y_test_pred[:test_gen.size, 0]  # 50th quantile prediction
            else:
                y_test_pred = y_test_pred[:test_gen.size]
        y_test_pred = y_test_pred.flatten()
        scores = uno.evaluate_prediction(y_test, y_test_pred)
        uno.log_evaluation(scores, logger,
                           description='Testing on data from {} ({})'.format(test_source, n_test))

    if K.backend() == 'tensorflow':
        K.clear_session()

    logger.handlers = []

    return history
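
# Illustration only: with the quantile loss above, the network emits three
# columns per sample, [50th, 10th, 90th] quantile predictions, which the code
# above slices apart. A standalone sketch with made-up numbers:
import numpy as np

def _example_quantile_split():
    y_pred = np.array([[0.5, 0.2, 0.8],
                       [0.4, 0.1, 0.7]])  # hypothetical model output
    q50, q10, q90 = y_pred[:, 0], y_pred[:, 1], y_pred[:, 2]
    return q50, q10, q90  # the 50th quantile is used as the point prediction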
def run(params):
    args = candle.ArgumentStruct(**params)
    seed = args.rng_seed
    candle.set_seed(seed)

    # Construct extension to save model
    ext = extension_from_parameters(params, 'keras')
    candle.verify_path(params['save_path'])
    prefix = '{}{}'.format(params['save_path'], ext)
    logfile = params['logfile'] if params['logfile'] else prefix + '.log'
    root_fname = 'Agg_attn_abs_bin'
    candle.set_up_logger(logfile, attn.logger, params['verbose'])
    attn.logger.info('Params: {}'.format(params))

    # Get default parameters for initialization and optimizer functions
    keras_defaults = candle.keras_default_config()

    ##
    X_train, _Y_train, X_val, _Y_val, X_test, _Y_test = attn.load_data(params, seed)

    # move this inside the load_data function
    Y_train = _Y_train['AUC']
    Y_test = _Y_test['AUC']
    Y_val = _Y_val['AUC']

    Y_train_neg, Y_train_pos = np.bincount(Y_train)
    Y_test_neg, Y_test_pos = np.bincount(Y_test)
    Y_val_neg, Y_val_pos = np.bincount(Y_val)

    Y_train_total = Y_train_neg + Y_train_pos
    Y_test_total = Y_test_neg + Y_test_pos
    Y_val_total = Y_val_neg + Y_val_pos

    total = Y_train_total + Y_test_total + Y_val_total
    neg = Y_train_neg + Y_test_neg + Y_val_neg
    pos = Y_train_pos + Y_test_pos + Y_val_pos

    print('Examples:\n Total: {}\n Positive: {} ({:.2f}% of total)\n'.format(total, pos, 100 * pos / total))

    nb_classes = params['dense'][-1]

    # Convert classes to categorical with an extra slot for the abstaining class
    Y_train, Y_test, Y_val = candle.modify_labels(nb_classes + 1, Y_train, Y_test, Y_val)

    # Disable class weight (for initial testing of the abstention classifier)
    # y_integers = np.argmax(Y_train, axis=1)
    # class_weights = compute_class_weight('balanced', np.unique(y_integers), y_integers)
    # d_class_weights = dict(enumerate(class_weights))

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)
    print('Y_train shape:', Y_train.shape)
    print('Y_test shape:', Y_test.shape)

    PS = X_train.shape[1]
    model = build_attention_model(params, PS)
    model = candle.add_model_output(model, mode='abstain', num_add=1, activation='sigmoid')
    print('Model after modifying layer for abstention')
    model.summary()

    # Configure abstention model
    mask_ = np.zeros(nb_classes + 1)
    mask_[-1] = 1
    # In the long term this is not as important since mu auto tunes, however it
    # may require a large number of epochs to converge if set far away from target
    mu0 = 0.5
    candle.abstention_variable_initialization(mu0, mask_, nb_classes)

    # parallel_model = multi_gpu_model(model, gpus=4)
    # parallel_model.compile(loss='mean_squared_error',
    #                        optimizer=SGD(lr=0.0001, momentum=0.9),
    #                        metrics=['mae', r2])

    kerasDefaults = candle.keras_default_config()
    if params['momentum']:
        kerasDefaults['momentum_sgd'] = params['momentum']

    optimizer = candle.build_optimizer(params['optimizer'], params['learning_rate'], kerasDefaults)

    # compile model with abstention loss
    model.compile(loss=candle.abstention_loss,
                  optimizer=optimizer,
                  metrics=['acc', tf_auc, candle.abs_acc, candle.acc_class1, candle.abs_acc_class1])

    # set up a bunch of callbacks to do work during model training..
    checkpointer = ModelCheckpoint(filepath=params['save_path'] + root_fname + '.autosave.model.h5',
                                   verbose=1, save_weights_only=False, save_best_only=True)
    csv_logger = CSVLogger('{}/{}.training.log'.format(params['save_path'], root_fname))
    reduce_lr = ReduceLROnPlateau(monitor='val_tf_auc', factor=0.20, patience=40,
                                  verbose=1, mode='auto', min_delta=0.0001,
                                  cooldown=3, min_lr=0.000000001)
    early_stop = EarlyStopping(monitor='val_tf_auc', patience=200, verbose=1, mode='auto')
    candle_monitor = candle.CandleRemoteMonitor(params=params)
    timeout_monitor = candle.TerminateOnTimeOut(params['timeout'])
    tensorboard = TensorBoard(log_dir="tb/tb{}".format(ext))
    history_logger = candle.LoggingCallback(attn.logger.debug)
    abstention_cbk = candle.AbstentionAdapt_Callback(monitor='val_abs_acc_class1',
                                                     scale_factor=params['abs_scale_factor'],
                                                     target_acc=params['target_abs_acc'])

    callbacks = [candle_monitor, timeout_monitor, csv_logger, history_logger, abstention_cbk]
    if params['reduce_lr']:
        callbacks.append(reduce_lr)
    if params['use_cp']:
        callbacks.append(checkpointer)
    if params['use_tb']:
        callbacks.append(tensorboard)
    if params['early_stop']:
        callbacks.append(early_stop)

    epochs = params['epochs']
    batch_size = params['batch_size']

    history = model.fit(X_train, Y_train,
                        # class_weight=d_class_weights,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        validation_data=(X_val, Y_val),
                        callbacks=callbacks)

    # diagnostic plots
    if 'loss' in history.history.keys():
        candle.plot_history(params['save_path'] + root_fname, history, 'loss')
    if 'acc' in history.history.keys():
        candle.plot_history(params['save_path'] + root_fname, history, 'acc')
    if 'abs_acc' in history.history.keys():
        candle.plot_history(params['save_path'] + root_fname, history, 'abs_acc')

    # Plot mu evolution
    fname = params['save_path'] + root_fname + '.mu.png'
    xlabel = 'Epochs'
    ylabel = 'Abstention Weight mu'
    title = 'mu Evolution'
    attnviz.plot_array(abstention_cbk.muvalues, xlabel, ylabel, title, fname)

    # Evaluate model
    score = model.evaluate(X_test, Y_test, verbose=0)
    Y_predict = model.predict(X_test)
    evaluate_abstention(params, root_fname, nb_classes, Y_test, _Y_test, Y_predict, pos, total, score)

    save_and_test_saved_model(params, model, root_fname, X_train, X_test, Y_test)

    attn.logger.handlers = []

    return history
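
# Illustration only: candle.add_model_output with mode='abstain' and num_add=1
# appends one extra output unit, and mask_ above selects that "abstain" slot.
# A standalone sketch with a hypothetical nb_classes=2:
import numpy as np

def _example_abstain_mask(nb_classes=2):
    mask = np.zeros(nb_classes + 1)
    mask[-1] = 1  # the last slot is the abstaining class
    return mask
# _example_abstain_mask() -> array([0., 0., 1.])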
def run(gParameters): print ('Params:', gParameters) file_train = gParameters['train_data'] file_test = gParameters['test_data'] url = gParameters['data_url'] train_file = candle.get_file(file_train, url+file_train, cache_subdir='Pilot1') test_file = candle.get_file(file_test, url+file_test, cache_subdir='Pilot1') X_train, Y_train, X_test, Y_test = bmk.load_data(train_file, test_file, gParameters) print('X_train shape:', X_train.shape) print('X_test shape:', X_test.shape) print('Y_train shape:', Y_train.shape) print('Y_test shape:', Y_test.shape) x_train_len = X_train.shape[1] # this reshaping is critical for the Conv1D to work X_train = np.expand_dims(X_train, axis=2) X_test = np.expand_dims(X_test, axis=2) print('X_train shape:', X_train.shape) print('X_test shape:', X_test.shape) model = Sequential() layer_list = list(range(0, len(gParameters['conv']), 3)) for l, i in enumerate(layer_list): filters = gParameters['conv'][i] filter_len = gParameters['conv'][i+1] stride = gParameters['conv'][i+2] print(int(i/3), filters, filter_len, stride) if gParameters['pool']: pool_list=gParameters['pool'] if type(pool_list) != list: pool_list=list(pool_list) if filters <= 0 or filter_len <= 0 or stride <= 0: break if 'locally_connected' in gParameters: model.add(LocallyConnected1D(filters, filter_len, strides=stride, padding='valid', input_shape=(x_train_len, 1))) else: #input layer if i == 0: model.add(Conv1D(filters=filters, kernel_size=filter_len, strides=stride, padding='valid', input_shape=(x_train_len, 1))) else: model.add(Conv1D(filters=filters, kernel_size=filter_len, strides=stride, padding='valid')) model.add(Activation(gParameters['activation'])) if gParameters['pool']: model.add(MaxPooling1D(pool_size=pool_list[int(i/3)])) model.add(Flatten()) for layer in gParameters['dense']: if layer: model.add(Dense(layer)) model.add(Activation(gParameters['activation'])) if gParameters['dropout']: model.add(Dropout(gParameters['dropout'])) model.add(Dense(gParameters['classes'])) model.add(Activation(gParameters['out_activation'])) #Reference case #model.add(Conv1D(filters=128, kernel_size=20, strides=1, padding='valid', input_shape=(P, 1))) #model.add(Activation('relu')) #model.add(MaxPooling1D(pool_size=1)) #model.add(Conv1D(filters=128, kernel_size=10, strides=1, padding='valid')) #model.add(Activation('relu')) #model.add(MaxPooling1D(pool_size=10)) #model.add(Flatten()) #model.add(Dense(200)) #model.add(Activation('relu')) #model.add(Dropout(0.1)) #model.add(Dense(20)) #model.add(Activation('relu')) #model.add(Dropout(0.1)) #model.add(Dense(CLASSES)) #model.add(Activation('softmax')) kerasDefaults = candle.keras_default_config() # Define optimizer optimizer = candle.build_optimizer(gParameters['optimizer'], gParameters['learning_rate'], kerasDefaults) model.summary() model.compile(loss=gParameters['loss'], optimizer=optimizer, metrics=[gParameters['metrics']]) output_dir = gParameters['output_dir'] if not os.path.exists(output_dir): os.makedirs(output_dir) # calculate trainable and non-trainable params gParameters.update(candle.compute_trainable_params(model)) # set up a bunch of callbacks to do work during model training.. 
    model_name = gParameters['model_name']
    path = '{}/{}.autosave.model.h5'.format(output_dir, model_name)

    checkpointer = ModelCheckpoint(filepath=path, verbose=1,
                                   save_weights_only=False, save_best_only=True)
    csv_logger = CSVLogger('{}/training.log'.format(output_dir))
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10,
                                  verbose=1, mode='auto', epsilon=0.0001,
                                  cooldown=0, min_lr=0)
    candleRemoteMonitor = candle.CandleRemoteMonitor(params=gParameters)
    timeoutMonitor = candle.TerminateOnTimeOut(gParameters['timeout'])

    history = model.fit(X_train, Y_train,
                        batch_size=gParameters['batch_size'],
                        epochs=gParameters['epochs'],
                        verbose=1,
                        validation_data=(X_test, Y_test),
                        callbacks=[csv_logger, reduce_lr, candleRemoteMonitor, timeoutMonitor])

    score = model.evaluate(X_test, Y_test, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])

    # serialize model to JSON
    model_json = model.to_json()
    with open("{}/{}.model.json".format(output_dir, model_name), "w") as json_file:
        json_file.write(model_json)

    # serialize model to YAML
    model_yaml = model.to_yaml()
    with open("{}/{}.model.yaml".format(output_dir, model_name), "w") as yaml_file:
        yaml_file.write(model_yaml)

    # serialize weights to HDF5
    model.save_weights("{}/{}.weights.h5".format(output_dir, model_name))
    print("Saved model to disk")

    # load json and create model
    json_file = open('{}/{}.model.json'.format(output_dir, model_name), 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model_json = model_from_json(loaded_model_json)

    # load yaml and create model
    yaml_file = open('{}/{}.model.yaml'.format(output_dir, model_name), 'r')
    loaded_model_yaml = yaml_file.read()
    yaml_file.close()
    loaded_model_yaml = model_from_yaml(loaded_model_yaml)

    # load weights into new model
    loaded_model_json.load_weights('{}/{}.weights.h5'.format(output_dir, model_name))
    print("Loaded json model from disk")

    # evaluate json loaded model on test data
    loaded_model_json.compile(loss=gParameters['loss'],
                              optimizer=gParameters['optimizer'],
                              metrics=[gParameters['metrics']])
    score_json = loaded_model_json.evaluate(X_test, Y_test, verbose=0)
    print('json Test score:', score_json[0])
    print('json Test accuracy:', score_json[1])
    print("json %s: %.2f%%" % (loaded_model_json.metrics_names[1], score_json[1] * 100))

    # load weights into new model
    loaded_model_yaml.load_weights('{}/{}.weights.h5'.format(output_dir, model_name))
    print("Loaded yaml model from disk")

    # evaluate yaml loaded model on test data
    loaded_model_yaml.compile(loss=gParameters['loss'],
                              optimizer=gParameters['optimizer'],
                              metrics=[gParameters['metrics']])
    score_yaml = loaded_model_yaml.evaluate(X_test, Y_test, verbose=0)
    print('yaml Test score:', score_yaml[0])
    print('yaml Test accuracy:', score_yaml[1])
    print("yaml %s: %.2f%%" % (loaded_model_yaml.metrics_names[1], score_yaml[1] * 100))

    return history
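# Portability note: model.to_yaml() and model_from_yaml() were removed from
# Keras in TensorFlow 2.6 (they raise a RuntimeError there), so the YAML round
# trip above only works on older stacks. A defensive sketch, assuming you want
# the same save/reload flow on newer TF while keeping JSON as the canonical
# format (save_architecture is a hypothetical helper, not part of the benchmark):

def save_architecture(model, output_dir, model_name):
    """Save the architecture as JSON always, and as YAML only when supported."""
    with open("{}/{}.model.json".format(output_dir, model_name), "w") as f:
        f.write(model.to_json())
    try:
        yaml_str = model.to_yaml()  # removed in TF >= 2.6
    except (RuntimeError, AttributeError):
        yaml_str = None
    if yaml_str is not None:
        with open("{}/{}.model.yaml".format(output_dir, model_name), "w") as f:
            f.write(yaml_str)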
def run(params):
    args = candle.ArgumentStruct(**params)
    seed = args.rng_seed
    candle.set_seed(seed)

    # Construct extension to save model
    ext = p1b1.extension_from_parameters(params, '.keras')
    candle.verify_path(params['save_path'])
    prefix = '{}{}'.format(params['save_path'], ext)
    logfile = params['logfile'] if params['logfile'] else prefix + '.log'
    candle.set_up_logger(logfile, p1b1.logger, params['verbose'])
    p1b1.logger.info('Params: {}'.format(params))

    # Get default parameters for initialization and optimizer functions
    keras_defaults = candle.keras_default_config()

    # Load dataset
    x_train, y_train, x_val, y_val, x_test, y_test, x_labels, y_labels = p1b1.load_data(params, seed)

    # cache_file = 'data_l1000_cache.h5'
    # save_cache(cache_file, x_train, y_train, x_val, y_val, x_test, y_test, x_labels, y_labels)
    # x_train, y_train, x_val, y_val, x_test, y_test, x_labels, y_labels = load_cache(cache_file)

    p1b1.logger.info("Shape x_train: {}".format(x_train.shape))
    p1b1.logger.info("Shape x_val: {}".format(x_val.shape))
    p1b1.logger.info("Shape x_test: {}".format(x_test.shape))

    p1b1.logger.info("Range x_train: [{:.3g}, {:.3g}]".format(np.min(x_train), np.max(x_train)))
    p1b1.logger.info("Range x_val: [{:.3g}, {:.3g}]".format(np.min(x_val), np.max(x_val)))
    p1b1.logger.info("Range x_test: [{:.3g}, {:.3g}]".format(np.min(x_test), np.max(x_test)))

    p1b1.logger.debug('Class labels')
    for i, label in enumerate(y_labels):
        p1b1.logger.debug('  {}: {}'.format(i, label))

    # clf = build_type_classifier(x_train, y_train, x_val, y_val)

    n_classes = len(y_labels)
    cond_train = y_train
    cond_val = y_val
    cond_test = y_test

    input_dim = x_train.shape[1]
    cond_dim = cond_train.shape[1]
    latent_dim = params['latent_dim']

    activation = params['activation']
    dropout = params['dropout']
    dense_layers = params['dense']
    dropout_layer = AlphaDropout if params['alpha_dropout'] else Dropout

    # Initialize weights and learning rule
    initializer_weights = candle.build_initializer(params['initialization'], keras_defaults, seed)
    initializer_bias = candle.build_initializer('constant', keras_defaults, 0.)
    if dense_layers is not None:
        if type(dense_layers) != list:
            dense_layers = list(dense_layers)
    else:
        dense_layers = []

    # Encoder Part
    x_input = Input(shape=(input_dim,))
    cond_input = Input(shape=(cond_dim,))
    h = x_input
    if params['model'] == 'cvae':
        h = keras.layers.concatenate([x_input, cond_input])
    for i, layer in enumerate(dense_layers):
        if layer > 0:
            x = h
            h = Dense(layer, activation=activation,
                      kernel_initializer=initializer_weights,
                      bias_initializer=initializer_bias)(h)
            if params['residual']:
                try:
                    h = keras.layers.add([h, x])
                except ValueError:
                    pass
            if params['batch_normalization']:
                h = BatchNormalization()(h)
            if dropout > 0:
                h = dropout_layer(dropout)(h)

    if params['model'] == 'ae':
        encoded = Dense(latent_dim, activation=activation,
                        kernel_initializer=initializer_weights,
                        bias_initializer=initializer_bias)(h)
    else:
        epsilon_std = params['epsilon_std']
        z_mean = Dense(latent_dim, name='z_mean')(h)
        z_log_var = Dense(latent_dim, name='z_log_var')(h)
        encoded = z_mean

        def vae_loss(x, x_decoded_mean):
            xent_loss = binary_crossentropy(x, x_decoded_mean)
            kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
            return K.mean(xent_loss + kl_loss / input_dim)

        def sampling(params):
            # note: the argument shadows the outer params dict within this function
            z_mean_, z_log_var_ = params
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_dim),
                                      mean=0., stddev=epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

        z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
        if params['model'] == 'cvae':
            z_cond = keras.layers.concatenate([z, cond_input])

    # Decoder Part
    decoder_input = Input(shape=(latent_dim,))
    h = decoder_input
    if params['model'] == 'cvae':
        h = keras.layers.concatenate([decoder_input, cond_input])
    for i, layer in reversed(list(enumerate(dense_layers))):
        if layer > 0:
            x = h
            h = Dense(layer, activation=activation,
                      kernel_initializer=initializer_weights,
                      bias_initializer=initializer_bias)(h)
            if params['residual']:
                try:
                    h = keras.layers.add([h, x])
                except ValueError:
                    pass
            if params['batch_normalization']:
                h = BatchNormalization()(h)
            if dropout > 0:
                h = dropout_layer(dropout)(h)

    decoded = Dense(input_dim, activation='sigmoid',
                    kernel_initializer=initializer_weights,
                    bias_initializer=initializer_bias)(h)

    # Build autoencoder model
    if params['model'] == 'cvae':
        encoder = Model([x_input, cond_input], encoded)
        decoder = Model([decoder_input, cond_input], decoded)
        model = Model([x_input, cond_input], decoder([z, cond_input]))
        loss = vae_loss
        metrics = [xent, corr, mse]
    elif params['model'] == 'vae':
        encoder = Model(x_input, encoded)
        decoder = Model(decoder_input, decoded)
        model = Model(x_input, decoder(z))
        loss = vae_loss
        metrics = [xent, corr, mse]
    else:
        encoder = Model(x_input, encoded)
        decoder = Model(decoder_input, decoded)
        model = Model(x_input, decoder(encoded))
        loss = params['loss']
        metrics = [xent, corr]

    model.summary()
    decoder.summary()

    if params['cp']:
        model_json = model.to_json()
        with open(prefix + '.model.json', 'w') as f:
            print(model_json, file=f)

    # Define optimizer
    # optimizer = candle.build_optimizer(params['optimizer'],
    #                                    params['learning_rate'],
    #                                    keras_defaults)
    optimizer = optimizers.deserialize({'class_name': params['optimizer'], 'config': {}})
    base_lr = params['base_lr'] or K.get_value(optimizer.lr)
    if params['learning_rate']:
        K.set_value(optimizer.lr, params['learning_rate'])

    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

    # calculate trainable and non-trainable params
    params.update(candle.compute_trainable_params(model))

    def warmup_scheduler(epoch):
        lr = params['learning_rate'] or base_lr * params['batch_size'] / 100
        if epoch <= 5:
            K.set_value(model.optimizer.lr, (base_lr * (5 - epoch) + lr * epoch) / 5)
        p1b1.logger.debug('Epoch {}: lr={}'.format(epoch, K.get_value(model.optimizer.lr)))
        return K.get_value(model.optimizer.lr)

    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=0.00001)
    warmup_lr = LearningRateScheduler(warmup_scheduler)
    checkpointer = ModelCheckpoint(params['save_path'] + ext + '.weights.h5',
                                   save_best_only=True, save_weights_only=True)
    tensorboard = TensorBoard(log_dir="tb/tb{}".format(ext))
    candle_monitor = candle.CandleRemoteMonitor(params=params)
    timeout_monitor = candle.TerminateOnTimeOut(params['timeout'])
    history_logger = LoggingCallback(p1b1.logger.debug)

    callbacks = [candle_monitor, timeout_monitor, history_logger]
    if params['reduce_lr']:
        callbacks.append(reduce_lr)
    if params['warmup_lr']:
        callbacks.append(warmup_lr)
    if params['cp']:
        callbacks.append(checkpointer)
    if params['tb']:
        callbacks.append(tensorboard)

    x_val2 = np.copy(x_val)
    np.random.shuffle(x_val2)
    start_scores = p1b1.evaluate_autoencoder(x_val, x_val2)
    p1b1.logger.info('\nBetween random pairs of validation samples: {}'.format(start_scores))

    if params['model'] == 'cvae':
        inputs = [x_train, cond_train]
        val_inputs = [x_val, cond_val]
        test_inputs = [x_test, cond_test]
    else:
        inputs = x_train
        val_inputs = x_val
        test_inputs = x_test

    outputs = x_train
    val_outputs = x_val
    test_outputs = x_test

    history = model.fit(inputs, outputs,
                        verbose=2,
                        batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        callbacks=callbacks,
                        validation_data=(val_inputs, val_outputs))

    if params['cp']:
        encoder.save(prefix + '.encoder.h5')
        decoder.save(prefix + '.decoder.h5')

    candle.plot_history(prefix, history, 'loss')
    candle.plot_history(prefix, history, 'corr', 'streaming pearson correlation')

    # Evaluate model on test set
    x_pred = model.predict(test_inputs)
    scores = p1b1.evaluate_autoencoder(x_pred, x_test)
    p1b1.logger.info('\nEvaluation on test data: {}'.format(scores))

    x_test_encoded = encoder.predict(test_inputs, batch_size=params['batch_size'])
    y_test_classes = np.argmax(y_test, axis=1)
    candle.plot_scatter(x_test_encoded, y_test_classes, prefix + '.latent')

    if params['tsne']:
        tsne = TSNE(n_components=2, random_state=seed)
        x_test_encoded_tsne = tsne.fit_transform(x_test_encoded)
        candle.plot_scatter(x_test_encoded_tsne, y_test_classes, prefix + '.latent.tsne')

    # diff = x_pred - x_test
    # plt.hist(diff.ravel(), bins='auto')
    # plt.title("Histogram of Errors with 'auto' bins")
    # plt.savefig('histogram_keras.png')

    # generate synthetic data
    # epsilon_std = 1.0
    # for i in range(1000):
    #     z_sample = np.random.normal(size=(1, 2)) * epsilon_std
    #     x_decoded = decoder.predict(z_sample)

    p1b1.logger.handlers = []

    return history
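# The warmup_scheduler above linearly blends the learning rate from base_lr at
# epoch 0 to the target lr at epoch 5: lr(e) = (base_lr*(5-e) + lr*e)/5, then
# leaves the optimizer's lr alone afterwards. A standalone check of that
# schedule, assuming base_lr=0.001 and target_lr=0.01 purely for illustration
# (after epoch 5 the value holds unless ReduceLROnPlateau lowers it):

def warmup_lr_value(epoch, base_lr=0.001, target_lr=0.01):
    """Pure function mirroring the interpolation used by warmup_scheduler."""
    if epoch <= 5:
        return (base_lr * (5 - epoch) + target_lr * epoch) / 5
    return target_lr

for e in range(7):
    # epoch 0 -> 0.001, epoch 5 -> 0.01, flat afterwards
    print(e, warmup_lr_value(e))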
    output_dir = hyperparams['save']
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # calculate trainable and non-trainable params
    hyperparams.update(candle.compute_trainable_params(model))

    # set up a bunch of callbacks to do work during model training..
    model_name = hyperparams['model_name']
    path = '{}/{}.autosave.model.h5'.format(output_dir, model_name)
    # checkpointer = ModelCheckpoint(filepath=path, verbose=1, save_weights_only=False, save_best_only=True)
    csv_logger = CSVLogger('{}/training.log'.format(output_dir))
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10,
                                  verbose=1, mode='auto', epsilon=0.0001,
                                  cooldown=0, min_lr=0)
    candleRemoteMonitor = candle.CandleRemoteMonitor(params=hyperparams)
    timeoutMonitor = candle.TerminateOnTimeOut(hyperparams['timeout'])

    history2 = model.fit(X_train, Y_train,
                         batch_size=hyperparams['batch_size'],
                         epochs=hyperparams['epochs'],
                         verbose=1,
                         validation_data=(X_test, Y_test),
                         callbacks=[csv_logger, reduce_lr, candleRemoteMonitor, timeoutMonitor])

    score = model.evaluate(X_test, Y_test, verbose=0)
    val_to_return = score[0]
    print(model.metrics_names)
    print(score)
def run(params):
    args = candle.ArgumentStruct(**params)
    seed = args.rng_seed
    candle.set_seed(seed)

    # Construct extension to save model
    ext = attn.extension_from_parameters(params, "keras")
    candle.verify_path(params["save_path"])
    prefix = "{}{}".format(params["save_path"], ext)
    logfile = params["logfile"] if params["logfile"] else prefix + ".log"
    root_fname = "Agg_attn_bin"
    candle.set_up_logger(logfile, attn.logger, params["verbose"])
    attn.logger.info("Params: {}".format(params))

    # Get default parameters for initialization and optimizer functions
    keras_defaults = candle.keras_default_config()

    ##
    X_train, _Y_train, X_val, _Y_val, X_test, _Y_test = attn.load_data(params, seed)

    # move this inside the load_data function
    Y_train = _Y_train["AUC"]
    Y_test = _Y_test["AUC"]
    Y_val = _Y_val["AUC"]

    Y_train_neg, Y_train_pos = np.bincount(Y_train)
    Y_test_neg, Y_test_pos = np.bincount(Y_test)
    Y_val_neg, Y_val_pos = np.bincount(Y_val)

    Y_train_total = Y_train_neg + Y_train_pos
    Y_test_total = Y_test_neg + Y_test_pos
    Y_val_total = Y_val_neg + Y_val_pos

    total = Y_train_total + Y_test_total + Y_val_total
    neg = Y_train_neg + Y_test_neg + Y_val_neg
    pos = Y_train_pos + Y_test_pos + Y_val_pos

    print("Examples:\n Total: {}\n Positive: {} ({:.2f}% of total)\n".format(
        total, pos, 100 * pos / total))

    nb_classes = params["dense"][-1]

    Y_train = np_utils.to_categorical(Y_train, nb_classes)
    Y_test = np_utils.to_categorical(Y_test, nb_classes)
    Y_val = np_utils.to_categorical(Y_val, nb_classes)

    y_integers = np.argmax(Y_train, axis=1)
    # keyword arguments keep this call compatible with newer scikit-learn,
    # which no longer accepts classes/y positionally
    class_weights = compute_class_weight("balanced",
                                         classes=np.unique(y_integers),
                                         y=y_integers)
    d_class_weights = dict(enumerate(class_weights))

    print("X_train shape:", X_train.shape)
    print("X_test shape:", X_test.shape)
    print("Y_train shape:", Y_train.shape)
    print("Y_test shape:", Y_test.shape)

    PS = X_train.shape[1]
    model = build_attention_model(params, PS)

    # parallel_model = multi_gpu_model(model, gpus=4)
    # parallel_model.compile(loss='mean_squared_error',
    #                        optimizer=SGD(lr=0.0001, momentum=0.9),
    #                        metrics=['mae', r2])

    kerasDefaults = candle.keras_default_config()
    if params["momentum"]:
        kerasDefaults["momentum_sgd"] = params["momentum"]

    optimizer = candle.build_optimizer(params["optimizer"], params["learning_rate"], kerasDefaults)

    model.compile(
        loss=params["loss"],
        optimizer=optimizer,
        # optimizer=SGD(lr=0.00001, momentum=0.9),
        # optimizer=Adam(lr=0.00001),
        # optimizer=RMSprop(lr=0.0001),
        # optimizer=Adadelta(),
        metrics=[
            "acc",
            tf.keras.metrics.AUC(name="auroc", curve="ROC"),
            tf.keras.metrics.AUC(name="aucpr", curve="PR"),
        ],
    )

    # set up a bunch of callbacks to do work during model training..
    checkpointer = ModelCheckpoint(
        filepath=params["save_path"] + root_fname + ".autosave.model.h5",
        verbose=1,
        save_weights_only=False,
        save_best_only=True,
    )
    csv_logger = CSVLogger("{}/{}.training.log".format(params["save_path"], root_fname))
    reduce_lr = ReduceLROnPlateau(
        monitor="val_auroc",
        factor=0.20,
        patience=40,
        verbose=1,
        mode="auto",
        min_delta=0.0001,
        cooldown=3,
        min_lr=0.000000001,
    )
    early_stop = EarlyStopping(monitor="val_auroc", patience=200, verbose=1, mode="auto")
    candle_monitor = candle.CandleRemoteMonitor(params=params)
    timeout_monitor = candle.TerminateOnTimeOut(params["timeout"])
    tensorboard = TensorBoard(log_dir="tb/tb{}".format(ext))
    history_logger = LoggingCallback(attn.logger.debug)

    callbacks = [candle_monitor, timeout_monitor, csv_logger, history_logger]
    if params["reduce_lr"]:
        callbacks.append(reduce_lr)
    if params["use_cp"]:
        callbacks.append(checkpointer)
    if params["use_tb"]:
        callbacks.append(tensorboard)
    if params["early_stop"]:
        callbacks.append(early_stop)

    epochs = params["epochs"]
    batch_size = params["batch_size"]

    history = model.fit(
        X_train,
        Y_train,
        class_weight=d_class_weights,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        validation_data=(X_val, Y_val),
        callbacks=callbacks,
    )

    # diagnostic plots
    if "loss" in history.history.keys():
        candle.plot_history(params["save_path"] + root_fname, history, "loss")
    if "acc" in history.history.keys():
        candle.plot_history(params["save_path"] + root_fname, history, "acc")
    if "auroc" in history.history.keys():
        candle.plot_history(params["save_path"] + root_fname, history, "auroc")
    # the PR-curve metric is named "aucpr" in model.compile above
    if "aucpr" in history.history.keys():
        candle.plot_history(params["save_path"] + root_fname, history, "aucpr")

    # Evaluate model
    score = model.evaluate(X_test, Y_test, verbose=0)
    Y_predict = model.predict(X_test)

    evaluate_model(params, root_fname, nb_classes, Y_test, _Y_test, Y_predict, pos, total, score)

    save_and_test_saved_model(params, model, root_fname, X_train, X_test, Y_test)

    attn.logger.handlers = []

    return history
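# The "balanced" class weights fed to model.fit above follow scikit-learn's
# rule w_c = n_samples / (n_classes * count_c), so the rarer class gets the
# larger weight. A quick standalone check with toy labels, for illustration only:

import numpy as np
from sklearn.utils.class_weight import compute_class_weight

y = np.array([0, 0, 0, 0, 0, 0, 1, 1])  # 6 negatives, 2 positives
w = compute_class_weight("balanced", classes=np.unique(y), y=y)
# n_samples=8, n_classes=2 -> w_0 = 8/(2*6) ~= 0.667, w_1 = 8/(2*2) = 2.0
print(dict(enumerate(w)))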
def run_cnn(GP, train_x, train_y, test_x, test_y,
            learning_rate=0.01,
            batch_size=10,
            epochs=10,
            dropout=0.5,
            optimizer='adam',
            wv_len=300,
            filter_sizes=[3, 4, 5],
            num_filters=[300, 300, 300],
            emb_l2=0.001,
            w_l2=0.01):

    max_vocab = np.max(train_x)
    max_vocab2 = np.max(test_x)
    if max_vocab2 > max_vocab:
        max_vocab = max_vocab2

    wv_mat = np.random.randn(max_vocab + 1, wv_len).astype('float32') * 0.1

    # one output head per task; the labels are the four columns of train_y
    num_classes = []
    num_classes.append(np.max(train_y[:, 0]) + 1)
    num_classes.append(np.max(train_y[:, 1]) + 1)
    num_classes.append(np.max(train_y[:, 2]) + 1)
    num_classes.append(np.max(train_y[:, 3]) + 1)

    kerasDefaults = candle.keras_default_config()
    optimizer_run = candle.build_optimizer(optimizer, learning_rate, kerasDefaults)

    cnn = keras_mt_shared_cnn.init_export_network(num_classes=num_classes,
                                                  in_seq_len=1500,
                                                  vocab_size=len(wv_mat),
                                                  wv_space=wv_len,
                                                  filter_sizes=filter_sizes,
                                                  num_filters=num_filters,
                                                  concat_dropout_prob=dropout,
                                                  emb_l2=emb_l2,
                                                  w_l2=w_l2,
                                                  optimizer=optimizer_run)

    print(cnn.summary())

    validation_data = ({'Input': test_x},
                       {'Dense0': test_y[:, 0],
                        'Dense1': test_y[:, 1],
                        'Dense2': test_y[:, 2],
                        'Dense3': test_y[:, 3]})

    # candleRemoteMonitor = CandleRemoteMonitor(params=GP)
    # timeoutMonitor = TerminateOnTimeOut(TIMEOUT)
    candleRemoteMonitor = candle.CandleRemoteMonitor(params=GP)
    timeoutMonitor = candle.TerminateOnTimeOut(GP['timeout'])

    history = cnn.fit(x=np.array(train_x),
                      y=[np.array(train_y[:, 0]),
                         np.array(train_y[:, 1]),
                         np.array(train_y[:, 2]),
                         np.array(train_y[:, 3])],
                      batch_size=batch_size,
                      epochs=epochs,
                      verbose=2,
                      validation_data=validation_data,
                      callbacks=[candleRemoteMonitor, timeoutMonitor])

    return history
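# A minimal driver sketch for run_cnn, assuming the surrounding benchmark
# modules (candle, keras_mt_shared_cnn) are importable. The shapes and values
# below are made up purely to show what the function expects: integer token
# sequences of length 1500 and four label columns, one per classification
# task. GP here is a minimal stand-in; the real CandleRemoteMonitor may
# require the full CANDLE parameter set.

import numpy as np

n_train, n_test, seq_len, n_tasks = 64, 16, 1500, 4
rng = np.random.default_rng(0)

train_x = rng.integers(0, 1000, size=(n_train, seq_len))  # token ids
test_x = rng.integers(0, 1000, size=(n_test, seq_len))
train_y = rng.integers(0, 2, size=(n_train, n_tasks))     # one column per task
test_y = rng.integers(0, 2, size=(n_test, n_tasks))

GP = {'timeout': 3600, 'run_id': 'demo.0'}                 # stand-in params

history = run_cnn(GP, train_x, train_y, test_x, test_y,
                  epochs=1, batch_size=8)
print(history.history.keys())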