# Note: these functions rely on module-level context defined elsewhere in this
# file/package: os, sys, time, numpy as np, multiprocessing as mp,
# functools.partial, the `backend` string, the `optimizer_class` factory, and
# make_single_prediction.


def make_evaluations_gpu(conf, shot_list, loader):
    loader.set_inference_mode(True)

    if backend == 'tf' or backend == 'tensorflow':
        first_time = "tensorflow" not in sys.modules
        if first_time:
            import tensorflow as tf
            os.environ['KERAS_BACKEND'] = 'tensorflow'
            from keras.backend.tensorflow_backend import set_session
            config = tf.ConfigProto(device_count={"GPU": 1})
            set_session(tf.Session(config=config))
    else:
        os.environ['THEANO_FLAGS'] = 'device=gpu,floatX=float32'
        import theano

    from keras.utils.generic_utils import Progbar
    from plasma.models.builder import ModelBuilder
    specific_builder = ModelBuilder(conf)

    y_prime = []
    y_gold = []
    disruptive = []
    batch_size = min(len(shot_list), conf['model']['pred_batch_size'])

    pbar = Progbar(len(shot_list))
    print('evaluating {} shots using batchsize {}'.format(
        len(shot_list), batch_size))

    shot_sublists = shot_list.sublists(batch_size, equal_size=False)
    all_metrics = []
    all_weights = []
    for (i, shot_sublist) in enumerate(shot_sublists):
        batch_size = len(shot_sublist)
        model = specific_builder.build_model(True, custom_batch_size=batch_size)
        model.compile(optimizer=optimizer_class(),
                      loss=conf['data']['target'].loss)
        specific_builder.load_model_weights(model)
        model.reset_states()
        # load data and evaluate on it
        X, y, shot_lengths, disr = loader.load_as_X_y_pred(
            shot_sublist, custom_batch_size=batch_size)
        all_metrics.append(
            model.evaluate(X, y, batch_size=batch_size, verbose=False))
        all_weights.append(batch_size)
        model.reset_states()
        pbar.add(1.0 * len(shot_sublist))
        loader.verbose = False  # True during the first iteration

    if len(all_metrics) > 1:
        print('evaluations all: {}'.format(all_metrics))
    loss = np.average(all_metrics, weights=all_weights)
    print('Evaluation Loss: {}'.format(loss))
    loader.set_inference_mode(False)
    return loss
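
# Illustrative sketch (not from the original module): why the evaluation loss
# above is a weighted average. Each sub-batch loss is weighted by how many shots
# it contained, so a short final sub-batch does not skew the reported loss.
def _example_weighted_eval_loss():
    import numpy as np
    all_metrics = [0.30, 0.10]   # hypothetical per-sub-batch losses
    all_weights = [128, 32]      # hypothetical shots per sub-batch
    weighted = np.average(all_metrics, weights=all_weights)   # 0.26
    plain = np.average(all_metrics)                           # 0.20 (unweighted)
    return weighted, plain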

def make_predictions_gpu(conf, shot_list, loader, custom_path=None):
    loader.set_inference_mode(True)

    if backend == 'tf' or backend == 'tensorflow':
        first_time = "tensorflow" not in sys.modules
        if first_time:
            import tensorflow as tf
            os.environ['KERAS_BACKEND'] = 'tensorflow'
            from keras.backend.tensorflow_backend import set_session
            config = tf.ConfigProto(device_count={"GPU": 1})
            set_session(tf.Session(config=config))
    else:
        os.environ['THEANO_FLAGS'] = 'device=gpu,floatX=float32'
        import theano

    from keras.utils.generic_utils import Progbar
    from plasma.models.builder import ModelBuilder
    specific_builder = ModelBuilder(conf)

    y_prime = []
    y_gold = []
    disruptive = []

    model = specific_builder.build_model(True)
    model.compile(optimizer=optimizer_class(),
                  loss=conf['data']['target'].loss)
    specific_builder.load_model_weights(model, custom_path)
    model.reset_states()

    pbar = Progbar(len(shot_list))
    shot_sublists = shot_list.sublists(conf['model']['pred_batch_size'],
                                       do_shuffle=False, equal_size=True)
    for (i, shot_sublist) in enumerate(shot_sublists):
        # load data and predict on it
        X, y, shot_lengths, disr = loader.load_as_X_y_pred(shot_sublist)
        y_p = model.predict(X, batch_size=conf['model']['pred_batch_size'])
        model.reset_states()
        y_p = loader.batch_output_to_array(y_p)
        y = loader.batch_output_to_array(y)

        # cut arrays back to their true (unpadded) shot lengths
        y_p = [arr[:shot_lengths[j]] for (j, arr) in enumerate(y_p)]
        y = [arr[:shot_lengths[j]] for (j, arr) in enumerate(y)]

        pbar.add(1.0 * len(shot_sublist))
        loader.verbose = False  # True during the first iteration
        y_prime += y_p
        y_gold += y
        disruptive += disr

    y_prime = y_prime[:len(shot_list)]
    y_gold = y_gold[:len(shot_list)]
    disruptive = disruptive[:len(shot_list)]
    loader.set_inference_mode(False)
    return y_prime, y_gold, disruptive
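
# Illustrative sketch (not from the original module) of the "cut arrays back"
# step in make_predictions_gpu: sublists are padded to equal size, so each
# per-shot output is truncated back to its true length. Values are hypothetical.
def _example_truncate_padded_outputs():
    import numpy as np
    y_p = [np.zeros(10), np.zeros(10)]   # padded per-shot predictions
    shot_lengths = [7, 4]                # true length of each shot
    y_p = [arr[:shot_lengths[j]] for (j, arr) in enumerate(y_p)]
    return [len(arr) for arr in y_p]     # [7, 4]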

def make_predictions(conf, shot_list, loader):
    loader.set_inference_mode(True)
    use_cores = max(1, mp.cpu_count() - 2)

    if backend == 'tf' or backend == 'tensorflow':
        first_time = "tensorflow" not in sys.modules
        if first_time:
            import tensorflow as tf
            os.environ['KERAS_BACKEND'] = 'tensorflow'
            from keras.backend.tensorflow_backend import set_session
            config = tf.ConfigProto(device_count={"CPU": use_cores})
            set_session(tf.Session(config=config))
    else:
        os.environ['THEANO_FLAGS'] = 'device=cpu'
        import theano

    from plasma.models.builder import ModelBuilder
    specific_builder = ModelBuilder(conf)

    y_prime = []
    y_gold = []
    disruptive = []

    model = specific_builder.build_model(True)
    model.compile(optimizer=optimizer_class(),
                  loss=conf['data']['target'].loss)
    specific_builder.load_model_weights(model)
    model_save_path = specific_builder.get_latest_save_path()

    start_time = time.time()
    pool = mp.Pool(use_cores)
    fn = partial(make_single_prediction, builder=specific_builder,
                 loader=loader, model_save_path=model_save_path)

    print('running in parallel on {} processes'.format(pool._processes))
    for (i, (y_p, y, is_disruptive)) in enumerate(pool.imap(fn, shot_list)):
        print('Shot {}/{}'.format(i, len(shot_list)))
        sys.stdout.flush()
        y_prime.append(y_p)
        y_gold.append(y)
        disruptive.append(is_disruptive)
    pool.close()
    pool.join()
    print('Finished Predictions in {} seconds'.format(time.time() - start_time))
    loader.set_inference_mode(False)
    return y_prime, y_gold, disruptive
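
# Illustrative usage sketch (not from the original module): `conf`, `shot_list`,
# and `loader` are assumed to be the configuration dict, ShotList, and Loader
# objects that the surrounding pipeline already passes into these functions.
def _example_run_inference(conf, shot_list, loader, use_gpu=True):
    if use_gpu:
        y_prime, y_gold, disruptive = make_predictions_gpu(conf, shot_list, loader)
    else:
        y_prime, y_gold, disruptive = make_predictions(conf, shot_list, loader)
    loss = make_evaluations_gpu(conf, shot_list, loader)
    return y_prime, y_gold, disruptive, loss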