def test_set(self):
    # Load the held-out test images and their labels.
    if len(self.test_data) == 0:
        return [], []
    test_data = self.test_data
    x_test = utils.load_images(test_data['path'].values, self.input_shape)
    y_test = test_data['label'].values
    return x_test, y_test
def next_batch(self, size=10, force_stage=None, reverse_entropy=False,
               prediction=False):
    if self.is_done:
        raise ValueError(
            "Tried to sample a batch when there is nothing else to sample")
    logger.debug("Sampling a batch for {} set.".format(
        self.dataset.current_stage))
    self.dataset.set_current_stage()
    current_stage = force_stage if force_stage else self.dataset.current_stage

    if current_stage == Dataset.TEST:
        sampled_df = self.dataset.sample(size)
        # TODO: This needs to be fixed -- test batches get a placeholder
        # score of 0.5 instead of real entropies.
        return sampled_df, current_stage, [], [0.5] * len(sampled_df)

    # Generate training data: oversample, then keep the most informative.
    sampled_df = self.dataset.sample(size * 5)
    if self.data_type == Dataset.IMAGE_TYPE:
        x_data = utils.load_images(sampled_df['path'].values,
                                   self.dataset.input_shape)
    if self.data_type == Dataset.TEXT_TYPE:
        x_data = sampled_df['text'].values

    scores = self.model.score(x_data)
    # Shannon entropy of the predicted distribution: H = sum(p * log(1/p)).
    entropy_func = lambda scores: np.sum(scores * np.log(1 / scores), axis=-1)
    if isinstance(scores, list):
        entropy = np.array([entropy_func(score).mean() for score in scores])
    else:
        entropy = entropy_func(scores)
    assert len(entropy.shape) == 1

    num = min(size, len(entropy) - 1)
    if reverse_entropy:
        # Lowest entropy first: the most confident examples.
        entropy_indexes = np.argpartition(entropy, num)[:num]
    else:
        # Highest entropy first: the most uncertain examples.
        entropy_indexes = np.argpartition(-entropy, num)[:num]

    # Make predictions for the selected examples.
    x_to_score = x_data[entropy_indexes]
    y_prediction = None
    if prediction and len(x_to_score) > 0:
        y_prediction = self.predict(x_to_score)

    response = (
        sampled_df.iloc[entropy_indexes],
        current_stage,
        y_prediction,
        entropy[entropy_indexes].tolist(),
    )
    return response
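# A minimal, self-contained sketch of the uncertainty-sampling step above,
# using synthetic softmax scores (the shapes and rng seed are illustrative
# assumptions, not values from this codebase):
import numpy as np

rng = np.random.default_rng(0)
logits = rng.normal(size=(8, 3))  # 8 unlabelled examples, 3 classes
scores = np.exp(logits) / np.exp(logits).sum(axis=-1, keepdims=True)

# Shannon entropy per example, H = sum(p * log(1/p)), as in entropy_func.
entropy = np.sum(scores * np.log(1 / scores), axis=-1)

# argpartition(-entropy, k)[:k] returns the indexes of the k largest
# entropies (in arbitrary order), which is cheaper than a full sort.
k = 3
most_uncertain = np.argpartition(-entropy, k)[:k]
print(entropy.round(3), most_uncertain)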
def unlabelled_set(self, size=MIN_UNSUPERVISED_EXAMPLES):
    data = self.sample(size)
    if len(data) > 0:
        x_train = utils.load_images(data['path'].values, self.input_shape)
        ids = data['path'].values
    else:
        x_train = []
        ids = []
    return x_train, ids
def _load_pred_data(self, pred_data_path=None):
    try:
        if pred_data_path is None:
            raise Exception("pred_data_path cannot be None type")
        model_input_values = None
        if 'csv' in pred_data_path:
            # TODO: make sure this is grabbing only the most recent row of
            # the df csv stream (sorted by timestamp etc.)
            model_input_values = pd.read_csv(pred_data_path)[0].values
        elif 'jpg' in pred_data_path:
            # TODO: make sure load_images downloads the file locally and
            # returns the file path of the local file
            path = load_images(self._s3_resource, pred_data_path)
            img = image.load_img(path, target_size=(300, 300))
            x = image.img_to_array(img)
            model_input_values = np.expand_dims(x, axis=0)
            # TODO: add logic to delete the locally downloaded input file
            # now that we have the prediction
        return {'input_data': model_input_values}
    except Exception as e:
        # Re-raise with context so the original traceback is preserved.
        raise Exception("Error in _load_pred_data") from e
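# A minimal demonstration of the `raise ... from e` chaining used above
# (standalone sketch; `parse` is a hypothetical helper, not part of this
# codebase):
def parse(value):
    try:
        return int(value)
    except ValueError as e:
        raise RuntimeError("could not parse input") from e

try:
    parse("not-a-number")
except RuntimeError as e:
    print(e)            # could not parse input
    print(e.__cause__)  # invalid literal for int() with base 10: 'not-a-number'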
def __init__(self, params: init.TrainingParamInitialization):
    self.img_size = params.img_size
    self.img_channel = params.img_channel
    self.alpha_channel = params.alpha_channel
    self.reflectance_channel = params.reflectance_channel
    self.batch_size = params.batch_size
    self.D_input_size = params.D_input_size
    self.G_input_size = params.G_input_size
    self.image_dir = params.image_dir
    self.checkpoint_gan = params.checkpoint_gan
    self.sample_dir = params.sample_dir
    self.result_dir = params.result_dir
    self.g_learning_rate = params.g_learning_rate
    self.d_learning_rate = params.d_learning_rate
    self.d_clip = params.d_clip  # weight clip bound on the discriminator (WGAN critic)
    self.gan_model = params.gan_model  # vanilla GAN, WGAN, or LSGAN
    self.optimizer = params.optimizer

    self.X = tf.placeholder(
        tf.float32,
        shape=[None, self.img_size, self.img_size, self.img_channel])
    self.Z = tf.placeholder(
        tf.float32,
        shape=[None, self.img_size, self.img_size, self.img_channel])
    self.BG = tf.placeholder(
        tf.float32,
        shape=[None, self.img_size, self.img_size, self.img_channel])

    self.iter_step = params.iter_step
    self.data = utils.load_images(self.image_dir, self.img_size)

    print('Start building the GAN graph...')
    self.build_graphics()
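# For reference, a minimal sketch of how the three `gan_model` options
# typically differ. `d_real` / `d_fake` are assumed to be raw discriminator
# outputs and `d_vars` the critic's variables; this is illustrative, not
# the repository's actual build_graphics implementation.
import tensorflow as tf  # TF1-style API, matching the placeholders above

def gan_losses(d_real, d_fake, gan_model, d_clip=0.01, d_vars=()):
    if gan_model == 'vanilla':
        d_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(d_real), logits=d_real) +
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.zeros_like(d_fake), logits=d_fake))
        g_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(d_fake), logits=d_fake))
        clip_op = tf.no_op()
    elif gan_model == 'WGAN':
        d_loss = tf.reduce_mean(d_fake) - tf.reduce_mean(d_real)
        g_loss = -tf.reduce_mean(d_fake)
        # Weight clipping keeps the critic approximately Lipschitz;
        # d_clip is the clip bound applied to each critic variable.
        clip_op = tf.group(*[v.assign(tf.clip_by_value(v, -d_clip, d_clip))
                             for v in d_vars])
    else:  # LSGAN: least-squares losses instead of cross-entropy
        d_loss = 0.5 * (tf.reduce_mean(tf.square(d_real - 1)) +
                        tf.reduce_mean(tf.square(d_fake)))
        g_loss = 0.5 * tf.reduce_mean(tf.square(d_fake - 1))
        clip_op = tf.no_op()
    return d_loss, g_loss, clip_op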
# Custom objects needed for inference and training
custom_objects = {
    'BilinearUpSampling2D': BilinearUpSampling2D,
    'depth_loss_function': None
}

print('Loading model...')

# Load model into GPU / CPU
model = load_model(args.model, custom_objects=custom_objects, compile=False)
print('\nModel loaded ({0}).'.format(args.model))

# Input images
inputs = load_images(glob.glob(args.input))
print('\nLoaded ({0}) images of size {1}.'.format(inputs.shape[0],
                                                  inputs.shape[1:]))

# Compute results
outputs = predict(model, inputs)

# matplotlib problem on ubuntu terminal fix
# matplotlib.use('TkAgg')

# Display results
viz = display_images(outputs.copy(), inputs.copy())
plt.figure(figsize=(10, 5))
plt.imshow(viz)
plt.savefig('test.png')
plt.show()
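# The snippet above reads `args.model` and `args.input`. A minimal argparse
# setup consistent with those names (the defaults here are assumptions; the
# actual script may define its arguments differently):
import argparse

parser = argparse.ArgumentParser(description='Depth estimation inference.')
parser.add_argument('--model', default='model.h5', type=str,
                    help='Trained Keras model file.')
parser.add_argument('--input', default='examples/*.png', type=str,
                    help='Glob pattern of input images.')
args = parser.parse_args()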
import pandas as pd

from utils.utils import grid_search_gabor_filters, load_images, create_dir_if_not_exists

if __name__ == '__main__':
    # Use grid search to find the best Gabor filter parameters (psi = const = 0).
    data_test, data_retest = load_images()

    static_test = data_test[:, :, 52, 1]
    moving_test = data_test[:, :, 52, 2]
    max_mi_test = grid_search_gabor_filters(static_test, moving_test)
    params_test = max_mi_test[1]
    df_test = pd.DataFrame(params_test)

    static_retest = data_retest[:, :, 52, 1]
    moving_retest = data_retest[:, :, 52, 2]  # channel 2, mirroring the test pair above
    max_mi_retest = grid_search_gabor_filters(static_retest, moving_retest)
    params_retest = max_mi_retest[1]
    df_retest = pd.DataFrame(params_retest)

    params_test_path = "resources/data/test/gabor_filter"
    params_retest_path = "resources/data/retest/gabor_filter"
    create_dir_if_not_exists(params_test_path)
    create_dir_if_not_exists(params_retest_path)
    df_test.to_csv(params_test_path + '/gabor_filter.csv',
                   index=False, header=True)
    df_retest.to_csv(params_retest_path + '/gabor_filter.csv',
                     index=False, header=True)
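# Judging by the max_mi_* names, grid_search_gabor_filters ranks parameter
# settings by the mutual information between the filtered static and moving
# slices. A minimal histogram-based MI sketch (the real criterion lives in
# utils.utils and may be computed differently):
import numpy as np

def mutual_information(static, moving, bins=32):
    joint_hist, _, _ = np.histogram2d(static.ravel(), moving.ravel(), bins=bins)
    pxy = joint_hist / joint_hist.sum()
    px = pxy.sum(axis=1)  # marginal over the static image
    py = pxy.sum(axis=0)  # marginal over the moving image
    px_py = np.outer(px, py)
    nonzero = pxy > 0     # avoid log(0)
    return np.sum(pxy[nonzero] * np.log(pxy[nonzero] / px_py[nonzero]))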
from utils import utils as ul
import numpy as np
import matplotlib.pyplot as plt
import pickle
from dipy.data import default_sphere

if __name__ == '__main__':
    sphere = ul.create_sphere(30)
    points = ul.get_points_coordinates(sphere)

    data_test, data_retest = ul.load_images()
    test_img = data_test[0]
    test_bvals = data_test[1]
    test_bvecs = data_test[2]
    retest_img = data_retest[0]
    retest_bvals = data_retest[1]
    retest_bvecs = data_retest[2]

    # bvecs from http://www.emmanuelcaruyer.com/q-space-sampling.php
    bvecs_generated = np.array([[0.049, -0.919, -0.391],
                                [0.726, 0.301, -0.618],
                                [-0.683, 0.255, -0.684],
                                [0.845, -0.502, -0.186],
                                [-0.730, -0.619, -0.288],
                                [-0.051, 0.039, 0.998],
                                [-0.018, 0.871, -0.491],
                                [-0.444, 0.494, 0.747],
                                [-0.989, -0.086, -0.116],
                                [-0.470, -0.855, 0.221],
                                [0.412, 0.400, 0.819],
                                [-0.552, 0.790, -0.267],
                                [-0.123, -0.477, 0.871],