def test_carina():
    """End-to-end pipeline test on the Carina image.

    Exercises data load -> cutout generation -> fingerprinting -> similarity,
    pinning intermediate values (cutout count, pixel corner, top predictions).
    """
    # Load the data.
    carina_location = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'data/carina.tiff')
    image_data = Data(location=carina_location,
                      radec=(10.7502222, -59.8677778),
                      meta={}, processing=[])
    image_data.get_data()

    # Add to the data collection
    dc = DataCollection()
    dc.add(image_data)
    assert len(dc) == 1

    #
    # Create the cutouts with a processing step applied
    #
    sliding_window_cutouts = BasicCutoutGenerator(output_size=224,
                                                  step_size=550)

    cc = CutoutCollection()
    for cutout in sliding_window_cutouts.create_cutouts(image_data):
        cc.add(cutout)
    assert len(cc) == 35

    # Spot-check the top-left 2x2 pixel block of the first cutout.
    cmp_arr = np.array(
        [[[51, 66, 69], [50, 70, 78]],
         [[48, 66, 72], [49, 65, 72]]], dtype=np.uint8)
    assert np.allclose(cc[0].get_data()[:2, :2], cmp_arr)

    #
    # Compute the fingerprints for each cutout
    #
    fc = FingerprintCollection()
    fc_save = FingerprintCalculatorResnet().save()
    for fingerprint in fingerprint_calculate(cc, fc_save):
        fc.add(fingerprint)
    assert [x[1] for x in fc[0].predictions[:3]
            ] == ['hammerhead', 'stingray', 'binder']

    #
    # Compute the similarity metrics
    #
    similarity_tsne = similarity_calculate(fc, 'tsne')
    # BUG FIX: the original ended with a vacuous `assert True`, which can
    # never fail. Assert instead that the similarity computation actually
    # produced a result.
    assert similarity_tsne is not None
def test_end2end():
    """Run the full pipeline and verify a Similarity survives a save/load round trip."""
    # Load the data.
    data_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'data/carina.tiff')
    carina = Data(location=data_path,
                  radec=(10.7502222, -59.8677778),
                  meta={}, processing=[])
    carina.get_data()

    # Add to the data collection
    collection = DataCollection()
    collection.add(carina)

    #
    # Create the cutouts with a processing step applied
    #
    generator = BasicCutoutGenerator(output_size=224, step_size=550)

    cutouts = CutoutCollection()
    for piece in generator.create_cutouts(carina):
        cutouts.add(piece)

    #
    # Compute the fingerprints for each cutout
    #
    fingerprints = FingerprintCollection()
    resnet_save = FingerprintCalculatorResnet().save()
    for fp in fingerprint_calculate(cutouts, resnet_save):
        fingerprints.add(fp)

    #
    # Compute the similarity metrics
    #
    similarity_tsne = similarity_calculate(fingerprints, 'tsne')

    # Round-trip through save()/factory() and compare canonical JSON forms.
    reloaded = Similarity.factory(similarity_tsne.save())
    original_json = json.dumps(similarity_tsne.save(), sort_keys=True)
    reloaded_json = json.dumps(reloaded.save(), sort_keys=True)
    assert reloaded_json == original_json
def load(self, thedict):
    """Restore this cutout's state from its serialized dict form.

    Rebuilds the wrapped Data object, bounding box, and processing steps
    from `thedict`, then registers the cutout with the CutoutCollection.
    """
    log.info('Loading cutout')

    self._uuid = thedict['uuid']
    self._data = Data.factory(thedict['data'])
    self._generator_parameters = thedict['generator_parameters']
    self._bounding_box = BoundingBox.load(thedict['bounding_box'])
    self._base_cutout_uuid = thedict['base_cutout_uuid']
    # Each processing entry is itself a serialized ImageProcessing step.
    self._cutout_processing = list(
        map(ImageProcessing.load, thedict['cutout_processing']))

    # Add to the cutout collection
    CutoutCollection._add(self)
from transfer_learning.cutout import CutoutCollection
from transfer_learning.cutout.generators import BasicCutoutGenerator

config = ConfigParser()
config.read('config.ini')

#
# Load the data
#
print('Going to load the HST Heritage data')
data = DataCollection()
for path in glob.glob('../../data/heritage/*.???'):
    print(f' adding data {path}')
    data.add(Data(location=path, radec=(-32, 12), meta={}))

#
# Create the cutout generator.
#
print('Going to calculate the sliding window cutouts')
sliding_window_cutouts = BasicCutoutGenerator(output_size=224,
                                              step_size=112)

#
# Create the cutouts
#
cutouts = sliding_window_cutouts.create_cutouts(data)

#
# Setup an image processing step on the data. This will convert
# any data that is not gray scale into gray scale.
#
gray_scale = image_processing.GrayScale()

#
# Now create the actual data and add to the data collection.
#
print('Setting up the data structure required')
data_collection = DataCollection()

# Fixed seed so the random sample of 200 files is reproducible.
np.random.seed(12)
for entry in np.random.choice(processing_dict, 200, replace=False):
    im = Data(location=entry['location'],
              radec=entry['radec'],
              meta=entry['meta'])
    im.add_processing(gray_scale)

    # Add to the data collection
    data_collection.add(im)

#
# Create cutout pre-processing steps, which for this,
# is just crop and resize.
#
cutout_crop = image_processing.Crop([15, -15, 15, -15])
cutout_resize = image_processing.Resize([224, 224])

#
#
# Create the data pre-processing.
#
resize_224 = Resize(output_size=(224, 224))

#
# Create the datasets
#
print('Creating data objects')
data = DataCollection()
for fileinfo in processing_dict[:20]:
    im = Data(location=fileinfo['location'],
              radec=fileinfo['radec'],
              meta=fileinfo['meta'])
    im.add_processing(resize_224)
    # CONSISTENCY FIX: every other script adds to a DataCollection via
    # .add(...); the original called data.append(im) here, which is
    # inconsistent with the DataCollection API used elsewhere.
    data.add(im)

#
# Create cutout generator
#
print('Creating the cutout generator')
full_cutout = FullImageCutoutGenerator(output_size=(224, 224))

#
# Create the cutouts.
#
from transfer_learning.cutout import CutoutCollection
from transfer_learning.fingerprint import FingerprintCollection
from transfer_learning.cutout.generators import BasicCutoutGenerator

# Serialized ResNet fingerprint calculator, reused later by the pipeline.
fc_save = FingerprintCalculatorResnet().save()

config = ConfigParser()
config.read('config.ini')

#
# Load the data
#
print('Going to load the carina data')
image_data = Data(location='../../data/carina.tiff',
                  radec=(10.7502222, -59.8677778),
                  meta={},
                  processing=[])
image_data.get_data()

#
# Add to the data collection
#
dc = DataCollection()
dc.add(image_data)

#
# Create the sliding window cutout generator.
#
print('Creating cutout generator')