def readFromDiskQueue(self, dataAug=''):
    """Build TF graph ops that read images from disk and optionally augment.

    Each entry of self.internalQueue is a file path tensor; files are decoded
    per self.fileExts, forced to self.nChannels[i] channels and resized to
    self.chip_size.  By convention the last input is always the label.

    :param dataAug: augmentation spec string ('' = no augmentation)
    :return: single-element list holding the stacked (and possibly augmented)
             image tensor
    """
    queueOutput = []
    # total channel count across all inputs, passed to the augmenter
    totChannels = np.sum(self.nChannels)
    for i in range(len(self.internalQueue)):
        qData = tf.read_file(self.internalQueue[i])
        # depending on whether it is a jpeg or png load it that way
        if(self.fileExts[i] == 'jpg'):
            ldData = tf.image.decode_jpeg(qData, channels=self.nChannels[i])
        elif(self.fileExts[i] == 'png'):
            ldData = tf.image.decode_png(qData, channels=self.nChannels[i])
        else:
            # generic decoder for any other extension
            ldData = tf.image.decode_image(qData, channels=self.nChannels[i])
        rldData = tf.image.resize_images(ldData, self.chip_size)
        queueOutput.append(rldData)
    # stack the inputs along a new axis 2, then drop the trailing singleton;
    # NOTE(review): this assumes each decoded input is single-channel
    # (nChannels[i] == 1) so the squeeze(axis=-1) is valid — confirm
    queueOutput = tf.squeeze(tf.stack(queueOutput, axis=2), axis=-1)
    if self.block_mean is not None:
        queueOutput -= self.block_mean
    if len(dataAug) > 0:
        # string-interface augmentation; resize again because some
        # augmentations (e.g. crops) can change the spatial size
        augDat = uabUtilreader.doDataAug(queueOutput, totChannels, dataAug,
                                         img_mean=self.block_mean)
        augDat = tf.image.resize_images(augDat, self.chip_size)
    else:
        augDat = queueOutput
    return [augDat]
def readFromDiskIteratorTrain(self, image_dir, chipFiles, batch_size, patch_size, padding=(0, 0), dataAug=''):
    """Infinite training iterator with per-patch sampling probabilities.

    Patches are drawn with probabilities self.patch_prob (one per entry of
    chipFiles).  Each yielded batch is (features, labels) with the label in
    channel 0; when self.return_name is set, the patch-name list is appended.
    """
    n_chan = len(chipFiles[0])
    assert len(chipFiles) == len(self.patch_prob)
    while True:
        batch = np.zeros((batch_size, patch_size[0], patch_size[1], n_chan))
        names = [[] for _ in range(batch_size)]
        # weighted draw of batch_size patch indices
        chosen = np.random.choice(len(chipFiles), batch_size, p=self.patch_prob)
        for slot, pick in enumerate(chosen):
            files = chipFiles[pick]
            # patch name = first two '_'-separated tokens of the first file
            name = '_'.join(files[0].split('_')[:2])
            parts = []
            n_chan = 0
            for fname in files:
                arr = util_functions.uabUtilAllTypeLoad(
                    os.path.join(image_dir, fname))
                if arr.ndim == 2:
                    arr = arr[:, :, np.newaxis]
                n_chan += arr.shape[2]
                parts.append(arr)
            stacked = np.dstack(parts).astype(np.float32)
            if self.block_mean is not None:
                stacked -= self.block_mean
            if dataAug != '':
                aug = uabUtilreader.doDataAug(stacked, n_chan, dataAug,
                                              is_np=True,
                                              img_mean=self.block_mean)
            else:
                aug = stacked
            if (np.array(padding) > 0).any():
                aug = uabUtilreader.pad_block(aug, padding)
            dest = slot % batch_size
            batch[dest, :, :, :] = aug
            names[dest] = name
            if (slot + 1) % batch_size == 0:
                if self.return_name:
                    yield batch[:, :, :, 1:], batch[:, :, :, :1], names
                else:
                    yield batch[:, :, :, 1:], batch[:, :, :, :1]
def readFromDiskIteratorTrain(self, image_dir, chipFiles, batch_size, patch_size, padding=(0, 0), dataAug=''):
    """Single-pass training iterator for patch-level classification.

    Walks chipFiles sequentially, center-crops each (augmented) patch to
    self.center_crop, and builds a two-class one-hot target from the fraction
    of positive pixels in channel 0: [0, 1] when the fraction exceeds
    self.patch_prob, else [1, 0].  Yields (images, targets) per full batch.
    """
    n_chan = len(chipFiles[0])
    batch = np.zeros(
        (batch_size, self.center_crop[0], self.center_crop[1], n_chan))
    targets = np.zeros((batch_size, 2))
    # deterministic order; use np.random.permutation(len(chipFiles)) to shuffle
    order = np.arange(len(chipFiles))
    for cnt, pick in enumerate(order):
        files = chipFiles[pick]
        parts = []
        n_chan = 0
        for fname in files:
            arr = util_functions.uabUtilAllTypeLoad(
                os.path.join(image_dir, fname))
            if arr.ndim == 2:
                arr = arr[:, :, np.newaxis]
            n_chan += arr.shape[2]
            parts.append(arr)
        stacked = np.dstack(parts).astype(np.float32)
        if self.block_mean is not None:
            stacked -= self.block_mean
        if dataAug != '':
            aug = uabUtilreader.doDataAug(stacked, n_chan, dataAug,
                                          is_np=True,
                                          img_mean=self.block_mean)
        else:
            aug = stacked
        if (np.array(padding) > 0).any():
            aug = uabUtilreader.pad_block(aug, padding)
        aug = util_functions.crop_center(aug, self.center_crop[0],
                                         self.center_crop[1])
        slot = cnt % batch_size
        batch[slot, :, :, :] = aug
        # fraction of positive pixels in the (assumed binary) first channel
        frac = np.sum(aug[:, :, 0]) / (self.center_crop[0] * self.center_crop[1])
        targets[slot, :] = [0, 1] if frac > self.patch_prob else [1, 0]
        if (cnt + 1) % batch_size == 0:
            yield (batch[:, :, :, 1:], targets)
def readFromDiskIteratorTrain(self, image_dir, chipFiles, batch_size, patch_size, padding=(0, 0), dataAug=''):
    """Infinite training iterator with group-weighted patch sampling.

    Patches are partitioned by self.group_chip_files; each batch slot first
    draws a group with probabilities self.group_alpha, then takes the next
    patch from that group's own shuffled order (cycling when exhausted).
    Yields (features, labels) with the label taken from channel 0.
    """
    groups = self.group_chip_files(chipFiles)
    assert len(groups) == len(self.group_alpha)
    # a fixed shuffled visiting order per group, plus a cursor into it
    orders = [np.random.permutation(len(g)) for g in groups]
    cursors = [0] * len(groups)
    n_chan = len(chipFiles[0])
    while True:
        batch = np.zeros((batch_size, patch_size[0], patch_size[1], n_chan))
        # weighted draw of one group per batch slot
        picked = np.random.choice(len(groups), batch_size, p=self.group_alpha)
        for slot, g_id in enumerate(picked):
            p_id = orders[g_id][cursors[g_id] % len(groups[g_id])]
            cursors[g_id] += 1
            files = groups[g_id][p_id]
            parts = []
            n_chan = 0
            for fname in files:
                arr = util_functions.uabUtilAllTypeLoad(
                    os.path.join(image_dir, fname))
                if arr.ndim == 2:
                    arr = arr[:, :, np.newaxis]
                n_chan += arr.shape[2]
                parts.append(arr)
            stacked = np.dstack(parts).astype(np.float32)
            if self.block_mean is not None:
                stacked -= self.block_mean
            if dataAug != '':
                aug = uabUtilreader.doDataAug(stacked, n_chan, dataAug,
                                              is_np=True,
                                              img_mean=self.block_mean)
            else:
                aug = stacked
            if (np.array(padding) > 0).any():
                aug = uabUtilreader.pad_block(aug, padding)
            batch[slot % batch_size, :, :, :] = aug
            if (slot + 1) % batch_size == 0:
                yield batch[:, :, :, 1:], batch[:, :, :, :1]
def readFromDiskIteratorTrain(self, image_dir, chipFiles, batch_size, patch_size, random, padding=(0, 0), dataAug=''):
    """Training iterator over patch files, optionally shuffled.

    :param image_dir: directory the names in chipFiles are relative to
    :param chipFiles: list of rows; each row lists the files of one patch
    :param batch_size: patches per yielded batch
    :param patch_size: (height, width) of a patch
    :param random: if truthy, visit patches in a (fixed) random permutation
    :param padding: per-side padding applied via uabUtilreader.pad_block
    :param dataAug: augmentation spec string, '' for none

    Yields (features, labels): features are channels 1:, the label is the
    LAST channel.  NOTE(review): sibling iterators use channel 0 as the
    label, so the last feature channel overlaps the label here — confirm
    this is intended by the callers.
    """
    if random:
        idx = np.random.permutation(len(chipFiles))
    else:
        idx = np.arange(stop=len(chipFiles))
    nDims = len(chipFiles[0])
    while True:
        image_batch = np.zeros(
            (batch_size, patch_size[0], patch_size[1], nDims))
        for cnt, randInd in enumerate(idx):
            row = chipFiles[randInd]
            blockList = []
            nDims = 0
            for file in row:
                img = util_functions.uabUtilAllTypeLoad(
                    os.path.join(image_dir, file))
                if len(img.shape) == 2:
                    img = np.expand_dims(img, axis=2)
                nDims += img.shape[2]
                blockList.append(img)
            # BUG FIX: cast to float32 before the in-place mean subtraction;
            # '-=' on an integer-typed stack with a float mean raises a numpy
            # casting error.  This also matches every sibling iterator.
            block = np.dstack(blockList).astype(np.float32)
            if self.block_mean is not None:
                block -= self.block_mean
            if dataAug != '':
                # NOTE(review): sibling iterators pass is_np=True here; the
                # call is kept unchanged — confirm doDataAug's default path.
                augDat = uabUtilreader.doDataAug(block, nDims, dataAug,
                                                 img_mean=self.block_mean)
            else:
                augDat = block
            if (np.array(padding) > 0).any():
                augDat = uabUtilreader.pad_block(augDat, padding)
            image_batch[cnt % batch_size, :, :, :] = augDat
            if (cnt + 1) % batch_size == 0:
                yield image_batch[:, :, :, 1:], \
                    np.expand_dims(image_batch[:, :, :, -1], axis=3)
def readFromDiskIteratorTrain(self, image_dir, chipFiles, batch_size, patch_size, padding=(0, 0), dataAug=''):
    """Infinite iterator yielding each patch plus its three 90-degree
    rotations as one fixed 4-image batch (requires batch_size == 1).
    """
    assert batch_size == 1
    n_chan = len(chipFiles[0])
    while True:
        batch = np.zeros((4, patch_size[0], patch_size[1], n_chan))
        # fresh shuffle every pass over the data
        order = np.random.permutation(len(chipFiles))
        for cnt, pick in enumerate(order):
            parts = []
            n_chan = 0
            for fname in chipFiles[pick]:
                arr = util_functions.uabUtilAllTypeLoad(
                    os.path.join(image_dir, fname))
                if arr.ndim == 2:
                    arr = arr[:, :, np.newaxis]
                n_chan += arr.shape[2]
                parts.append(arr)
            stacked = np.dstack(parts).astype(np.float32)
            if self.block_mean is not None:
                stacked -= self.block_mean
            if dataAug != '':
                aug = uabUtilreader.doDataAug(stacked, n_chan, dataAug,
                                              is_np=True,
                                              img_mean=self.block_mean)
            else:
                aug = stacked
            if (np.array(padding) > 0).any():
                aug = uabUtilreader.pad_block(aug, padding)
            # slot 0 is the original, slots 1..3 the k*90-degree rotations
            batch[0, :, :, :] = aug
            for k in range(1, 4):
                batch[k, :, :, :] = np.rot90(aug, k=k, axes=(0, 1))
            if (cnt + 1) % batch_size == 0:
                yield batch[:, :, :, 1:], batch[:, :, :, :1]
def readFromDiskIteratorTrain(self, image_dir, chipFiles, batch_size, patch_size, padding=(0, 0), dataAug=''):
    """Infinite random iterator that also reports a city id per patch.

    The city name is the alphabetic part of the first '_'-token of the first
    file name (digits stripped) and is mapped through self.city_dict.
    Yields (features, labels, city_ids) per full batch.
    """
    # fixed shuffle, reused for every pass
    order = np.random.permutation(len(chipFiles))
    n_chan = len(chipFiles[0])
    while True:
        batch = np.zeros((batch_size, patch_size[0], patch_size[1], n_chan))
        city_ids = np.zeros(batch_size, dtype=np.uint8)
        for cnt, pick in enumerate(order):
            files = chipFiles[pick]
            # e.g. 'austin12_...' -> 'austin'
            city = ''.join(c for c in files[0].split('_')[0]
                           if not c.isdigit())
            city_ids[cnt % batch_size] = self.city_dict[city]
            parts = []
            n_chan = 0
            for fname in files:
                arr = util_functions.uabUtilAllTypeLoad(
                    os.path.join(image_dir, fname))
                if arr.ndim == 2:
                    arr = arr[:, :, np.newaxis]
                n_chan += arr.shape[2]
                parts.append(arr)
            stacked = np.dstack(parts).astype(np.float32)
            if self.block_mean is not None:
                stacked -= self.block_mean
            if dataAug != '':
                # NOTE(review): no img_mean is passed here, unlike the
                # sibling readers — confirm this is deliberate
                aug = uabUtilreader.doDataAug(stacked, n_chan, dataAug,
                                              is_np=True)
            else:
                aug = stacked
            if (np.array(padding) > 0).any():
                aug = uabUtilreader.pad_block(aug, padding)
            batch[cnt % batch_size, :, :, :] = aug
            if (cnt + 1) % batch_size == 0:
                yield batch[:, :, :, 1:], batch[:, :, :, :1], city_ids
def _loadAugmentedPatch(self, image_dir, row, dataAug, padding):
    """Load one patch row (all of its files), stack the channels, subtract
    the mean, augment and pad.  Returns (patch, total channel count)."""
    blockList = []
    nDims = 0
    for file in row:
        img = util_functions.uabUtilAllTypeLoad(
            os.path.join(image_dir, file))
        if len(img.shape) == 2:
            img = np.expand_dims(img, axis=2)
        nDims += img.shape[2]
        blockList.append(img)
    block = np.dstack(blockList).astype(np.float32)
    if self.block_mean is not None:
        block -= self.block_mean
    if dataAug != '':
        augDat = uabUtilreader.doDataAug(
            block, nDims, dataAug, is_np=True, img_mean=self.block_mean)
    else:
        augDat = block
    if (np.array(padding) > 0).any():
        augDat = uabUtilreader.pad_block(augDat, padding)
    return augDat, nDims

def readFromDiskIteratorTrain(self, image_dir, chipFiles, batch_size, patch_size, padding=(0, 0), dataAug=''):
    """Infinite training iterator whose batch composition is controlled by
    self.batch_code:

    * 0 — pure random order over all patches;
    * 1 — every batch drawn from a single tile;
    * otherwise — batch slots drawn from different city groups.

    Yields (features, labels) with the label taken from channel 0.
    """
    if self.batch_code == 0:
        # pure random
        idx = np.random.permutation(len(chipFiles))
        nDims = len(chipFiles[0])
        while True:
            image_batch = np.zeros(
                (batch_size, patch_size[0], patch_size[1], nDims))
            for cnt, randInd in enumerate(idx):
                augDat, nDims = self._loadAugmentedPatch(
                    image_dir, chipFiles[randInd], dataAug, padding)
                image_batch[cnt % batch_size, :, :, :] = augDat
                if ((cnt + 1) % batch_size == 0):
                    yield image_batch[:, :, :, 1:], image_batch[:, :, :, :1]
    elif self.batch_code == 1:
        # random, batches from same tile
        tile_num, patch_per_tile, tile_name = get_tile_and_patch_num(
            chipFiles)
        group = group_by_tile_name(tile_name, chipFiles)
        tile_idx = np.random.permutation(tile_num)
        patch_idx = np.random.permutation(patch_per_tile)
        if patch_per_tile % batch_size != 0:
            # pad the patch order so each tile emits whole batches only
            comp_len = batch_size - patch_per_tile % batch_size
            patch_idx = np.append(patch_idx, patch_idx[:comp_len])
        nDims = len(chipFiles[0])
        while True:
            image_batch = np.zeros(
                (batch_size, patch_size[0], patch_size[1], nDims))
            for randInd in tile_idx:
                for cnt, patchInd in enumerate(patch_idx):
                    augDat, nDims = self._loadAugmentedPatch(
                        image_dir, group[randInd][patchInd], dataAug, padding)
                    image_batch[cnt % batch_size, :, :, :] = augDat
                    if ((cnt + 1) % batch_size == 0):
                        yield image_batch[:, :, :, 1:], \
                            image_batch[:, :, :, :1]
    else:
        # random, batch slots have to come from different tiles/cities
        tile_num, patch_per_tile, tile_name = get_tile_and_patch_num(
            chipFiles)
        group = group_by_city_name(tile_name, chipFiles)
        tile_idx = np.random.permutation(len(group))
        nDims = len(chipFiles[0])
        while True:
            image_batch = np.zeros(
                (batch_size, patch_size[0], patch_size[1], nDims))
            for cnt, randInd in enumerate(tile_idx):
                # BUG FIX: bound the draw by the selected group's own size;
                # len(group[0]) indexes out of range (or under-samples) when
                # groups have different sizes.  Identical behavior when all
                # groups are equal-sized.
                patchInd = np.random.randint(low=0,
                                             high=len(group[randInd]))
                augDat, nDims = self._loadAugmentedPatch(
                    image_dir, group[randInd][patchInd], dataAug, padding)
                image_batch[cnt % batch_size, :, :, :] = augDat
                if ((cnt + 1) % batch_size == 0):
                    yield image_batch[:, :, :, 1:], image_batch[:, :, :, :1]