def _perform_augmentation_segmentation(self, img, label, mask_image, augmenter, event=None, events=False):
    seq_image, seq_label, seq_mask, seq_event = get_augmenter(name=augmenter, c_val=255)

    # Augment the RGB image (contrast, etc.). Each array is wrapped in a batch of
    # one before augmentation and unwrapped afterwards.
    img = img.reshape(sum(((1, ), img.shape), ()))
    img = seq_image.augment_images(img)
    img = img.reshape(img.shape[1:])

    label = label.reshape(sum(((1, ), label.shape), ()))
    label = seq_label.augment_images(label)
    label = label.reshape(label.shape[1:])

    mask_image = mask_image.reshape(sum(((1, ), mask_image.shape), ()))
    mask_image = seq_mask.augment_images(mask_image)
    mask_image = mask_image.reshape(mask_image.shape[1:])

    if events:
        event = event.reshape(sum(((1, ), event.shape), ()))
        # event = self.augment_event(event)
        event = seq_event.augment_images(event)
        event = event.reshape(event.shape[1:])
        return img, label, mask_image, event

    return img, label, mask_image
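# The `reshape(sum(((1, ), arr.shape), ()))` idiom used above just prepends a batch
# axis of size 1, because the augmenter's `augment_images` expects a batch of images.
# A small self-contained illustration of the equivalence (the array here is dummy data):
import numpy as np

img = np.zeros((64, 64, 3), dtype=np.uint8)

batched = img.reshape(sum(((1, ), img.shape), ()))  # tuple concatenation -> (1, 64, 64, 3)
batched_alt = np.expand_dims(img, axis=0)           # clearer equivalent
restored = batched[0]                               # drop the batch axis again

assert batched.shape == batched_alt.shape == (1, 64, 64, 3)
assert restored.shape == img.shape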
def _perform_augmentation_segmentation(self, img, label, mask_image, augmenter):
    seq_image_contrast, seq_image_translation, seq_label, seq_mask = get_augmenter(
        name=augmenter, c_val=self.ignore_label)

    # Apply some contrast changes to the RGB image. Each array is wrapped in a
    # batch of one before augmentation and unwrapped afterwards.
    img = img.reshape(sum(((1, ), img.shape), ()))
    img = seq_image_contrast.augment_images(img)
    img = img.reshape(img.shape[1:])

    # Apply the same shifts and rotations to the image, the labels and the mask.
    img = img.reshape(sum(((1, ), img.shape), ()))
    img = seq_image_translation.augment_images(img)
    img = img.reshape(img.shape[1:])

    label = label.reshape(sum(((1, ), label.shape), ()))
    label = seq_label.augment_images(label)
    label = label.reshape(label.shape[1:])

    mask_image = mask_image.reshape(sum(((1, ), mask_image.shape), ()))
    mask_image = seq_mask.augment_images(mask_image)
    mask_image = mask_image.reshape(mask_image.shape[1:])

    return img, label, mask_image
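# `get_augmenter` is used throughout this file but not defined here. A minimal sketch
# of one possible implementation for the 4-tuple variant above, assuming the imgaug
# library (which provides the `augment_images` API used in these methods). The key
# point is that the geometric augmenter is made deterministic and shared, so image,
# label and mask receive exactly the same shift/rotation; the specific augmenters,
# parameter ranges and name handling are illustrative only, not the original code.
import imgaug.augmenters as iaa

def get_augmenter_sketch(name, c_val=255):
    # In the real code, `name` presumably selects a dataset-specific augmenter set.
    # Photometric changes, applied to the RGB image only.
    seq_image_contrast = iaa.Sequential([
        iaa.Multiply((0.8, 1.2)),            # brightness change
        iaa.GaussianBlur(sigma=(0.0, 1.0)),  # mild blur
    ])
    # Geometric changes, frozen with to_deterministic() so that successive
    # augment_images calls (image, label, mask) apply identical transforms.
    geometric = iaa.Sequential([
        iaa.Fliplr(0.5),
        iaa.Affine(translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)},
                   rotate=(-10, 10),
                   order=0,                  # nearest neighbour keeps label ids valid
                   cval=c_val,               # fill borders with the ignore value
                   mode="constant"),
    ]).to_deterministic()
    return seq_image_contrast, geometric, geometric, geometric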
def _get_batch_rgb(self, size=32, train=True, augmenter=None):
    x = np.zeros([size, self.height, self.width, self.dim], dtype=np.float32)
    y = np.zeros([size], dtype=np.uint8)

    if train:
        file_list = self.train_list
        folder = '/train/'
        # Take the next [size] indexes, wrapping around the list.
        indexes = [
            i % len(file_list)
            for i in range(self.index_train, self.index_train + size)
        ]
        self.index_train = indexes[-1] + 1
    else:
        file_list = self.test_list
        folder = '/test/'
        indexes = [
            i % len(file_list)
            for i in range(self.index_test, self.index_test + size)
        ]
        self.index_test = indexes[-1] + 1

    random_files = [file_list[number] for number in indexes]
    # The class name is the folder directly under /train/ or /test/.
    classes = [
        self.classes[file.split(folder)[1].split('/')[0]]
        for file in random_files
    ]

    for index in range(size):
        img = cv2.imread(random_files[index])
        # Report files that failed to load.
        if img is None:
            print(random_files[index])
            print(indexes[index])
        if img.shape[1] != self.width or img.shape[0] != self.height:
            img = cv2.resize(img, (self.width, self.height),
                             interpolation=cv2.INTER_AREA)
        x[index, :, :, :] = img
        y[index] = classes[index]

    # Labels to categorical (if 5 classes and the value is 2: 2 -> [0, 0, 1, 0, 0]).
    y = to_categorical(y, num_classes=self.n_classes)

    # Augmentation.
    if augmenter:
        augmenter_seq = get_augmenter(name=augmenter)
        x = augmenter_seq.augment_images(x)

    # Scale pixels to [-0.5, 0.5].
    # x = x.astype(np.float32)
    # tf.keras.applications.imagenet_utils.preprocess_input(x, mode='tf')
    # x = tf.keras.applications.xception.preprocess_input(x)
    x = x.astype(np.float32) / 255.0 - 0.5
    return x, y
def _perform_augmentation_segmentation(self, img, label, mask_image, augmenter):
    seq_image, seq_label, seq_mask = get_augmenter(name=augmenter, c_val=255)

    # Augment the RGB image (contrast, etc.). Each array is wrapped in a batch of
    # one before augmentation and unwrapped afterwards.
    img = img.reshape(sum(((1, ), img.shape), ()))
    img = seq_image.augment_images(img)
    img = img.reshape(img.shape[1:])

    label = label.reshape(sum(((1, ), label.shape), ()))
    label = seq_label.augment_images(label)
    label = label.reshape(label.shape[1:])

    mask_image = mask_image.reshape(sum(((1, ), mask_image.shape), ()))
    mask_image = seq_mask.augment_images(mask_image)
    mask_image = mask_image.reshape(mask_image.shape[1:])

    return img, label, mask_image
def _get_batch_rgb(self, size=32, train=True, augmenter=None):
    x = np.zeros([size, self.height, self.width, self.dim], dtype=np.float32)
    y = np.zeros([size], dtype=np.uint8)

    file_list = self.test_list
    folder = '/test/'
    if train:
        file_list = self.train_list
        folder = '/train/'

    # Pick [size] random files.
    random_files = [
        file_list[random.randint(0, len(file_list) - 1)] for _ in range(size)
    ]
    # The class name is the folder directly under /train/ or /test/.
    classes = [
        self.classes[file.split(folder)[1].split('/')[0]]
        for file in random_files
    ]

    for index in range(size):
        img = cv2.imread(random_files[index])
        if img.shape[1] != self.width or img.shape[0] != self.height:
            img = cv2.resize(img, (self.width, self.height),
                             interpolation=cv2.INTER_AREA)
        x[index, :, :, :] = img
        y[index] = classes[index]

    # Labels to categorical (if 5 classes and the value is 2: 2 -> [0, 0, 1, 0, 0]).
    y = to_categorical(y, num_classes=len(self.classes))

    # Augmentation.
    if augmenter:
        augmenter_seq = get_augmenter(name=augmenter)
        x = augmenter_seq.augment_images(x)

    # Scale pixels to [-0.5, 0.5].
    x = x.astype(np.float32) / 255.0 - 0.5
    return x, y
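# A minimal sketch of how these RGB batches might be consumed in a training loop,
# assuming a `loader` instance of the surrounding class (its constructor is not shown
# here) and a compiled Keras classifier `model`; those names and the schedule below
# are illustrative assumptions. The augmenter name 'caltech' is the one used by the
# training-script fragment further down this file.
for step in range(1000):
    x, y = loader._get_batch_rgb(size=32, train=True, augmenter='caltech')
    train_metrics = model.train_on_batch(x, y)        # x is in [-0.5, 0.5], y is one-hot
    if step % 100 == 0:
        x_val, y_val = loader._get_batch_rgb(size=32, train=False)
        val_metrics = model.test_on_batch(x_val, y_val)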
def _get_batch_segmentation(self, size=32, train=True, augmenter=None, index=None, validation=False):
    x = np.zeros([size, self.height, self.width, self.dim], dtype=np.float32)
    y = np.zeros([size, self.height, self.width], dtype=np.uint8)
    mask_expanded = np.ones([size, self.height, self.width, self.n_classes], dtype=np.uint8)

    image_list = self.image_test_list
    label_list = self.label_test_list
    folder = '/test/'
    if train:
        image_list = self.image_train_list
        label_list = self.label_train_list
        folder = '/train/'

    # Pick [size] random indexes, or a sequential block when [index] is given.
    indexes = [random.randint(0, len(image_list) - 1) for _ in range(size)]
    if index is not None:
        indexes = [i for i in range(index, index + size)]

    random_images = [image_list[number] for number in indexes]
    random_labels = [label_list[number] for number in indexes]

    # For every image, load the image, label and mask.
    # The augmentation has to be done sample by sample.
    for index in range(size):
        img = cv2.imread(random_images[index])
        label = cv2.imread(random_labels[index], 0)

        if img.shape[1] != self.width or img.shape[0] != self.height:
            img = cv2.resize(img, (self.width, self.height),
                             interpolation=cv2.INTER_AREA)
        if label.shape[1] != self.width or label.shape[0] != self.height:
            label = cv2.resize(label, (self.width, self.height),
                               interpolation=cv2.INTER_NEAREST)

        macara = mask_expanded[index, :, :, 0]
        if train and augmenter and random.random() < 0.90:
            seq_image2, seq_image, seq_label, seq_mask = get_augmenter(
                name=augmenter, c_val=self.ignore_label)

            # Apply some contrast changes to the RGB image.
            img = img.reshape(sum(((1, ), img.shape), ()))
            img = seq_image2.augment_images(img)
            img = img.reshape(img.shape[1:])

            if random.random() < 0.90:
                # Apply shifts and rotations to the image, the labels and the mask.
                # Each array is wrapped in a batch of one for the augmenter framework.
                # The retry loops work around failures of the external library: the
                # transform is repeated until the count of ignore-valued pixels changes.
                cuenta_ignore = sum(sum(sum(img == self.ignore_label)))
                cuenta_ignore2 = cuenta_ignore
                i = 0
                while abs(cuenta_ignore2 - cuenta_ignore) < 5 and i < 15:
                    img = img.reshape(sum(((1, ), img.shape), ()))
                    img = seq_image.augment_images(img)
                    img = img.reshape(img.shape[1:])
                    cuenta_ignore2 = sum(sum(sum(img == self.ignore_label)))
                    i = i + 1

                cuenta_ignore = sum(sum(label == self.ignore_label))
                cuenta_ignore2 = cuenta_ignore
                i = 0
                while cuenta_ignore2 == cuenta_ignore and i < 15:
                    label = label.reshape(sum(((1, ), label.shape), ()))
                    label = seq_label.augment_images(label)
                    label = label.reshape(label.shape[1:])
                    cuenta_ignore2 = sum(sum(label == self.ignore_label))
                    i = i + 1

                cuenta_ignore = sum(sum(macara == self.ignore_label))
                cuenta_ignore2 = cuenta_ignore
                i = 0
                while cuenta_ignore2 == cuenta_ignore and i < 15:
                    macara = macara.reshape(sum(((1, ), macara.shape), ()))
                    macara = seq_mask.augment_images(macara)
                    macara = macara.reshape(macara.shape[1:])
                    cuenta_ignore2 = sum(sum(macara == self.ignore_label))
                    i = i + 1

        if self.ignore_label and not validation:
            # Map the ignore_label into the 0..n_classes range and zero it in the mask.
            mask_ignore = label == self.ignore_label
            macara[mask_ignore] = 0
            label[mask_ignore] = 0

        x[index, :, :, :] = img
        y[index, :, :] = label
        for i in range(mask_expanded.shape[3]):
            mask_expanded[index, :, :, i] = macara

    # Labels to categorical (if 5 classes and the value is 2: 2 -> [0, 0, 1, 0, 0]).
    a, b, c = y.shape
    y = y.reshape((a * b * c))
    if self.ignore_label and validation:
        y = to_categorical(y, num_classes=self.n_classes + 1)
    else:
        y = to_categorical(y, num_classes=self.n_classes)
    y = y.reshape((a, b, c, y.shape[-1])).astype(np.uint8)  # last dim matches the branch above

    # Scale pixels to [-0.5, 0.5].
    x = x.astype(np.float32) / 255.0 - 0.5
    return x, y, mask_expanded
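# A minimal sketch of walking the whole test split with the method above, using its
# `index` argument to read sequential blocks instead of random samples (the `loader`
# name and batch size are illustrative; the evaluation step itself is left out).
batch_size = 8
for start in range(0, len(loader.image_test_list) - batch_size + 1, batch_size):
    x, y, mask_expanded = loader._get_batch_segmentation(
        size=batch_size, train=False, index=start, validation=True)
    # feed x, y and mask_expanded to the evaluation step here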
except:
    pass

# all the trainable ops
tf.summary.histogram(op.name, op)

# Count the trainable parameters of the net.
total_parameters = 0
for variable in tf.trainable_variables():
    # shape is an array of tf.Dimension
    shape = variable.get_shape()
    variable_parameters = 1
    for dim in shape:
        variable_parameters *= dim.value
    total_parameters += variable_parameters
print("Total parameters of the net: " + str(total_parameters))

augmenter_seq = get_augmenter(name='caltech')
show_each_steps = 100
saver = tf.train.Saver(tf.global_variables())

with tf.Session() as sess:
    # Restore the latest checkpoint if one exists, otherwise initialize from scratch.
    ckpt = tf.train.get_checkpoint_state('./model')
    ckpt_best = tf.train.get_checkpoint_state('./model/best')
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(tf.global_variables_initializer())

    merged = tf.summary.merge_all()
    writer_train = tf.summary.FileWriter('./logs/train', sess.graph)
    writer_test = tf.summary.FileWriter('./logs/test', sess.graph)
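    # A minimal sketch of how the training loop might continue inside this session,
    # assuming TF1-style ops and placeholders (`train_op`, `loss`, `input_x`, `label_y`)
    # plus a `loader` instance defined earlier in the script; those names, `num_steps`
    # and `batch_size` are illustrative assumptions, not part of the original code.
    for step in range(num_steps):
        batch_x, batch_y = loader._get_batch_rgb(size=batch_size, train=True,
                                                 augmenter='caltech')
        _, loss_value, summary = sess.run([train_op, loss, merged],
                                          feed_dict={input_x: batch_x, label_y: batch_y})
        writer_train.add_summary(summary, step)
        if step % show_each_steps == 0:
            test_x, test_y = loader._get_batch_rgb(size=batch_size, train=False)
            summary_test = sess.run(merged, feed_dict={input_x: test_x, label_y: test_y})
            writer_test.add_summary(summary_test, step)
            saver.save(sess, './model/model.ckpt', global_step=step)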
def _get_batch_segmentation(self, size=32, train=True, augmenter=None):
    # Init numpy arrays.
    x = np.zeros([size, self.height, self.width, self.dim], dtype=np.float32)
    y = np.zeros([size, self.height, self.width], dtype=np.uint8)
    mask = np.ones([size, self.height, self.width], dtype=np.float32)

    if train:
        image_list = self.image_train_list
        label_list = self.label_train_list
        index_files = self.index_train
        folder = '/train/'
        # Take the next [size] indexes, wrapping around the list.
        indexes = [
            i % len(image_list)
            for i in range(self.index_train, self.index_train + size)
        ]
        self.index_train = indexes[-1] + 1
    else:
        image_list = self.image_test_list
        label_list = self.label_test_list
        folder = '/test/'
        indexes = [
            i % len(image_list)
            for i in range(self.index_test, self.index_test + size)
        ]
        self.index_test = indexes[-1] + 1

    random_images = [image_list[number] for number in indexes]
    random_labels = [label_list[number] for number in indexes]

    # For every image, load the image, label and mask.
    # The augmentation has to be done sample by sample.
    for index in range(size):
        if self.dim == 1:
            img = cv2.imread(random_images[index], 0)
        else:
            img = cv2.imread(random_images[index])
        label = cv2.imread(random_labels[index], 0)

        # Report files that failed to load.
        if img is None or label is None:
            print(random_images[index])
            print(random_labels[index])
            print(indexes[index])

        if img.shape[1] != self.width or img.shape[0] != self.height:
            img = cv2.resize(img, (self.width, self.height),
                             interpolation=cv2.INTER_AREA)
        if label.shape[1] != self.width or label.shape[0] != self.height:
            label = cv2.resize(label, (self.width, self.height),
                               interpolation=cv2.INTER_NEAREST)

        mask_image = mask[index, :, :]
        if train and augmenter:
            seq_image_contrast, seq_image_translation, seq_label, seq_mask = get_augmenter(
                name=augmenter, c_val=self.ignore_label)

            # Apply some contrast changes to the RGB image. Each array is wrapped in
            # a batch of one for the augmenter framework and unwrapped afterwards.
            img = img.reshape(sum(((1, ), img.shape), ()))
            img = seq_image_contrast.augment_images(img)
            img = img.reshape(img.shape[1:])

            # Apply the same shifts and rotations to the image, the labels and the mask.
            img = img.reshape(sum(((1, ), img.shape), ()))
            img = seq_image_translation.augment_images(img)
            img = img.reshape(img.shape[1:])

            label = label.reshape(sum(((1, ), label.shape), ()))
            label = seq_label.augment_images(label)
            label = label.reshape(label.shape[1:])

            mask_image = mask_image.reshape(sum(((1, ), mask_image.shape), ()))
            mask_image = seq_mask.augment_images(mask_image)
            mask_image = mask_image.reshape(mask_image.shape[1:])

        # Modify the mask and the labels.
        mask_ignore = label == self.ignore_label
        mask_image[mask_ignore] = 0          # ignored pixels get a weight of 0 in the mask
        label[mask_ignore] = self.n_classes  # the ignore label becomes class n_classes

        if self.dim == 1:
            img = np.reshape(img, (img.shape[0], img.shape[1], self.dim))

        x[index, :, :, :] = img
        y[index, :, :] = label
        mask[index, :, :] = mask_image

    # Apply class weights to the mask.
    mask = self._from_binarymask_to_weighted_mask(y, mask)

    # Labels to categorical (if 5 classes and the value is 2: 2 -> [0, 0, 1, 0, 0]).
    a, b, c = y.shape
    y = y.reshape((a * b * c))
    # Convert to categorical. Add one class for ignored pixels.
    y = to_categorical(y, num_classes=self.n_classes + 1)
    y = y.reshape((a, b, c, self.n_classes + 1)).astype(np.uint8)

    # Scale pixels to [-0.5, 0.5].
    # tf.keras.applications.imagenet_utils.preprocess_input(x, mode='tf')
    # x = tf.keras.applications.xception.preprocess_input(x)
    x = x.astype(np.float32) / 255.0 - 0.5
    return x, y, mask
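# `_from_binarymask_to_weighted_mask` is called above but not shown in this file.
# A hypothetical sketch of what such a helper could do, using median-frequency class
# balancing (the actual weighting scheme in the original code may differ): pixels of
# rare classes get a weight above 1, pixels of frequent classes below 1, and pixels
# whose mask entry is already 0 (ignored) stay at 0. The extra `n_classes` parameter
# stands in for the class attribute the real method would read from `self`.
import numpy as np

def _from_binarymask_to_weighted_mask_sketch(labels, masks, n_classes):
    # labels: (batch, H, W) integer class ids; masks: (batch, H, W) with 0 = ignore.
    freqs = np.array([np.sum((labels == c) & (masks > 0)) for c in range(n_classes)],
                     dtype=np.float32)
    weights = np.zeros(n_classes, dtype=np.float32)
    valid = freqs > 0
    weights[valid] = np.median(freqs[valid]) / freqs[valid]

    weighted = masks.astype(np.float32).copy()
    for c in range(n_classes):
        # Only non-ignored pixels are reweighted; ignored pixels keep weight 0.
        weighted[(labels == c) & (masks > 0)] = weights[c]
    return weighted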