def save_posterior_patches(self, full_datasets, cascade_no,
                           cropped_patches_dataset=None, face_tables=None,
                           size_of_rf=None):
    """
    Save the information about the cascade and the fprop to here.

    Runs cascade member `cascade_no` over every pyramid level, thresholds
    its activations, balances the face table and writes the surviving
    patches to one HDF5 output file per level.

    Parameters
    ----------
    full_datasets : indexable of datasets
        One full-image dataset per pyramid level.
    cascade_no : int
        Index of the cascade member to fprop.
    cropped_patches_dataset : dataset, optional
        Patch dataset produced by the previous cascade member; required
        for every non-convolutional member after the first.
    face_tables : list, optional
        One face table per level; rebuilt from `full_datasets` when None.
    size_of_rf : tuple, optional
        (rows, cols) patch shape used for extraction and the output file.

    Returns
    -------
    list of str
        Paths of the per-level HDF5 files that were written.
    """
    # The patch-wise branch below consumes the cropped patches for every
    # cascade member after the first (original guard was `> 1`, which let
    # cascade 1 through with a missing dataset).
    if cascade_no > 0:
        assert cropped_patches_dataset is not None
    if face_tables is None:
        # Original read an undefined name `datasets`; the per-level face
        # tables come from the full-image datasets.
        face_tables = []
        for dataset in full_datasets:
            face_tables.append(dataset.face_table)
    mode = SequentialSubsetIterator
    targets = True
    count_patches = 0
    out_files = []
    ms_model = self.predictors[cascade_no]
    n_nonface_patches = 100
    for i in xrange(self.nlevels):
        name = "cascade_%d_lvl_%d" % (cascade_no, i)
        input_space = ms_model.models[i].get_input_space()
        models = ms_model.models
        X = input_space.make_theano_batch()
        # doesn't support topo yet
        if X.ndim > 2:
            assert False
        dataset = full_datasets[i]
        if dataset.iter_mode == "train":
            dataset.set_iter_mode("fprop")
        outputs = models[i].fprop(X, cascade_no)
        batch_size = models[i].batch_size
        fn = theano.function([X], outputs)
        # NOTE(review): X is a symbolic theano batch, so X.shape[0] is a
        # symbolic expression, not an int -- confirm create_file accepts
        # it, or pass the dataset's example count instead.
        n_examples = X.shape[0]
        model_file_path = ms_model.model_file_path
        receptive_field_size = models[i].receptive_field_size
        out_file, gcols = OutputMapFile.create_file(
            model_file_path, name, n_examples=n_examples,
            out_shape=(n_examples, size_of_rf[0], size_of_rf[1]))
        outmap = OutputMap(receptive_field_size, self.img_shape,
                           models.stride)
        random_patch_extractor = ExtractRandomPatches(
            patch_shape=size_of_rf, num_patches=n_nonface_patches)
        grid_patch_extractor = ExtractGridPatches(
            patch_shape=size_of_rf, patch_stride=models.stride)
        count_patches = 0
        if cascade_no == 0 or models.isconv:
            dataset = full_datasets
        else:
            # Original read an undefined name `cropped_patches_ds`.
            dataset = cropped_patches_dataset
        # This is a counter for the cascade i where i > 0.
        for data in dataset.iterator(batch_size=batch_size, mode=mode,
                                     targets=True):
            if cascade_no == 0 or models.isconv:
                # Extract the patches from the full size image according
                # to the convolution operation with respect to a specific
                # sized receptive field and stride.
                minibatch_images = data[0]
                conv_targets = data[1]
                imgnos = data[2]
                batch_act = fn(minibatch_images)
                batch_act = np.concatenate(
                    [elem.reshape(elem.size) for elem in batch_act],
                    axis=0)
                new_preds = self._check_threshold_non_max_suppression(
                    batch_act, self.reject_probabilities[cascade_no],
                    self.non_max_radius)
                # Remove the nonface entries:
                self.remove_nonfaces(new_preds, face_tables[i],
                                     imgnos=imgnos,
                                     images=minibatch_images)
                start = count_patches
                patches, targets, patch_locs, img_nos = \
                    outmap.extract_patches_batch(
                        minibatch_images, new_preds, start, conv_targets)
                # Fixed argument binding: face_tables[i] previously went
                # into the random_patch_extractor slot, leaving
                # face_table=None and tripping its assert.
                new_patches, new_targets, new_img_nos, new_patch_locs = \
                    self.balance_face_table(
                        dataset, i, random_patch_extractor,
                        grid_patch_extractor, face_table=face_tables[i])
                patches = list_alternator(new_patches, patches)
                targets = list_alternator(new_targets, targets)
                imgnos = list_alternator(new_img_nos, img_nos)
                patch_locs = list_alternator(new_patch_locs, patch_locs)
                stop = count_patches + patches.shape[0]
                OutputMapFile.save_output_map(h5file=out_file,
                                              patches=patches,
                                              imgnos=imgnos,
                                              ilocs=patch_locs,
                                              targets=targets,
                                              start=start, stop=stop)
                count_patches = stop
            else:
                # Do the patchwise patch extraction.
                # Decide which patches to send to the next cascade member.
                minibatch_patches = data[0]
                minibatch_targets = data[1]
                minibatch_imgnos = data[2]
                minibatch_imglocs = data[3]
                assert receptive_field_size == \
                    cropped_patches_dataset.img_shape, (
                        "Size of receptive field of the model is "
                        "different from the size of the patches!")
                batch_act = fn(minibatch_patches)
                batch_act = np.concatenate(
                    [elem.reshape(elem.size) for elem in batch_act],
                    axis=0)
                output_map = self.create_output_map(minibatch_targets,
                                                    minibatch_imglocs)
                new_preds = self._check_threshold_non_max_suppression(
                    output_map, self.reject_probabilities[cascade_no],
                    self.non_max_radius)
                # Remove the dead patches
                self.remove_nonfaces(new_preds, face_tables[i],
                                     imgnos=minibatch_imgnos,
                                     images=minibatch_patches,
                                     imglocs=minibatch_imglocs,
                                     img_targets=minibatch_targets)
                start = count_patches
                new_patches, new_targets, new_img_nos, new_patch_locs = \
                    self.balance_face_table(
                        dataset, i, random_patch_extractor,
                        grid_patch_extractor, face_table=face_tables[i])
                # Original read `patches`, `targets` and `img_nos` before
                # assignment in this branch; alternate the replacement
                # patches with this minibatch instead, mirroring the
                # convolutional branch above.
                patches = list_alternator(new_patches, minibatch_patches)
                targets = list_alternator(new_targets, minibatch_targets)
                imgnos = list_alternator(new_img_nos, minibatch_imgnos)
                patch_locs = list_alternator(new_patch_locs,
                                             minibatch_imglocs)
                stop = count_patches + patches.shape[0]
                OutputMapFile.save_output_map(out_file, minibatch_patches,
                                              new_preds, imgnos,
                                              targets=targets,
                                              start=start, stop=stop)
                count_patches = stop
        h5_path = os.path.join(model_file_path, name)
        out_files.append(h5_path)
    return out_files
def save_posterior_patches(self, full_datasets, cascade_no,
                           cropped_patches_dataset=None, face_tables=None,
                           size_of_rf=None):
    """
    Save the information about the cascade and the fprop to here.

    Runs cascade member `cascade_no` over every pyramid level, thresholds
    its activations, balances the face table and writes the surviving
    patches to one HDF5 output file per level.

    Parameters
    ----------
    full_datasets : indexable of datasets
        One full-image dataset per pyramid level.
    cascade_no : int
        Index of the cascade member to fprop.
    cropped_patches_dataset : dataset, optional
        Patch dataset produced by the previous cascade member; required
        for every non-convolutional member after the first.
    face_tables : list, optional
        One face table per level; rebuilt from `full_datasets` when None.
    size_of_rf : tuple, optional
        (rows, cols) patch shape used for extraction and the output file.

    Returns
    -------
    list of str
        Paths of the per-level HDF5 files that were written.
    """
    # The patch-wise branch below consumes the cropped patches for every
    # cascade member after the first (original guard was `> 1`, which let
    # cascade 1 through with a missing dataset).
    if cascade_no > 0:
        assert cropped_patches_dataset is not None
    if face_tables is None:
        # Original read an undefined name `datasets`; the per-level face
        # tables come from the full-image datasets.
        face_tables = []
        for dataset in full_datasets:
            face_tables.append(dataset.face_table)
    mode = SequentialSubsetIterator
    targets = True
    count_patches = 0
    out_files = []
    ms_model = self.predictors[cascade_no]
    n_nonface_patches = 100
    for i in xrange(self.nlevels):
        name = "cascade_%d_lvl_%d" % (cascade_no, i)
        input_space = ms_model.models[i].get_input_space()
        models = ms_model.models
        X = input_space.make_theano_batch()
        # doesn't support topo yet
        if X.ndim > 2:
            assert False
        dataset = full_datasets[i]
        if dataset.iter_mode == "train":
            dataset.set_iter_mode("fprop")
        outputs = models[i].fprop(X, cascade_no)
        batch_size = models[i].batch_size
        fn = theano.function([X], outputs)
        # NOTE(review): X is a symbolic theano batch, so X.shape[0] is a
        # symbolic expression, not an int -- confirm create_file accepts
        # it, or pass the dataset's example count instead.
        n_examples = X.shape[0]
        model_file_path = ms_model.model_file_path
        receptive_field_size = models[i].receptive_field_size
        out_file, gcols = OutputMapFile.create_file(
            model_file_path, name, n_examples=n_examples,
            out_shape=(n_examples, size_of_rf[0], size_of_rf[1]))
        outmap = OutputMap(receptive_field_size, self.img_shape,
                           models.stride)
        random_patch_extractor = ExtractRandomPatches(
            patch_shape=size_of_rf, num_patches=n_nonface_patches)
        grid_patch_extractor = ExtractGridPatches(
            patch_shape=size_of_rf, patch_stride=models.stride)
        count_patches = 0
        if cascade_no == 0 or models.isconv:
            dataset = full_datasets
        else:
            # Original read an undefined name `cropped_patches_ds`.
            dataset = cropped_patches_dataset
        # This is a counter for the cascade i where i > 0.
        for data in dataset.iterator(batch_size=batch_size, mode=mode,
                                     targets=True):
            if cascade_no == 0 or models.isconv:
                # Extract the patches from the full size image according
                # to the convolution operation with respect to a specific
                # sized receptive field and stride.
                minibatch_images = data[0]
                conv_targets = data[1]
                imgnos = data[2]
                batch_act = fn(minibatch_images)
                batch_act = np.concatenate(
                    [elem.reshape(elem.size) for elem in batch_act],
                    axis=0)
                new_preds = self._check_threshold_non_max_suppression(
                    batch_act, self.reject_probabilities[cascade_no],
                    self.non_max_radius)
                # Remove the nonface entries:
                self.remove_nonfaces(new_preds, face_tables[i],
                                     imgnos=imgnos,
                                     images=minibatch_images)
                start = count_patches
                patches, targets, patch_locs, img_nos = \
                    outmap.extract_patches_batch(
                        minibatch_images, new_preds, start, conv_targets)
                # Fixed argument binding: face_tables[i] previously went
                # into the random_patch_extractor slot, leaving
                # face_table=None and tripping its assert.
                new_patches, new_targets, new_img_nos, new_patch_locs = \
                    self.balance_face_table(
                        dataset, i, random_patch_extractor,
                        grid_patch_extractor, face_table=face_tables[i])
                patches = list_alternator(new_patches, patches)
                targets = list_alternator(new_targets, targets)
                imgnos = list_alternator(new_img_nos, img_nos)
                patch_locs = list_alternator(new_patch_locs, patch_locs)
                stop = count_patches + patches.shape[0]
                OutputMapFile.save_output_map(h5file=out_file,
                                              patches=patches,
                                              imgnos=imgnos,
                                              ilocs=patch_locs,
                                              targets=targets,
                                              start=start, stop=stop)
                count_patches = stop
            else:
                # Do the patchwise patch extraction.
                # Decide which patches to send to the next cascade member.
                minibatch_patches = data[0]
                minibatch_targets = data[1]
                minibatch_imgnos = data[2]
                minibatch_imglocs = data[3]
                assert receptive_field_size == \
                    cropped_patches_dataset.img_shape, (
                        "Size of receptive field of the model is "
                        "different from the size of the patches!")
                batch_act = fn(minibatch_patches)
                batch_act = np.concatenate(
                    [elem.reshape(elem.size) for elem in batch_act],
                    axis=0)
                output_map = self.create_output_map(minibatch_targets,
                                                    minibatch_imglocs)
                new_preds = self._check_threshold_non_max_suppression(
                    output_map, self.reject_probabilities[cascade_no],
                    self.non_max_radius)
                # Remove the dead patches
                self.remove_nonfaces(new_preds, face_tables[i],
                                     imgnos=minibatch_imgnos,
                                     images=minibatch_patches,
                                     imglocs=minibatch_imglocs,
                                     img_targets=minibatch_targets)
                start = count_patches
                new_patches, new_targets, new_img_nos, new_patch_locs = \
                    self.balance_face_table(
                        dataset, i, random_patch_extractor,
                        grid_patch_extractor, face_table=face_tables[i])
                # Original read `patches`, `targets` and `img_nos` before
                # assignment in this branch; alternate the replacement
                # patches with this minibatch instead, mirroring the
                # convolutional branch above.
                patches = list_alternator(new_patches, minibatch_patches)
                targets = list_alternator(new_targets, minibatch_targets)
                imgnos = list_alternator(new_img_nos, minibatch_imgnos)
                patch_locs = list_alternator(new_patch_locs,
                                             minibatch_imglocs)
                stop = count_patches + patches.shape[0]
                OutputMapFile.save_output_map(out_file, minibatch_patches,
                                              new_preds, imgnos,
                                              targets=targets,
                                              start=start, stop=stop)
                count_patches = stop
        h5_path = os.path.join(model_file_path, name)
        out_files.append(h5_path)
    return out_files
def balance_face_table(self, dataset, cascade_no, random_patch_extractor,
                       grid_patch_extractor, face_table=None,
                       size_of_rf=None):
    """
    TODO: Check the dead faces and replace them with new ones.

    Replaces each dead face / non-face entry recorded in `face_table`
    with freshly extracted grid patches from `dataset`, updates the
    table bookkeeping, and returns the replacement patches with face
    and non-face entries interleaved via `list_alternator`.

    Returns a 4-tuple: (patches, targets, imgnos, plocs).

    NOTE(review): `random_patch_extractor`, `cascade_no` and
    `size_of_rf` are accepted but never used here -- confirm whether
    non-face replacements were meant to come from the random extractor.
    """
    assert face_table is not None
    last_face_no = face_table.get_last_face_no()
    last_nonface_no = face_table.get_last_nonface_no()
    dead_faces = face_table.get_dead_faces()
    dead_nonfaces = face_table.get_dead_nonfaces()
    face_imgnos = []
    nonface_imgnos = []
    non_face_targets = []
    face_targets = []
    new_nonface_patches = []
    new_face_patches = []
    nonface_plocs = []
    face_plocs = []
    ### Replace the dead faces with the new ones ###
    for dead_face in dead_faces:
        last_face_no += 1
        face_row = face_table.get_row(dead_face)
        # Mark the row as a live face (flags 1, 1) under the new id.
        face_table.simple_update_face_table(face_row, last_face_no, 1, 1)
        face_patches, face_targets_, patch_locs = grid_patch_extractor.apply(
            dataset, start=last_face_no)
        new_face_patches.append(face_patches)
        face_plocs.append(patch_locs)
        face_targets.extend(face_targets_.flatten())
        # One image number per extracted target, all sharing the new id.
        face_imgno = np.array(
            list(its.repeat(last_face_no, face_targets_.shape[0])))
        face_imgnos.extend(face_imgno)
    for dead_nonface in dead_nonfaces:
        last_nonface_no += 1
        face_row = face_table.get_row(dead_nonface)
        # Mark the row as a live non-face (face flag 0) under the new id.
        face_table.simple_update_face_table(face_row, last_nonface_no, 0, 1)
        nonface_patches, patch_targets, patch_locs = grid_patch_extractor.apply(
            dataset, start=last_nonface_no)
        # NOTE(review): non-face patches are flattened here while face
        # patches above are stored as-is -- confirm this asymmetry is
        # intentional.
        new_nonface_patches.append(nonface_patches.flatten())
        non_face_targets.append(patch_targets.flatten())
        nonface_plocs.append(patch_locs)
        nonface_imgno = np.array(
            list(its.repeat(last_nonface_no, patch_targets.shape[0])))
        nonface_imgnos.extend(nonface_imgno)
    # Interleave face and non-face entries for downstream consumption.
    patches = list_alternator(new_face_patches, new_nonface_patches)
    targets = list_alternator(face_targets, non_face_targets)
    imgnos = list_alternator(face_imgnos, nonface_imgnos)
    plocs = list_alternator(face_plocs, nonface_plocs)
    return patches, targets, imgnos, plocs
def balance_face_table(self, dataset, cascade_no, random_patch_extractor,
                       grid_patch_extractor, face_table=None,
                       size_of_rf=None):
    """
    TODO: Check the dead faces and replace them with new ones.

    Swap every dead face / non-face entry of `face_table` for a fresh
    set of grid patches drawn from `dataset`, update the table rows, and
    return the replacements with face and non-face entries interleaved.

    Returns a 4-tuple: (patches, targets, imgnos, plocs).
    """
    assert face_table is not None
    next_face_id = face_table.get_last_face_no()
    next_nonface_id = face_table.get_last_nonface_no()
    stale_faces = face_table.get_dead_faces()
    stale_nonfaces = face_table.get_dead_nonfaces()

    # Parallel accumulators for the positive (face) replacements.
    pos_ids, pos_targets, pos_patches, pos_locs = [], [], [], []
    # ... and for the negative (non-face) replacements.
    neg_ids, neg_targets, neg_patches, neg_locs = [], [], [], []

    # Re-populate each dead face slot under a new id (flags 1, 1).
    for stale in stale_faces:
        next_face_id += 1
        row = face_table.get_row(stale)
        face_table.simple_update_face_table(row, next_face_id, 1, 1)
        grid_p, grid_t, grid_locs = grid_patch_extractor.apply(
            dataset, start=next_face_id)
        pos_patches.append(grid_p)
        pos_locs.append(grid_locs)
        pos_targets.extend(grid_t.flatten())
        # One id per extracted target.
        pos_ids.extend(
            np.array(list(its.repeat(next_face_id, grid_t.shape[0]))))

    # Same replacement for dead non-faces (face flag 0); the patch
    # arrays are stored flattened in this path.
    for stale in stale_nonfaces:
        next_nonface_id += 1
        row = face_table.get_row(stale)
        face_table.simple_update_face_table(row, next_nonface_id, 0, 1)
        grid_p, grid_t, grid_locs = grid_patch_extractor.apply(
            dataset, start=next_nonface_id)
        neg_patches.append(grid_p.flatten())
        neg_targets.append(grid_t.flatten())
        neg_locs.append(grid_locs)
        neg_ids.extend(
            np.array(list(its.repeat(next_nonface_id, grid_t.shape[0]))))

    # Interleave positives and negatives for the caller.
    patches = list_alternator(pos_patches, neg_patches)
    targets = list_alternator(pos_targets, neg_targets)
    imgnos = list_alternator(pos_ids, neg_ids)
    plocs = list_alternator(pos_locs, neg_locs)
    return patches, targets, imgnos, plocs