def predict_batch(self, images, type_class):
    """Classify each generated image and collect the ones matching the target.

    Returns (stacked predictions, stacked targets, stacked matched images).

    NOTE(review): unlike the sibling implementation that one-hot encodes
    ``type_class``, this version converts it directly to a float tensor —
    presumably the caller already passes an encoded vector; confirm.
    """
    predictions = tf.TensorArray(tf.float32, size=0, dynamic_size=True)
    targets = tf.TensorArray(tf.float32, size=0, dynamic_size=True)
    matches = tf.TensorArray(tf.float32, size=0, dynamic_size=True)
    matched_count = 0
    basis = tf.convert_to_tensor([0, 1], dtype=tf.float32)
    # Loop-invariant: the target tensor is the same for every image.
    target = tf.convert_to_tensor(type_class, dtype=tf.float32)
    for idx in tf.range(len(images)):
        # Undo standardization, then normalize into the classifier's input range.
        prepared = data_access.normalize(data_access.de_standardize(images[idx]))
        scores = self.classifier(tf.expand_dims(prepared, axis=0))
        if self.num_classes == 2:
            # Binary case: absolute distance of the two outputs from the [0, 1] basis.
            weights = tf.abs(tf.subtract(scores, basis))
        else:
            weights = scores
        weights = tf.reshape(weights, (weights.shape[1],))
        predictions = predictions.write(idx, weights)
        targets = targets.write(idx, target)
        # Keep the raw (still standardized) image on an exact prediction match.
        if tf.reduce_all(tf.equal(weights, target)):
            matches = matches.write(matched_count, images[idx])
            matched_count += 1
    return predictions.stack(), targets.stack(), matches.stack()
def predict_batch(self, images, type_class):
    """Classify each generated image and prepare tensors for the loss function.

    :param images: batch of image tensors (standardized generator output)
    :param type_class: class index chosen to influence the generator; must be
        an integer — it is one-hot encoded against ``self.num_classes``
    :return: (predictions, one-hot targets, images whose prediction matched)
    """
    preds = tf.TensorArray(tf.float32, size=0, dynamic_size=True)
    labels = tf.TensorArray(tf.float32, size=0, dynamic_size=True)
    hits = tf.TensorArray(tf.float32, size=0, dynamic_size=True)
    hit_count = 0
    basis = tf.convert_to_tensor([0, 1], dtype=tf.float32)
    for pos in tf.range(len(images)):
        # Undo standardization, then normalize into the classifier's input range.
        prepared = data_access.normalize(data_access.de_standardize(images[pos]))
        raw = self.classifier(tf.expand_dims(prepared, axis=0))
        if self.num_classes == 2:
            # Binary case: absolute distance of the two outputs from the [0, 1] basis.
            scores = tf.abs(tf.subtract(raw, basis))
        else:
            scores = raw
        scores = tf.reshape(scores, (scores.shape[1],))
        preds = preds.write(pos, scores)
        encoded = tf.one_hot(type_class, self.num_classes)
        labels = labels.write(pos, encoded)
        # Keep the raw (still standardized) image on an exact prediction match.
        if tf.reduce_all(tf.equal(scores, encoded)):
            hits = hits.write(hit_count, images[pos])
            hit_count += 1
    return preds.stack(), labels.stack(), hits.stack()
def generate_images(self, number_of_samples, directory):
    """Sample random noise, run the generator, and store or plot the output.

    :param number_of_samples: how many images to generate
    :param directory: destination directory for the produced files
    """
    noise = tf.random.normal([number_of_samples, self.random_noise_size])
    generated = self.generator(noise)
    if self.classifier is None:
        # No classifier available: just dump the raw generated images.
        data_access.store_images_seed(directory, generated, 'gen_images', 'gan')
    else:
        # Classify the images (after undoing standardization) and plot
        # them together with their predictions.
        scored = self.classifier(
            data_access.normalize(data_access.de_standardize(generated)))
        data_access.produce_generate_figure(directory, generated, scored, class_names)
def predict_batch(self, images, type_class):
    """Classify each generated image with the wrapped classifier model.

    :param images: batch of image tensors (standardized generator output)
    :param type_class: class index chosen to influence the generator;
        one-hot encoded against ``self.num_classes``
    :return: (one-hot predictions, one-hot targets, images whose predicted
        class matched ``type_class``)
    """
    # BUG FIX: these arrays were created with size=10, so stacking failed
    # on unwritten indices (or padded) whenever fewer than 10 images were
    # supplied. Start empty and let dynamic_size grow them, as the sibling
    # predict_batch implementations do.
    images_predictions = tf.TensorArray(tf.float32, size=0, dynamic_size=True)
    ys = tf.TensorArray(tf.float32, size=0, dynamic_size=True)
    matched_images = tf.TensorArray(tf.float32, size=0, dynamic_size=True)
    index = 0
    for i in tf.range(len(images)):
        # Undo standardization, then normalize into the classifier's input range.
        gen_image = data_access.normalize(data_access.de_standardize(images[i]))
        img = tf.expand_dims(gen_image, axis=0)
        c_type = self.classifier_m.predict_image(img)
        w_list = tf.one_hot(c_type, self.num_classes)
        w_list = tf.reshape(w_list, (w_list.shape[1],))
        images_predictions = images_predictions.write(i, w_list)
        y_list = tf.one_hot(type_class, self.num_classes)
        ys = ys.write(i, y_list)
        # Keep the raw (still standardized) image on an exact class match.
        if tf.reduce_all(tf.equal(w_list, y_list)):
            matched_images = matched_images.write(index, images[i])
            index += 1
    return images_predictions.stack(), ys.stack(), matched_images.stack()
def generate_images(self, number_of_samples, directory):
    """Generate images from random noise, classify them, and plot the figure.

    :param number_of_samples: how many images to generate
    :param directory: destination directory for the produced figure
    """
    # NOTE(review): latent dimension is hard-coded to 100 here, while the
    # sibling implementation uses self.random_noise_size — TODO confirm
    # this class exposes that attribute and unify.
    seed = tf.random.normal([number_of_samples, 100])
    images = self.generator(seed)
    predictions = self.classifier_m.predict_image_vector(
        data_access.normalize(data_access.de_standardize(images)))
    # BUG FIX: the `directory` parameter was ignored — figures always went
    # to the hard-coded 'imgs' folder. Honor the caller's choice.
    data_access.produce_generate_figure(directory, images, predictions)