import numpy as np


def process_dataset(location, factor):
    """Load an .npy dataset and split each RGB image into three
    single-channel samples stacked along the batch axis.

    `factor` is the image side length: the raw array is reshaped to
    (N, factor, factor, 3).
    """
    temp_array = np.load(location)
    temp_array = temp_array.reshape((temp_array.shape[0], factor, factor, 3))
    # Treat the R, G and B channels as independent grayscale samples.
    ret_array = temp_array[:, :, :, 0]
    ret_array = np.append(ret_array, temp_array[:, :, :, 1], axis=0)
    ret_array = np.append(ret_array, temp_array[:, :, :, 2], axis=0)
    # Restore an explicit channel dimension: (3 * N, factor, factor, 1).
    ret_array = np.reshape(ret_array, (ret_array.shape[0], ret_array.shape[1], ret_array.shape[2], 1))
    return ret_array
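# Usage sketch for `process_dataset` (the path and `factor=28` below are
# illustrative assumptions, not values fixed by the function itself):
#
#   samples = process_dataset("../output/output.npy", factor=28)
#   samples.shape  # (3 * N, 28, 28, 1): three grayscale samples per RGB image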
from PIL import Image


def generate(BATCH_SIZE, nice=False):
    # `generator_model`, `discriminator_model` and `combine_images` are
    # defined elsewhere in this module.
    generator = generator_model()
    generator.compile(loss="binary_crossentropy", optimizer="SGD")
    generator.load_weights("generator")
    if nice:
        # Use the discriminator to keep only the most convincing images.
        discriminator = discriminator_model()
        discriminator.compile(loss="binary_crossentropy", optimizer="SGD")
        discriminator.load_weights("discriminator")
        noise = np.zeros((BATCH_SIZE * 20, 100))
        for i in range(BATCH_SIZE * 20):
            noise[i, :] = np.random.uniform(-1, 1, 100)
        generated_images = generator.predict(noise, verbose=1)
        d_pret = discriminator.predict(generated_images, verbose=1)
        # Attach an index column so the original positions survive the sort.
        index = np.arange(0, BATCH_SIZE * 20)
        index.resize((BATCH_SIZE * 20, 1))
        pre_with_index = list(np.append(d_pret, index, axis=1))
        pre_with_index.sort(key=lambda x: x[0], reverse=True)
        # Keep the BATCH_SIZE images the discriminator scored highest.
        nice_images = np.zeros((BATCH_SIZE, 1) + generated_images.shape[2:], dtype=np.float32)
        for i in range(int(BATCH_SIZE)):
            idx = int(pre_with_index[i][1])
            nice_images[i, 0, :, :] = generated_images[idx, 0, :, :]
        image = combine_images(nice_images)
    else:
        noise = np.zeros((BATCH_SIZE, 100))
        for i in range(BATCH_SIZE):
            noise[i, :] = np.random.uniform(-1, 1, 100)
        generated_images = generator.predict(noise, verbose=1)
        image = combine_images(generated_images)
    # Rescale from [-1, 1] back to [0, 255] before saving.
    image = image * 127.5 + 127.5
    Image.fromarray(image.astype(np.uint8)).save("generated_image.png")
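# `combine_images` is used above but not defined in this file. The sketch
# below is one plausible implementation, assuming channels-first batches of
# shape (N, 1, H, W) as indexed by `generate`: it tiles the batch into a
# single roughly square grid image.
import math


def combine_images_sketch(generated_images):
    num = generated_images.shape[0]
    width = int(math.sqrt(num))                   # grid columns
    height = int(math.ceil(float(num) / width))   # grid rows
    shape = generated_images.shape[2:]            # (H, W)
    image = np.zeros((height * shape[0], width * shape[1]), dtype=generated_images.dtype)
    for index, img in enumerate(generated_images):
        i = index // width
        j = index % width
        image[i * shape[0]:(i + 1) * shape[0], j * shape[1]:(j + 1) * shape[1]] = img[0, :, :]
    return image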
def predict(self, data, number_of_steps):
    """Roll the model forward `number_of_steps` steps, feeding each
    prediction back into the input window."""
    predictions = np.empty(shape=(number_of_steps,))
    data_shape = data.shape
    for i in range(predictions.shape[0]):
        predicted_value = self.model.predict(data)
        predictions[i] = predicted_value.item()
        # Slide the window: drop the oldest element and append the prediction.
        data = np.reshape(np.append(data[0][1:], predicted_value.item()), newshape=data_shape)
    return predictions
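# Usage sketch for the rolling forecast above. The window size and the
# `forecaster` object are illustrative assumptions; the only requirement is
# that `self.model` maps an input of shape (1, window_size) to one value.
#
#   window = np.array([[0.1, 0.2, 0.3, 0.4, 0.5]])    # shape (1, 5)
#   future = forecaster.predict(window, number_of_steps=10)
#   future.shape                                       # (10,)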
import numpy as np
from keras.callbacks import TensorBoard, Callback
from keras.datasets import mnist
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from keras import backend as K

# Load the pre-generated RGB arrays and reshape them to (N, 28, 28, 3).
temp_input_array = np.load('../output/output.npy')
temp_input_array = temp_input_array.reshape((temp_input_array.shape[0], 28, 28, 3))
test_input_array = np.load('../output/output_test.npy')
test_input_array = test_input_array.reshape((test_input_array.shape[0], 28, 28, 3))

# Stack the three colour channels as independent single-channel samples.
input_array = temp_input_array[:, :, :, 0]
input_array = np.append(input_array, temp_input_array[:, :, :, 1], axis=0)
input_array = np.append(input_array, temp_input_array[:, :, :, 2], axis=0)
input_array = np.reshape(input_array, (input_array.shape[0], input_array.shape[1], input_array.shape[2], 1))

# For the test set, keep only the first channel.
test_input_array = test_input_array[:, :, :, 0]
test_input_array = np.reshape(test_input_array, (test_input_array.shape[0], test_input_array.shape[1], test_input_array.shape[2], 1))

(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))  # adapt this if using `channels_first` image data format
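# The imports above (Input, Conv2D, MaxPooling2D, UpSampling2D, Model)
# suggest a convolutional autoencoder over the (28, 28, 1) arrays prepared
# above. A minimal sketch of such a model follows; the layer widths and the
# optimizer are assumptions, not the original architecture.
input_img = Input(shape=(28, 28, 1))
x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)  # 7x7x8 bottleneck
x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')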
def evaluate(self, generator, iou_threshold=0.3, score_threshold=0.3, max_detections=100, save_path=None):
    """Evaluate a given dataset using this model.

    code originally from https://github.com/fizyr/keras-retinanet

    # Arguments
        generator       : The generator that represents the dataset to evaluate.
        iou_threshold   : The IoU threshold above which a detection counts as positive.
        score_threshold : The score confidence threshold to use for detections.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save images with visualized detections to.

    # Returns
        A dict mapping class labels to average precision scores.
    """
    # gather all detections and annotations
    all_detections = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
    all_annotations = [[None for i in range(generator.num_classes())] for j in range(generator.size())]

    for i in range(generator.size()):
        raw_image = generator.load_image(i)
        raw_height, raw_width, raw_channels = raw_image.shape

        # make the boxes and the labels
        pred_boxes = self.predict(raw_image)

        score = np.array([box.score for box in pred_boxes])
        pred_labels = np.array([box.label for box in pred_boxes])

        if len(pred_boxes) > 0:
            # convert from relative to absolute pixel coordinates
            pred_boxes = np.array([[box.xmin * raw_width, box.ymin * raw_height,
                                    box.xmax * raw_width, box.ymax * raw_height,
                                    box.score] for box in pred_boxes])
        else:
            pred_boxes = np.array([[]])

        # sort the boxes and the labels according to scores
        score_sort = np.argsort(-score)
        pred_labels = pred_labels[score_sort]
        pred_boxes = pred_boxes[score_sort]

        # copy detections to all_detections
        for label in range(generator.num_classes()):
            all_detections[i][label] = pred_boxes[pred_labels == label, :]

        annotations = generator.load_annotation(i)

        # copy ground truth to all_annotations
        for label in range(generator.num_classes()):
            all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()

    # compute mAP by comparing all detections and all annotations
    average_precisions = {}

    for label in range(generator.num_classes()):
        false_positives = np.zeros((0,))
        true_positives = np.zeros((0,))
        scores = np.zeros((0,))
        num_annotations = 0.0

        for i in range(generator.size()):
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            num_annotations += annotations.shape[0]
            detected_annotations = []

            for d in detections:
                scores = np.append(scores, d[4])

                if annotations.shape[0] == 0:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue

                overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]

                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)

        # no annotations -> AP for this class is 0 (is this correct?)
        if num_annotations == 0:
            average_precisions[label] = 0
            continue

        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]

        # compute cumulative false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)

        # compute recall and precision
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)

        # compute average precision
        average_precision = compute_ap(recall, precision)
        average_precisions[label] = average_precision

    return average_precisions
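# `compute_overlap` and `compute_ap` are called above but not defined in this
# file. The sketches below follow the keras-retinanet implementations that
# the docstring credits; treat them as illustrative, not as the exact code
# this module imports.


def compute_overlap(a, b):
    """Pairwise IoU between boxes `a` (N, >=4) and `b` (K, 4) -> (N, K).

    Boxes are (x1, y1, x2, y2, ...); only the first four columns are used.
    """
    area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], axis=1), b[:, 0])
    ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], axis=1), b[:, 1])
    iw = np.maximum(iw, 0)
    ih = np.maximum(ih, 0)
    intersection = iw * ih
    union = np.expand_dims((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), axis=1) + area - intersection
    union = np.maximum(union, np.finfo(float).eps)
    return intersection / union


def compute_ap(recall, precision):
    """Average precision from recall/precision curves (VOC-style)."""
    # append sentinel values at both ends
    mrec = np.concatenate(([0.], recall, [1.]))
    mpre = np.concatenate(([0.], precision, [0.]))
    # make the precision envelope monotonically decreasing
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # sum the areas of the rectangles where recall changes
    i = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])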