def __data_generation(self, list_ids_temp):
    'Generates data containing batch_size samples'  # X : (n_samples, *dim, n_channels)
    # Initialization
    shape = (len(list_ids_temp), *self.dim)
    X = np.empty(shape)
    y = np.empty(shape[:-1], dtype=np.int8)
    if self.use_ram:
        # In-RAM path: cases were preloaded and padded to 224x224x4 in __init__
        shape = (len(list_ids_temp), 224, 224, 4)
        X = np.empty(shape)
        y = np.empty(shape[:-1], dtype=np.int8)
        for i, (case_index, slice_index) in enumerate(list_ids_temp):
            data, label = self.cases[case_index]
            slice = data[slice_index]
            label = label[slice_index]
            X[i], y[i] = self.augmentor(slice, label)
        return X, np.expand_dims(y, -1)

    # print('Generating data for indices', list_ids_temp)
    # Generate data by reading each case from disk
    for i, (patient_id, slice_index) in enumerate(list_ids_temp):
        # Store sample: stack the four normalized modalities as channels
        dic = self.reader.get_case(patient_id)
        slice = np.empty(self.dim)
        slice[:, :, 0] = self.normalize(dic['t1ce'])[:, :, slice_index]
        slice[:, :, 1] = self.normalize(dic['t1'])[:, :, slice_index]
        slice[:, :, 2] = self.normalize(dic['t2'])[:, :, slice_index]
        slice[:, :, 3] = self.normalize(dic['flair'])[:, :, slice_index]
        X[i], y[i] = self.augmentor(slice, dic['labels'][:, :, slice_index])
    y = np.expand_dims(y, axis=-1)
    return preprocess(X, y, self.config)

def __data_generation(self, list_ids_temp):
    'Generates data containing batch_size samples'  # X : (n_samples, *dim, n_channels)
    # Initialization
    if self.use_ram:
        shape = (len(list_ids_temp), 224, 224, 4)
        X = np.empty(shape)
        y = np.empty(shape[:-1], dtype=np.int8)
        for i, (case_index, slice_index) in enumerate(list_ids_temp):
            data, label = self.cases[case_index]
            # Optionally sample axial, coronal or sagittal cross-sections with roughly equal probability
            rand_num = random.random() if self.use_all_cross_sections else 0
            if rand_num < 0.33:
                slice = data[slice_index]
                label = label[slice_index]
            elif rand_num < 0.66:
                slice = data[:, slice_index + 34, :, :]
                label = label[:, slice_index + 34, :]
            else:
                slice = data[:, :, slice_index + 34, :]
                label = label[:, :, slice_index + 34]
            X[i], y[i] = self.augmentor(slice, label)
        return X, np.expand_dims(y, -1)

    shape = (len(list_ids_temp), *self.dim)
    X = np.empty(shape)
    y = np.empty(shape[:-1], dtype=np.int8)
    # print('Generating data for indices', list_ids_temp)
    # Generate data by reading each case from disk
    for i, (patient_id, slice_index) in enumerate(list_ids_temp):
        # Store sample: stack the four normalized modalities as channels
        dic = self.reader.get_case(patient_id)
        slice = np.empty(self.dim)
        slice[:, :, 0] = self.normalize(dic['t1ce'])[:, :, slice_index]
        slice[:, :, 1] = self.normalize(dic['t1'])[:, :, slice_index]
        slice[:, :, 2] = self.normalize(dic['t2'])[:, :, slice_index]
        slice[:, :, 3] = self.normalize(dic['flair'])[:, :, slice_index]
        X[i], y[i] = (slice, dic['labels'][:, :, slice_index])
    y = np.expand_dims(y, axis=-1)
    X, y = preprocess(X, y)
    y = np.squeeze(y, axis=-1)
    for i in range(X.shape[0]):
        X[i], y[i] = self.augmentor(X[i], y[i])
    # Assumed final return, mirroring the in-RAM branch above
    return X, np.expand_dims(y, -1)

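# Context sketch (an assumption, not part of the original class): __data_generation
# variants like the two above are normally driven by the keras.utils.Sequence protocol,
# where __len__ reports batches per epoch and __getitem__ slices self.list_ids into one
# batch. The self-contained toy illustration below uses random dummy cases in place of
# the real reader and augmentor.
import numpy as np
from tensorflow import keras


class ToySliceSequence(keras.utils.Sequence):
    'Illustrative only: shows how a __data_generation-style method is typically driven.'

    def __init__(self, n_cases=4, n_slices=8, batch_size=4):
        self.batch_size = batch_size
        # same id layout as the real generator: (case_index, slice_index) pairs
        self.list_ids = [(c, s) for c in range(n_cases) for s in range(n_slices)]
        # dummy "cases": (slices, H, W, channels) data and (slices, H, W) labels
        self.cases = [(np.random.rand(n_slices, 32, 32, 4).astype(np.float16),
                       np.random.randint(0, 4, (n_slices, 32, 32), dtype=np.uint8))
                      for _ in range(n_cases)]
        self.length = int(np.ceil(len(self.list_ids) / self.batch_size))

    def __len__(self):
        # number of batches per epoch
        return self.length

    def __getitem__(self, index):
        # ids for this batch, then build it the same way __data_generation does
        ids = self.list_ids[index * self.batch_size:(index + 1) * self.batch_size]
        X = np.empty((len(ids), 32, 32, 4))
        y = np.empty((len(ids), 32, 32), dtype=np.int8)
        for i, (case_index, slice_index) in enumerate(ids):
            data, label = self.cases[case_index]
            X[i], y[i] = data[slice_index], label[slice_index]
        return X, np.expand_dims(y, -1)
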
def GetSteering(filename):
    """
    Get a tcpflow log and extract steering values received from and sent to the sim
    Inputs
        filename: string, name of tcpflow log
    """
    # open file
    sa = []
    # initialize prediction
    pred = ''
    f = open(filename, "r")
    file = f.read()
    try:
        lines = file.splitlines()
        for line in lines:
            start = line.find('{')
            if start == -1:
                continue
            jsonstr = line[start:]
            jsondict = json.loads(jsonstr)
            if "steering" in jsondict:
                # predicted
                pred = jsondict['steering']
            if "steering_angle" in jsondict:
                # actual
                act = jsondict['steering_angle']
                # save the pair, keeping only the last prediction in case two were sent, which does happen, e.g.:
                # 127.000.000.001.59460-127.000.000.001.09091: {"msg_type": "control", "steering": "-0.071960375", "throttle": "0.08249988406896591", "brake": "0.0"}
                # 127.000.000.001.59460-127.000.000.001.09091: {"msg_type": "control", "steering": "-0.079734944", "throttle": "0.08631626516580582", "brake": "0.0"}
                # 127.000.000.001.09091-127.000.000.001.59460: {"msg_type":"telemetry","steering_angle":-0.07196037,(...)
                if len(pred) > 0:
                    sa.append([float(pred), act])
                    pred = ''
                    # need to save this image
                    # deal with image later, sort out plot first
                    imgString = jsondict["image"]
                    image = Image.open(BytesIO(base64.b64decode(imgString)))
                    img_arr = np.asarray(image, dtype=np.float32)
                    img_arr_proc = preprocess(img_arr)
                    stitch = stitchImages(img_arr, img_arr_proc, 160, 120)
                    plt.imshow(stitch)
    except Exception as e:
        print("Exception raised: " + str(e))
    # the file should be closed automatically, but close it for good measure
    f.close()

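# stitchImages() is not defined in this section. A hypothetical stand-in, assuming it
# simply resizes both frames to the given size and places the simulator frame and the
# network-input frame side by side (same channel layout assumed for both):
import cv2
import numpy as np


def stitch_images_sketch(original, processed, width, height):
    'Hypothetical stand-in for stitchImages(): original and preprocessed frames side by side.'
    orig = cv2.resize(np.asarray(original, dtype=np.uint8), (width, height))
    proc = cv2.resize(np.asarray(processed, dtype=np.uint8), (width, height))
    return np.concatenate((orig, proc), axis=1)
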
def __init__(self, reader, num_slices, list_ids, dim, config, augmentor, use_all_cross_sections):
    'Initialization'
    self.augmentor = augmentor
    self.reader = reader
    self.list_ids = list(itertools.product(list_ids, range(num_slices)))
    self.list_ids = sorted(self.list_ids)
    self.use_all_cross_sections = use_all_cross_sections
    batch, height, width, channels = dim
    self.dim = (height, width, channels)
    self.batch_size = batch
    self.n_channels = channels
    self.cases = []
    self.use_ram = config.use_ram
    self.length = int(np.ceil(len(self.list_ids) / self.batch_size))
    if self.use_ram:
        self.list_ids = list(itertools.product(range(len(list_ids)), range(num_slices)))
        for index, id in tqdm(enumerate(list_ids), total=len(list_ids), ncols=60):
            case = reader.get_case(id)
            data = np.stack([
                self.normalize(case['flair']),
                self.normalize(case['t1']),
                self.normalize(case['t1ce']),
                self.normalize(case['t2'])
            ], axis=-1)
            labels = case['labels']
            data = np.transpose(data, axes=[2, 0, 1, 3])
            labels = np.transpose(np.expand_dims(labels, -1), [2, 0, 1, 3])
            data, labels = preprocess(data, labels, pad_batch=self.use_all_cross_sections)
            labels = np.squeeze(labels, -1)
            data = data.astype(np.float16)
            labels = labels.astype(np.uint8)
            self.cases.append((data, labels))

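# self.normalize() is called on each MRI modality above but is not defined in this
# section. A common choice for MRI volumes is per-volume min-max scaling; the helper
# below is an illustrative assumption, not the original implementation:
import numpy as np


def normalize_sketch(volume):
    'Hypothetical per-volume min-max normalization to [0, 1].'
    volume = volume.astype(np.float32)
    vmin, vmax = volume.min(), volume.max()
    if vmax > vmin:
        return (volume - vmin) / (vmax - vmin)
    return np.zeros_like(volume)
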
def predict_drive(datapath, modelpath, nc):
    """
    Generate predictions from a model for a dataset
    Inputs
        datapath: string, path to data
        modelpath: string, path to trained model
        nc: steering angle normalization constant
    """
    print("loading model", modelpath)
    model = load_model(modelpath)
    # In this mode it looks like we have to compile the model.
    # NB this is a bit tricky: do we need the same optimizer and loss function that were used to train the model?
    model.compile("sgd", "mse")
    files = get_files(datapath, True)
    outputs = []
    for fullpath in files:
        frame_number = os.path.basename(fullpath).split("_")[0]
        json_filename = os.path.join(os.path.dirname(fullpath), "record_" + frame_number + ".json")
        data = load_json(json_filename)
        # ground truth
        steering = float(data["user/angle"])  # normalized (divided by nc) by the simulator
        # prediction
        image = cv2.imread(fullpath)
        # The image will be 1. resized to the expected pre-processing size and 2. resized to the
        # size expected by the network. This is network-architecture and dataset dependent and is
        # currently managed in conf.py
        image = preprocess(image)
        image = image.reshape((1,) + image.shape)
        mod_pred = model.predict(image)
        # append prediction and ground truth to the list
        outputs.append([mod_pred[0][1], steering])
    # get goodness of steer
    sarr = np.asarray(outputs)
    p = sarr[:, 0]
    g = sarr[:, 1]
    gs = gos(p, g, nc)
    print(gs)
    # plotSteeringAngles(p, g=None, n=1, save=False, track="Track Name", mname="model name", title='title')
    gss = "{:.2f}".format(gs)
    modelpath = modelpath.split('/')
    datapath = datapath.split('/')
    plotSteeringAngles(p, g, nc, True, datapath[-2], modelpath[-1], 'Gs ' + gss)

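# Hypothetical invocation of predict_drive(); the paths and the normalization constant
# are illustrative only (the real entry point and conf.norm_const value are not shown here):
#
#   predict_drive("data/track1/", "models/mymodel.h5", conf.norm_const)
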
def __data_generation(self, case_index):
    'Generates data containing batch_size samples'  # X : (n_samples, *dim, n_channels)
    # Initialization
    case = self.reader.get_case(self.list_ids[case_index])
    data = np.stack([
        self.normalize(case['flair']),
        self.normalize(case['t1']),
        self.normalize(case['t1ce']),
        self.normalize(case['t2'])
    ], axis=-1)
    labels = case['labels']
    data = np.transpose(data, axes=[2, 0, 1, 3])
    labels = np.transpose(np.expand_dims(labels, -1), axes=[2, 0, 1, 3])
    data, labels = preprocess(data, labels, True)
    labels = np.squeeze(labels, -1)
    data = data.astype(np.float16)
    labels = labels.astype(np.uint8)
    return data, labels

def MakeVideo(filename, model, preproc=False):
    """
    Make video from tcpflow logged images. <model>.avi is written to disk
    Inputs
        filename: string, name of tcpflow log
        model: name of model to stamp onto video
        preproc: boolean, show preprocessed image next to original
    Output
        placeholder filename string; the video itself is written to disk
    """
    # video name
    video_name = model + '.avi'
    VIDEO_WIDTH, VIDEO_HEIGHT = 800, 600
    IMAGE_WIDTH, IMAGE_HEIGHT = 800, 600
    if preproc:
        # wide angle: original and preprocessed frames side by side
        VIDEO_WIDTH = IMAGE_WIDTH * 2
    video = cv2.VideoWriter(video_name, 0, 11, (VIDEO_WIDTH, VIDEO_HEIGHT))  # assumed 11 fps
    # font
    font = cv2.FONT_HERSHEY_SIMPLEX
    # open file
    sa = []
    # initialize prediction
    pred = ''
    f = open(filename, "r")
    file = f.read()
    try:
        lines = file.splitlines()
        for line in lines:
            start = line.find('{')
            if start == -1:
                continue
            jsonstr = line[start:]
            jsondict = json.loads(jsonstr)
            if "steering" in jsondict:
                # predicted
                pred = jsondict['steering']
            if "steering_angle" in jsondict:
                # actual
                act = jsondict['steering_angle']
                # save the pair, keeping only the last prediction in case two were sent, which does happen, e.g.:
                # 127.000.000.001.59460-127.000.000.001.09091: {"msg_type": "control", "steering": "-0.071960375", "throttle": "0.08249988406896591", "brake": "0.0"}
                # 127.000.000.001.59460-127.000.000.001.09091: {"msg_type": "control", "steering": "-0.079734944", "throttle": "0.08631626516580582", "brake": "0.0"}
                # 127.000.000.001.09091-127.000.000.001.59460: {"msg_type":"telemetry","steering_angle":-0.07196037,(...)
                if len(pred) > 0:
                    # save steering angles
                    sa.append([float(pred), act])
                    pred = ''
                    # process image
                    imgString = jsondict["image"]
                    # decode string
                    image = Image.open(BytesIO(base64.b64decode(imgString)))
                    # try to convert to jpg
                    # image = np.array(image)  # sky colour turns orange (TODO investigate)
                    # save
                    image.save('frame.jpg')
                    # reopen with the user-friendlier cv2
                    image = cv2.imread('frame.jpg')  # 120x160x3
                    image_copy = image
                    # resize so we can write some info onto the image
                    image = cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT), cv2.INTER_AREA)
                    # add info to frame: model name
                    cv2.putText(image, model, (50, 50), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
                    # predicted steering angle
                    pst = sa[len(sa) - 1][0]
                    pst *= conf.norm_const
                    simst = "Predicted steering angle: {:.2f}".format(pst)
                    cv2.putText(image, simst, (50, 115), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
                    # create a preprocessed copy to compare what the simulator generates to what the network "sees"
                    if preproc:
                        # wide angle
                        image2 = preprocess(image_copy)
                        image2 = cv2.resize(image2, (IMAGE_WIDTH, IMAGE_HEIGHT), cv2.INTER_AREA)
                        cv2.putText(image2, 'Network Image', (50, 50), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
                        # concatenate original and network views side by side
                        image = np.concatenate((image, image2), axis=1)
                    video.write(image)
                    pred = ''
    except Exception as e:
        print("Exception raised: " + str(e))
    # the file should be closed automatically, but close it for good measure
    f.close()
    cv2.destroyAllWindows()
    video.release()
    return "DummyName.mp4"

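# Hypothetical usage of MakeVideo(); the log file and model label are illustrative.
# The function writes <model>.avi to the working directory and also saves a temporary
# frame.jpg for each decoded telemetry image:
#
#   MakeVideo("tcpflow.log", "mymodel", preproc=True)
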