def deserialize(self, info):
    '''Restore serialized state of Feature from dict

    Arguments
    ---------
    info: dict | str
        Restore keyword/value pairs from dict.
        Alternatively restore dict from named file.
    '''
    if info is None:
        return
    if isinstance(info, str):
        with open(info, 'rb') as f:
            info = json.load(f)
    if 'model' in info:
        if info['model'] == 'LMHologram':
            self.model = LMHologram()
            self.model.properties = {k: info[k]
                                     for k in self.model.properties.keys()}
    if 'coordinates' in info:
        if hasattr(self.model, 'coordinates'):
            args = info['coordinates']
            self.model.coordinates = coordinates(*args)
    if 'data' in info:
        data = np.array(info['data'])
        if 'shape' in info:
            data = data.reshape(info['shape'])
        self.data = data
    if 'label' in info:
        self.label = info['label']
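# Usage sketch for deserialize(): state can be supplied either as a dict or as
# the name of a JSON file. The example values and the file name below are
# hypothetical; only the keys read above ('model', 'coordinates', 'data',
# 'shape', 'label') are meaningful.
#
#   feature = Feature(model=LMHologram())
#   feature.deserialize('feature_state.json')        # restore from a saved file
#   feature.deserialize({'data': pixels.tolist(),    # ... or from a dict
#                        'shape': (201, 201),
#                        'label': 0})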
def fit(data, a_p, n_p, z_p, plot=False, return_img=False):
    feature = Feature(model=LMHologram())

    # Image is assumed square; infer the edge length in pixels
    px = int(np.sqrt(data.size))

    # Instrument configuration (wv, mag and n_m are module-level constants)
    ins = feature.model.instrument
    ins.wavelength = wv
    ins.magnification = mag
    ins.n_m = n_m

    # Subsample pixels to speed up optimization
    feature.optimizer.mask.settings['distribution'] = 'fast'
    feature.optimizer.mask.settings['percentpix'] = .1

    feature.model.coordinates = coordinates((px, px), dtype=np.float32)

    # Initial estimates for the particle parameters
    p = feature.model.particle
    p.r_p = [px // 2, px // 2, z_p / mag]
    p.a_p = a_p
    p.n_p = n_p

    feature.data = np.array(data)
    result = feature.optimize(method='lm', verbose=False)
    print(feature.model.hologram().shape)
    print(result)

    if plot:
        # Display the data and the fitted hologram side by side on the
        # (px, px) grid defined above
        hologram = feature.model.hologram().reshape((px, px))
        plt.imshow(np.hstack([np.reshape(data, (px, px)), hologram]))
        plt.show()

    a_fit = feature.model.particle.a_p
    n_fit = feature.model.particle.n_p
    z_fit = feature.model.particle.z_p

    if return_img:
        return feature.model.hologram(), a_fit, n_fit, z_fit
    else:
        return a_fit, n_fit, z_fit
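# Usage sketch for fit() (values are hypothetical): the instrument constants
# wv, mag and n_m must be defined at module scope before calling, and holo is
# a square hologram image.
#
#   a_fit, n_fit, z_fit = fit(holo, a_p=1.0, n_p=1.45, z_p=15., plot=True)
#   img, a_fit, n_fit, z_fit = fit(holo, a_p=1.0, n_p=1.45, z_p=15.,
#                                  return_img=True)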
def data(self, data):
    # Normalize the image by its background and by its mean intensity
    self._data = data / self.background
    self._data /= np.mean(self._data)
    self.image.setImage(self._data)
    self.data_coords = coordinates(data.shape)
    # Constrain the position and crop-size widgets to the image bounds
    self.ui.x_p.setRange(0, data.shape[1] - 1)
    self.ui.y_p.setRange(0, data.shape[0] - 1)
    self.ui.bbox.setRange(0, min(data.shape[0] - 1, data.shape[1] - 1))
    self.updateDataProfile()
# from pylorenzmie.theory.cuholo import cucoordinates as coordinates
import cv2
import matplotlib.pyplot as plt
from time import time

a = Feature(model=LMHologram())

# Read example image
img = cv2.imread('../tutorials/crop.png')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = img / np.mean(img)
shape = img.shape
a.data = img

# Instrument configuration
a.model.coordinates = coordinates(shape, dtype=np.float32)
ins = a.model.instrument
ins.wavelength = 0.447
ins.magnification = 0.048
ins.n_m = 1.34

# Initial estimates for particle properties
p = a.model.particle
p.r_p = [shape[0] // 2, shape[1] // 2, 330.]
p.a_p = 1.1
p.n_p = 1.4

# Add random errors to the starting parameters
p.r_p += np.random.normal(0., 1, 3)
p.z_p += np.random.normal(0., 30, 1)
p.a_p += np.random.normal(0., 0.1, 1)
p.n_p += np.random.normal(0., 0.04, 1)
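# A plausible continuation of this example script (an assumption, not part of
# the original): time the fit using the same optimize() call as in fit() above
# and the time import already loaded, then compare data and model.
start = time()
result = a.optimize(method='lm', verbose=False)
print('Time to fit: {:.3f} s'.format(time() - start))
print(result)

plt.imshow(np.hstack([img, a.model.hologram().reshape(shape)]), cmap='gray')
plt.show()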
def crop_feature(img_list=[], xy_preds=[], new_shape=(201, 201)):
    '''
    img_list: list of images (np.ndarray), each with shape old_shape
    xy_preds: output of a YOLO prediction: list of lists of dicts,
              where xy_preds[i] corresponds to img_list[i]

    output: list of lists of Feature objects
    '''
    numfiles = len(img_list)
    numpreds = len(xy_preds)
    if numfiles != numpreds:
        raise Exception(
            'Number of images: {} does not match number of predictions: {}'
            .format(numfiles, numpreds))

    frame_list = []
    est_input_img = []
    est_input_scale = []
    for num in range(numfiles):
        feature_list = []
        img_local = img_list[num]
        preds_local = xy_preds[num]
        for pred in preds_local:
            f = Feature(model=LMHologram())
            conf = pred["conf"] * 100  # detection confidence, as a percentage
            (x, y, w, h) = pred["bbox"]
            xc = int(np.round(x))
            yc = int(np.round(y))
            ext = np.amax([int(w), int(h)])
            # Downsample crops that are larger than new_shape
            if ext <= new_shape[0]:
                crop_shape = new_shape
                scale = 1
            else:
                scale = int(np.floor(ext / new_shape[0]) + 1)
                crop_shape = np.multiply(new_shape, scale)
            cropped, corner1 = crop_center(img_local, (xc, yc), crop_shape)
            cropped = cropped[:, :, 0]
            est_img = cropped[::scale, ::scale]
            est_input_img.append(est_img)
            est_input_scale.append(scale)
            # Crop again to the feature's bounding-box extent
            newcenter = [int(x) for x in np.divide(crop_shape, 2)]
            ext_shape = (ext, ext)
            data, corner2 = crop_center(cropped, newcenter, ext_shape)
            corner = np.add(corner1, corner2)
            data = np.array(data) / 100.
            f.data = data
            coords = coordinates(shape=ext_shape, corner=corner)
            f.model.coordinates = coords
            f.model.particle.x_p = x
            f.model.particle.y_p = y
            feature_list.append(f)
        feature_list = np.array(feature_list)
        frame_list.append(feature_list)
    frame_list = np.array(frame_list)

    # Sanity check: one estimator input per cropped feature
    frlistsize = 0
    for frame in frame_list:
        frlistsize += len(frame)
    est_input_img = np.array(est_input_img)
    est_input_scale = np.array(est_input_scale)
    if frlistsize != len(est_input_img):
        print('error in output sizes')
        print('Frame list size:', frlistsize)
        print('Estimator input size:', len(est_input_img))
    return frame_list, est_input_img, est_input_scale
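# Usage sketch for crop_feature() (the prediction dict below is hypothetical;
# only the 'conf' and 'bbox' keys are required by the function above):
#
#   preds = [[{'conf': 0.95, 'bbox': (320., 240., 180., 175.)}]]
#   features, est_imgs, est_scales = crop_feature(img_list=[frame],
#                                                 xy_preds=preds)
#   f = features[0][0]   # a Feature centered on the detected particle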