def get_3D_transform(imax):
    """Forward 3D complex-to-complex FFT of a cubic MRC map.

    ``imax`` is the cubic edge length (ideally a "good" FFT size) the map
    is resized to before transforming; it must be supplied by the caller.
    """
    # with mrcfile.open("cropout_bin4.mrc") as mrc:  # small example
    with mrcfile.open("cropout.mrc") as mrc:
        # coerce to double
        realpart = flex.double(mrc.data.astype(np.float64))
    # in-place resize to a good multiple of 2
    realpart.resize(flex.grid(imax, imax, imax))
    complpart = flex.double(flex.grid(imax, imax, imax))
    C3D = flex.complex_double(realpart, complpart)
    from scitbx import fftpack
    print("C3Dfocus", C3D.focus())
    FFT = fftpack.complex_to_complex_3d(
        (C3D.focus()[0], C3D.focus()[1], C3D.focus()[2]))
    c = FFT.forward(C3D)
    print(c.focus())
    return c

def raw(filename):
    """Read an MRC file and return its data as an independent numpy array."""
    with mrcfile.open(filename, permissive=True) as mrc:
        arr = mrc.data.copy()
    return arr

def make_photos(basename, working_directory):
    """Convert MRC file with stack of classes to series of scaled PNGs for web viewing.

    Args:
        basename (str): name of desired folder within class_images - usually
            the same name as the mrc file.
        working_directory (str): the base directory where :py:mod:`live_2d`
            is working.

    Returns:
        str: Directory path with new PNGs written out.
    """
    live2dlog = logging.getLogger("live_2d")
    if not os.path.isdir(os.path.join(working_directory, "class_images")):
        os.mkdir(os.path.join(working_directory, "class_images"))
    if not os.path.isdir(os.path.join(working_directory, "class_images", basename)):
        os.mkdir(os.path.join(working_directory, "class_images", basename))
    photo_dir = os.path.join(working_directory, "class_images", basename)
    with mrcfile.open(os.path.join(working_directory, "{}.mrc".format(basename)), "r") as stack:
        for index, item in enumerate(stack.data):
            imageio.imwrite(os.path.join(photo_dir, "{}.png".format(index + 1)), item)
    live2dlog.info(
        f"Exported class averages to web-friendly images stored in {photo_dir}")
    return photo_dir

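# Usage sketch for make_photos (the paths below are hypothetical): the
# function expects <working_directory>/<basename>.mrc to exist and writes
# numbered PNGs under <working_directory>/class_images/<basename>/.
photo_dir = make_photos("classes_round_3", "/scratch/live2d_session")
print("class images in:", photo_dir)
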
def get_train_set(img_per_class):
    """Infinite generator of (volume, one-hot label) training pairs.

    Assumes len(classes) == 10, matching the hard-coded wrap-around and
    one-hot length below.
    """
    files = []
    Y = []
    for i in classes:
        for j in range(img_per_class):
            filename = '../data/SNR003/' + str(i) + '/tomotarget' + str(j) + '.mrc'
            files.append(filename)
            Y.append(classes.index(i))
    # shuffle files and labels together
    c = list(zip(files, Y))
    random.shuffle(c)
    files, Y = zip(*c)
    count = 0
    while True:
        if count == img_per_class * 10:
            count = 0
        with mrcfile.open(files[count]) as f:
            x = f.data.copy()
        x = pad(x)
        # x = norm_3d(x)
        x = np.expand_dims(x, axis=0)   # batch dimension
        x = np.expand_dims(x, axis=4)   # channel dimension
        y_arr = np.zeros(10)
        y_arr[Y[count]] = 1
        y_arr = np.expand_dims(y_arr, axis=0)
        count += 1
        yield x, y_arr

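# A minimal sketch of how the generator above might be consumed, assuming a
# compiled Keras 3D-CNN named `model` (hypothetical, not part of this file).
# Each yielded batch has shape (1, depth, height, width, 1) with a (1, 10)
# one-hot label.
train_gen = get_train_set(img_per_class=450)
x, y = next(train_gen)
print(x.shape, y.shape)
# model.fit(train_gen, steps_per_epoch=4500, epochs=10)
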
def read_mrc(fname, cut_sino):
    import mrcfile
    # Read projections from an MRC file with an FEI extended header; the
    # per-projection tilt angles are parsed directly from that header below.
    proj_geom = {}
    with mrcfile.open(fname, 'r', permissive=True) as mrc:
        proj = mrc.data[:]
    if cut_sino:
        proj = proj[:, cut_sino:-cut_sino, cut_sino:-cut_sino]
    proj_geom['type'] = 'parallel3d'
    proj_geom['DetectorColCount'] = proj.shape[2]
    proj_geom['DetectorRowCount'] = proj.shape[1]
    proj_geom['DetectorSpacingX'] = 2. / proj.shape[2]
    proj_geom['DetectorSpacingY'] = 2. / proj.shape[1]
    proj_geom['DistanceOriginSource'] = 4.0
    import struct
    proj_geom['ProjectionAngles'] = np.zeros(proj.shape[0])
    with open(fname, "rb") as f:
        for i in range(proj.shape[0]):
            # FEI extended header: 128 bytes per section after the 1024-byte
            # main header; the tilt angle (degrees) is the first float of
            # each record. Convert to radians.
            f.seek(1024 + i * 128, 0)
            byte_float = f.read(4)
            proj_geom['ProjectionAngles'][i] = struct.unpack(
                'f', byte_float)[0] * np.pi / 180
    return proj, proj_geom

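# Usage sketch, assuming a tilt series at the hypothetical path below. The
# returned dict mirrors an ASTRA-style parallel3d geometry description.
proj, proj_geom = read_mrc('tiltseries.mrc', cut_sino=0)
print(proj.shape)                                 # (n_tilts, rows, cols)
print(np.degrees(proj_geom['ProjectionAngles']))  # tilt angles in degrees
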
def initialize():
    """Initialize program state from a user-chosen MRC file."""
    global mrc, img_matrix, shape, nx, ny, nz, df, unit, global_regionid, cube_id, threshold
    fname = input("choose mrc file:")
    mrc = mrcfile.open(fname, mode='r+')
    img_matrix = np.copy(mrc.data)
    # Two passes of Gaussian smoothing (sigma=1 each) to suppress noise.
    img_matrix = gaussian_filter(img_matrix, sigma=1, mode="constant",
                                 cval=0.0, truncate=4.0)
    img_matrix = gaussian_filter(img_matrix, sigma=1, mode="constant",
                                 cval=0.0, truncate=4.0)
    threshold = img_matrix.mean()
    nx = mrc.header.nx
    ny = mrc.header.ny
    nz = mrc.header.nz
    shape = (nx, ny, nz)
    unit = int(math.sqrt(nx))
    df = pd.DataFrame({"Name": [fname]})
    df['region number in divide'] = "8/8"
    global_regionid = -1

def read_mrc(image_path):
    with mrcfile.open(image_path, permissive=True) as mrc:
        mrc_image_data = np.copy(mrc.data)
    mrc_image_data = np.squeeze(mrc_image_data)
    mrc_image_data = np.flipud(mrc_image_data)
    return mrc_image_data

def mrc_to_tiff(mrc: str, scaling_factor: float, outdir: str = None):
    # Default to the current directory rather than crashing on Path(None).
    outdir = Path(outdir) if outdir is not None else Path.cwd()
    print("Loading mrc file...")
    with mrcfile.open(mrc) as fmrc:
        print("Starting conversion to .tiff\n")
        futures = []
        pbar = tqdm.tqdm(total=len(fmrc.data))
        with concurrent.futures.ThreadPoolExecutor(max_workers=WORKERS) as executor:
            for i, image in enumerate(fmrc.data):
                out = outdir / f"mrc_{i:05d}.tiff"
                futures.append(
                    executor.submit(
                        convert,
                        image=image,
                        scaling_factor=scaling_factor,
                        out=out,
                        callback=pbar.update,
                    ))
        pbar.close()
        print()
        # Surface any exception raised inside the worker threads.
        for future in futures:
            future.result()

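# Usage sketch with hypothetical paths. WORKERS and convert() are assumed to
# be defined at module scope; each slice of the stack becomes one TIFF.
mrc_to_tiff("movie_stack.mrc", scaling_factor=0.25, outdir="tiff_frames")
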
def save_image_k3(mrc_name, height=494):
    try:
        with mrcfile.open(mrc_name, permissive=True) as mrc:
            micrograph = mrc.data.copy()
        if micrograph.ndim == 3:
            # Single-image stack: drop the leading axis.
            micrograph = micrograph.reshape(
                (micrograph.shape[1], micrograph.shape[2]))
        new_img = scale_image(micrograph, height)
        short_edge = min(np.array(new_img).shape[0], np.array(new_img).shape[1])
        new_img_left = crop_left(np.array(new_img), short_edge, short_edge)
        new_img_right = crop_right(np.array(new_img), short_edge, short_edge)
        basename = os.path.basename(mrc_name)[:-4] + '.jpg'
        new_img.save(os.path.join('MicAssess', 'jpgs', 'data', basename))
        new_img_left.save(os.path.join('MicAssess', 'k3_left', 'data', basename))
        new_img_right.save(os.path.join('MicAssess', 'k3_right', 'data', basename))
    except ValueError:
        print('Warning - Having trouble converting this file:', mrc_name)

def mrc2data(mrc_filename):
    """Return the data array of an MRC file."""
    with mrcfile.open(mrc_filename, mode='r') as mrc:
        data = mrc.data.copy()
    return data

def _read(self):
    with mrcfile.open(self.filepath) as mrc:
        im = mrc.data.astype('double')
    # For multi-image mrc files, mrcfile returns an ndarray with shape
    # (n_images, height, width); swap axes 0 and 2 so we get the more
    # natural (height, width, n_images).
    if im.ndim == 3:
        im = np.swapaxes(im, 0, 2)
    self.original_im = im
    # Discard outer pixels
    im = im[
        self.margin_top: -self.margin_bottom if self.margin_bottom is not None else None,
        self.margin_left: -self.margin_right if self.margin_right is not None else None
    ]
    if self.square:
        side_length = min(im.shape[0], im.shape[1])
        im = im[:side_length, :side_length]
    if self.shrink_factor is not None:
        # Honor the per-instance shrink factor rather than the global
        # config value, so it matches the guard above.
        size = tuple((np.array(im.shape) / self.shrink_factor).astype(int))
        im = np.array(Image.fromarray(im).resize(size, Image.BICUBIC))
    if self.gauss_filter_size is not None:
        im = signal.correlate(
            im,
            Micrograph.gaussian_filter(self.gauss_filter_size, self.gauss_filter_sigma),
            'same'
        )
    self.im = im.astype('double')
    self.shape = im.shape

def execute(paths):
    threshold = get_threshold(paths)
    with mrcfile.open(paths['cleaned_map'], mode='r') as experimental_map:
        experimental_data = deepcopy(experimental_map.data)
        map_origin = experimental_map.header.origin
    # Remove low valued data and translate the higher values down to zero.
    experimental_data[experimental_data < 0] = 0
    # Change all values < threshold to 0
    experimental_data[experimental_data < threshold] = 0
    # translate data to have min = 0
    experimental_data[experimental_data > 0] -= threshold
    # normalize data with the 60th-percentile value of the nonzero voxels
    percentile = numpy.percentile(
        experimental_data[numpy.nonzero(experimental_data)], 60)
    experimental_data /= percentile
    # Clip the very high-intensity voxels to the 98th percentile
    percentile_98 = numpy.percentile(
        experimental_data[numpy.nonzero(experimental_data)], 98)
    experimental_data[experimental_data > percentile_98] = percentile_98
    # Write the normalized map to disk.
    with mrcfile.new(paths['normalized_map'], overwrite=True) as mrc:
        mrc.set_data(experimental_data)
        mrc.header.origin = map_origin

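# Usage sketch: `paths` is the dict of file locations this pipeline passes
# around (get_threshold() is assumed to read what it needs from the same
# dict). The paths shown are hypothetical.
execute({
    'cleaned_map': 'maps/emd_1234_cleaned.mrc',
    'normalized_map': 'maps/emd_1234_normalized.mrc',
})
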
def run(mrcin, prefix=None):
    f = mrcfile.open(mrcin)
    if prefix is None:
        prefix = os.path.splitext(os.path.basename(mrcin))[0]
    data = f.data
    if data.ndim == 2:
        data = data.reshape(-1, *data.shape)
    print("MRC file loaded")
    print(" shape = %s" % (data.shape,))
    print(" dtype = %s" % data.dtype)
    print()
    for i in range(data.shape[0]):
        size2, size1 = data[i].shape  # XXX really?
        cbfout = "%s_%.3d.cbf" % (prefix, i + 1)
        print("Writing %s" % cbfout)
        cbf.save_numpy_data_as_cbf(
            data[i].flatten(), size1, size2, "%s:%d" % (mrcin, i + 1), cbfout,
            pilatus_header="""
# Detector: %(detname)s
# Pixel_size %(pixelsize)e m x %(pixelsize)e m
# Wavelength %(wavelength)f A
# Detector_distance %(distance)f m
# Beam_xy (%(beamx)f, %(beamy)f) pixels
# Start_angle %(start_angle).4f deg.
# Angle_increment %(angle_inc).4f deg.
""" % dict(detname="????", pixelsize=75.e-6, wavelength=1, distance=300.e-3,
           beamx=size1 / 2., beamy=size2 / 2., start_angle=0, angle_inc=0))

def basicDVreader(image_path, n_channels=3, z_first=True):
    '''
    Very simple function to read .dv files as formatted by DeltaVision
    microscopes.

    image_path is the complete file path of the image.
    n_channels (default 3) is the number of fluorescence and bright field
        channels.
    z_first (default True): boolean, is the order of the .dv tiff stack
        z-first or channel-first (often, z_first = True for R3D_D3D,
        z_first = False for R3D).
    '''
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        with mrcfile.open(image_path, permissive=True) as dv:
            dvData = dv.data[:]
    dvShape = dvData.shape
    nZslices = int(dvShape[0] / n_channels)
    dvImage = np.zeros([n_channels, nZslices, dvShape[1], dvShape[2]],
                       dtype='uint16')
    if z_first:
        for channel in range(n_channels):
            dvImage[channel, :, :, :] = dvData[
                channel * nZslices:channel * nZslices + nZslices, :, :]
    else:
        for channel in range(n_channels):
            dvImage[channel, :, :, :] = dvData[channel::n_channels, :, :]
    return dvImage

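# Usage sketch with a hypothetical file. The result is indexed as
# (channel, z, y, x), so dvImage[0] is the full z-stack of channel 0.
dvImage = basicDVreader('cells_R3D_D3D.dv', n_channels=3, z_first=True)
print(dvImage.shape)  # (3, nZ, height, width)
mid_slice = dvImage[0, dvImage.shape[1] // 2]
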
def write_mrc_from_atoms(
    path: Path,
    atoms: mda.AtomGroup,
    path_out: Path,
    context: float = 4.0,
    cut_box=True,
    keep_data=False,
):
    """Mask an mrc map using a group of atoms."""
    if not atoms:
        logger.warning("Cannot crop empty atom selection. No file written")
        return
    with mrcfile.open(path) as mrc:
        voxel, grid, origin, full_data = _get_mrc_properties(mrc)
    data_mask = _create_voxel_mask(atoms, grid, origin, voxel, context,
                                   symmetric=keep_data)
    data = full_data * data_mask
    if cut_box:
        data, origin, voxel = _mrc_cutbox(data, full_data, origin, voxel,
                                          keep_full=keep_data)
    with mrcfile.new(path_out, overwrite=True) as mrc_out:
        mrc_out.set_data(data.transpose())
        mrc_out.voxel_size = tuple(voxel.tolist())
        mrc_out.header["origin"] = tuple(origin)

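# Usage sketch, assuming an MDAnalysis Universe for the fitted model; the
# selection string and file paths are hypothetical. The map is cropped to
# the selected segment plus a 4 A context margin.
u = mda.Universe("model.pdb")
write_mrc_from_atoms(
    path=Path("full_map.mrc"),
    atoms=u.select_atoms("segid A"),
    path_out=Path("chainA_masked.mrc"),
    context=4.0,
)
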
def read_pix_mrc(filename):
    """Return the data array (as a float32 copy) and voxel size of an MRC file."""
    with mrcfile.open(filename) as mrc:
        arr = mrc.data.astype(np.float32)
        voxel_size = mrc.voxel_size
    return arr, voxel_size

def NoiseGenerator(self, idx):
    self.args.ProjectionSize = int(self.args.ProjectionSize)
    if self.args.UseEstimatedNoise == False:
        image = torch.zeros(self.args.ProjectionSize,
                            self.args.ProjectionSize).normal_()
        image = image.unsqueeze(0)
    else:
        if self.args.dataset == 'Betagal' or self.args.dataset == 'Betagal-Synthetic':
            with mrcfile.open(self.BackgroundPath + str(idx).zfill(6) + ".mrc") as m:
                image = np.array(m.data, dtype=np.float32)
            if self.args.GaussianFilterProjection:
                image = scipy.ndimage.gaussian_filter(image, self.args.GaussianSigma)
            image = Tensor(image).unsqueeze(0).cuda()
            # Downsample by average pooling to the raw projection size.
            downsampling = image.shape[-1] // self.args.RawProjectionSize
            if downsampling > 1:
                image = torch.nn.functional.avg_pool2d(image,
                                                       kernel_size=downsampling,
                                                       stride=downsampling,
                                                       padding=0)
    # Standardize to zero mean and unit variance per image.
    image = (image - image.mean((1, 2), keepdim=True)) / image.std((1, 2), keepdim=True)
    return image

def save_mrcs(**args):
    print('Converting mrcs to jpg....')
    # navigate to the parent dir of the input file
    os.chdir(os.path.abspath(os.path.dirname(args['input'])))
    try:
        shutil.rmtree(args['output'])
    except OSError:
        pass
    os.mkdir(args['output'])
    os.mkdir(os.path.join(args['output'], 'data'))
    with mrcfile.open(os.path.basename(args['input'])) as mrc:
        avg_mrc = mrc.data.copy()
    if avg_mrc.ndim == 2:
        # Promote a single 2D class average to a one-image stack so the
        # loop below can index it uniformly.
        avg_mrc = avg_mrc[np.newaxis, :, :]
    num_part = avg_mrc.shape[0]
    for i in range(num_part):
        new_img = avg_mrc[i, :, :]
        # Skip empty (all-zero) class averages.
        if abs(np.sum(new_img)) > 1e-7:
            new_img = cutbyradius(new_img)
            new_img = ((new_img - new_img.min()) /
                       ((new_img.max() - new_img.min()) + 1e-7) * 255).astype('uint8')
            new_img = Image.fromarray(new_img)
            new_img = new_img.convert("L")
            new_img.save(os.path.join(args['output'], 'data',
                                      (args['name'] + '_' + str(i + 1) + '.jpg')))

def _read(self):
    with mrcfile.open(self.filepath) as mrc:
        im = mrc.data.astype('double')
    # For multi-image mrc files, mrcfile returns an ndarray with shape
    # (n_images, height, width); swap axes 0 and 2 so we get the more
    # natural (height, width, n_images).
    if im.ndim == 3:
        im = np.swapaxes(im, 0, 2)
    self.original_im = im
    # Discard outer pixels
    im = im[self.margin_top:-self.margin_bottom if self.margin_bottom is not None else None,
            self.margin_left:-self.margin_right if self.margin_right is not None else None]
    if self.square:
        side_length = min(im.shape)
        im = im[:side_length, :side_length]
    if self.shrink_factor is not None:
        # scipy.misc.imresize was removed in SciPy 1.3; a PIL-based bicubic
        # resize (an assumption, not the original call) stands in for it.
        size = tuple((np.array(im.shape) / self.shrink_factor).astype(int))
        im = np.array(Image.fromarray(im).resize(size, Image.BICUBIC))
    if self.gauss_filter_size is not None:
        im = signal.correlate(
            im,
            Micrograph.gaussian_filter(self.gauss_filter_size, self.gauss_filter_sigma),
            'same')
    self.im = im.astype('double')
    self.shape = im.shape

def read_mrc(self, mrcfilename=None):
    if mrcfilename is not None:
        self.mrcfilename = mrcfilename
    with mrcfile.open(self.mrcfilename) as emd:
        nx, ny, nz = emd.header['nx'], emd.header['ny'], emd.header['nz']
        x0, y0, z0 = (emd.header['origin']['x'], emd.header['origin']['y'],
                      emd.header['origin']['z'])
        dx, dy, dz = (emd.voxel_size['x'], emd.voxel_size['y'],
                      emd.voxel_size['z'])
        # Build each axis as origin + index * step; index-based axes avoid
        # the extra endpoint that float-stepped numpy.arange can produce.
        xyz = numpy.meshgrid(x0 + numpy.arange(nx) * dx,
                             y0 + numpy.arange(ny) * dy,
                             z0 + numpy.arange(nz) * dz,
                             indexing='ij')
        xyz = numpy.asarray(xyz)
        xyz = xyz.reshape(3, nx * ny * nz)
        xyz = xyz.T
        self.grid = emd.data.flatten(order='F').reshape(nx, ny, nz)
        print('Reading density:')
        print(self.grid.shape)
        self.grid_coords = xyz.reshape(nx, ny, nz, 3)
        self.grid_indices = numpy.indices(self.grid.shape)
        # Move the axis index from first to last:
        # (3, nx, ny, nz) -> (nx, ny, nz, 3)
        self.grid_indices = numpy.squeeze(
            self.grid_indices[..., None].swapaxes(0, -1))
        assert dy == dx and dz == dx
        self.step = dx
        self.origin = numpy.asarray([x0, y0, z0])

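# A small sketch of what the precomputed grids enable: a density-weighted
# centroid in real-space map coordinates. `density` stands for an instance
# of the surrounding class (its name is not shown in this snippet) after
# read_mrc() has run.
weights = density.grid.ravel()
coords = density.grid_coords.reshape(-1, 3)
centroid = (coords * weights[:, None]).sum(axis=0) / weights.sum()
print("density-weighted centroid (map coordinates):", centroid)
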
def read_mrc(self):
    """Gets and preprocesses micrograph.

    Reads the micrograph, applies binning and a low-pass filter.

    Returns:
        Micrograph image.
    """
    with mrcfile.open(self.filename, mode='r', permissive=True) as mrc:
        im = mrc.data.astype('float')
    # Discard outer pixels
    im = im[config.apple.mrc.margin_top:-config.apple.mrc.margin_bottom,
            config.apple.mrc.margin_left:-config.apple.mrc.margin_right]
    # Make square
    side_length = min(im.shape)
    im = im[:side_length, :side_length]
    # scipy.misc.imresize was removed in SciPy 1.3; a PIL-based bicubic
    # resize (an assumption, not the original call) stands in for it.
    size = tuple((np.array(im.shape) / config.apple.mrc.shrink_factor).astype(int))
    im = np.array(Image.fromarray(im).resize(size, Image.BICUBIC))
    im = signal.correlate(
        im,
        PickerHelper.gaussian_filter(config.apple.mrc.gauss_filter_size,
                                     config.apple.mrc.gauss_filter_sigma),
        'same')
    return im.astype('double')

def test_rec():
    data_expected1 = mrcfile.open("test/rec_expected_gpu.mrc").data
    data_expected2 = mrcfile.open("test/rec_expected_cpu.mrc").data
    data1 = mrcfile.open("test/rec_gpu.mrc").data
    data2 = mrcfile.open("test/rec_cpu.mrc").data
    diff1 = numpy.max(numpy.abs(data_expected1 - data1))
    # Compare the CPU result against the CPU expectation (data_expected2
    # was otherwise unused).
    diff2 = numpy.max(numpy.abs(data_expected2 - data2))
    diff3 = numpy.max(numpy.abs(data1 - data2))
    # Debug visualization of the GPU/CPU difference; disabled so the test
    # does not block on an interactive window.
    # from matplotlib import pylab
    # pylab.imshow((data1 - data2)[100, :, :])
    # pylab.show()
    assert diff3 == pytest.approx(0)
    assert diff1 == pytest.approx(0)
    assert diff2 == pytest.approx(0)

def read_mrc(file):
    """
    Read in a tomogram in MRC or REC format and return its data as a
    numpy array.
    """
    with mrcfile.open(file, permissive=True) as m:
        return m.data.astype(np.float32)

def CoordsBackground_Betagal_Std(pathCoords, pathMicrograph, threshold=False,
                                 NumberParticles=10, sizeParticle=384,
                                 downSample=1):
    N = sizeParticle
    M = 2 * N
    with mrcfile.open(pathMicrograph, permissive=True) as image:
        micrograph = torch.Tensor(image.data).cuda()
    # Local variance map: E[x^2] - E[x]^2 under a box filter of the
    # particle size.
    template = torch.ones(sizeParticle, sizeParticle).float().cuda() / (sizeParticle ** 2)
    meanSquare = ConvolveTemplate(micrograph, template, downSample) ** 2
    Std = ConvolveTemplate(micrograph ** 2, template, downSample)
    heatmap = Std - meanSquare
    heatmap = heatmap.cpu().numpy()
    # Exclude a border of width M by filling it with the maximum value.
    val = np.max(heatmap)
    heatmap[:M, :] = val
    heatmap[:, :M] = val
    heatmap[:, -M:] = val
    heatmap[-M:, :] = val
    # Greedily pick the lowest-variance positions, suppressing a
    # half-particle neighborhood around each pick.
    coords = np.zeros((NumberParticles, 2))
    for p in range(NumberParticles):
        ind = aminArray(heatmap)
        coords[p, 0] = ind[1]
        coords[p, 1] = ind[0]
        heatmap[ind[0] - sizeParticle // 4: ind[0] + sizeParticle // 4,
                ind[1] - sizeParticle // 4: ind[1] + sizeParticle // 4] = val
    return coords

def calculate_sphericity(inmrc):
    # read MRC
    with mrcfile.open(inmrc) as mrc:
        inputmrc = mrc.data.copy()
    # Pad before Gaussian filtering to avoid edge effects
    # (np.float was removed from numpy; use float64 explicitly).
    extended_inputmrc = np.zeros((inputmrc.shape[0] + 10,
                                  inputmrc.shape[1] + 10,
                                  inputmrc.shape[2] + 10), dtype=np.float64)
    extended_inputmrc[6:6 + inputmrc.shape[0],
                      6:6 + inputmrc.shape[1],
                      6:6 + inputmrc.shape[2]] = inputmrc
    # Gaussian filtering; sigma=1 works well
    blurred = gaussian_filter(extended_inputmrc, sigma=1)
    # Find the surface with marching cubes (marching_cubes_lewiner was
    # renamed to measure.marching_cubes in scikit-image 0.19); the level is
    # fixed at 0.5 to match the Gaussian blurring.
    verts, faces, normals, values = measure.marching_cubes(blurred, level=0.5)
    # Surface area
    surface_area = measure.mesh_surface_area(verts, faces)
    # Volume: count voxels above the same level
    blurred[blurred >= 0.5] = 1
    blurred[blurred < 0.5] = 0
    volume = np.sum(blurred)
    # Sphericity: pi^(1/3) * (6V)^(2/3) / A, which equals 1 for a sphere
    sphericity = ((pi ** (1 / 3)) * ((6 * volume) ** (2 / 3))) / surface_area
    return sphericity

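# Sanity-check sketch (an addition, not from the original code): a solid
# binary sphere should give a sphericity close to 1. It is written to a
# temporary MRC file so it can go through calculate_sphericity unchanged.
import os
import tempfile
zz, yy, xx = np.mgrid[:64, :64, :64]
sphere = ((xx - 32) ** 2 + (yy - 32) ** 2 + (zz - 32) ** 2) <= 20 ** 2
tmp = os.path.join(tempfile.mkdtemp(), "sphere.mrc")
with mrcfile.new(tmp) as mrc:
    mrc.set_data(sphere.astype(np.float32))
print(calculate_sphericity(tmp))  # expected: ~1.0 (slightly below)
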
def get_test_set(img_per_class):
    """Infinite generator of (volume, one-hot label) test pairs.

    Draws the last img_per_class volumes of each class, complementing
    get_train_set; assumes len(classes) == 10.
    """
    files = []
    Y = []
    for i in classes:
        for j in range(img_per_class):
            filename = '../data/SNR003/' + str(i) + '/tomotarget' + str(499 - j) + '.mrc'
            files.append(filename)
            Y.append(classes.index(i))
    count = 0
    while True:
        if count == img_per_class * 10:
            count = 0
        with mrcfile.open(files[count]) as f:
            x = f.data.copy()
        # x = norm_3d(x)
        x = pad(x)
        x = np.expand_dims(x, axis=0)   # batch dimension
        x = np.expand_dims(x, axis=4)   # channel dimension
        y_arr = np.zeros(10)
        y_arr[Y[count]] = 1
        y_arr = np.expand_dims(y_arr, axis=0)
        count += 1
        yield x, y_arr

def read_mrc(filename):
    """Read an MRC file and return its data as a uint8 array."""
    with mrcfile.open(filename) as mrc:
        arr = mrc.data.astype(np.uint8)
    return arr

def __init__(
    self,
    mrc,
    pixelsize,
    voltage,
    spherical_aberration,
    amplitude_contrast,
    low_cutoff_res=30,
    high_cutoff_res=5,
):
    self.pixelsize = pixelsize
    self.voltage = voltage
    self.spherical_aberration = spherical_aberration
    self.amplitude_contrast = amplitude_contrast
    self.electron_wavelength = wavelength_from_voltage(voltage)
    # Accept either a path to an MRC file or an image array directly.
    if isinstance(mrc, str):
        with mrcfile.open(mrc, permissive=True) as f:
            img = f.data
    elif isinstance(mrc, np.ndarray):
        img = mrc
    # Log power spectrum, zero-padded to a square of the longest edge.
    self.spectrum = np.log(
        np.abs(fft.rfft2(img, s=(max(img.shape), max(img.shape)))))
    # keep a copy of the full spectrum
    self.full_spectrum = self.spectrum
    # reduce the spectrum to a desired range and subtract background
    self.spectrum = self._preprocess_spectrum(self.spectrum, low_cutoff_res,
                                              high_cutoff_res)

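# Usage sketch. The surrounding class name is not shown in this snippet, so
# `CtfSpectrum` below is a hypothetical stand-in; the parameter values are
# typical cryo-EM settings, not taken from the original.
est = CtfSpectrum(
    "micrograph_0001.mrc",
    pixelsize=1.06,            # Angstrom / pixel
    voltage=300,               # kV
    spherical_aberration=2.7,  # mm
    amplitude_contrast=0.07,
)
print(est.spectrum.shape)
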
def process_experimental_map(map_file, filter_res, contour_level):
    """Filter and resample experimental maps to given resolution."""
    # Assume all pixel sizes are equal and take the x dimension
    with mrcfile.open(map_file) as mrc:
        pixel_size = mrc.voxel_size['x']
    # Create temporary name
    fnHash = createHash()
    # Uncompress map
    if map_file[-3:] == '.gz':
        with mrcfile.open(map_file) as mrc:
            V_exp = mrc.data.copy()
        uncompressed_map = '%sExp.map' % fnHash
        with mrcfile.new(uncompressed_map) as mrc:
            mrc.set_data(V_exp)
    else:
        uncompressed_map = map_file
    # Resize to pixel size of 1A/pixel
    ok = runJob("xmipp_image_resize -i %s -o %sResized.map --factor %f"
                % (uncompressed_map, fnHash, pixel_size))
    # Filter to specified resolution
    if ok:
        ok = runJob("xmipp_transform_filter -i %sResized.map -o %sFiltered.map "
                    "--fourier low_pass %f --sampling 1"
                    % (fnHash, fnHash, filter_res))
    # Get mask by thresholding at the contour level provided
    if ok:
        ok = runJob("xmipp_transform_threshold -i %sResized.map -o %sMask.map "
                    "--select below %f --substitute binarize -v 0"
                    % (fnHash, fnHash, contour_level))
    # Set filtered volume and mask
    if ok:
        with mrcfile.open(fnHash + 'Filtered.map') as mrc:
            Vf = mrc.data.copy()
        with mrcfile.open(fnHash + 'Mask.map') as mrc:
            Vmask = mrc_mask_to_binary(mrc.data.copy())
    else:
        Vf, Vmask = None, None
    # Remove all temporary files produced
    os.system("rm -f %s*" % fnHash)
    return Vf, Vmask

def loader(imageblock):
    # `image_path` and `mmap` are free variables from the enclosing scope;
    # this loader is meant to be built as a closure over them.
    if mmap is True:
        mrc = mrcfile.mmap(image_path)
    else:
        mrc = mrcfile.open(image_path)
    imageblock.data = mrc.data
    # voxel_size is an (x, y, z) record; convert to a plain array and
    # reverse it to (z, y, x) to match the data axis order.
    pixel_size = structured_to_unstructured(mrc.voxel_size)[::-1]
    imageblock.pixel_size = pixel_size

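# The free variables above suggest this loader is created inside a factory;
# a minimal sketch of that pattern (an assumption, with a hypothetical
# ImageBlock container exposing .data and .pixel_size):
def make_mrc_loader(image_path, mmap=False):
    def loader(imageblock):
        mrc = mrcfile.mmap(image_path) if mmap else mrcfile.open(image_path)
        imageblock.data = mrc.data
        imageblock.pixel_size = structured_to_unstructured(mrc.voxel_size)[::-1]
    return loader

block = ImageBlock()
make_mrc_loader("tomogram.mrc", mmap=True)(block)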