def __init__(self, inputs, backcompat_horizons_phil):
    # inputs is an instance of class api
    self.horizons_phil = backcompat_horizons_phil
    integration_core.__init__(self)
    self.inputai = inputs.solution_setting_ai  # C++ autoindex engine
    self.indexing_ai = inputs.indexing_ai
    # Note: here we may be working with a non-primitive cell, so we
    # must work with systematic absences... not implemented yet.
    if self.indexing_ai.getData().size() < 40:
        return  # initial protection
    self.inputpd = inputs.solution_pd  # parameter dictionary
    self.inputpd["symmetry"] = inputs.one_setting["best_subsym"]
    self.inputframes = inputs.frames
    self.imagefiles = inputs.files
    self.spotfinder = inputs.spotfinder_results
    self.frame_numbers = sorted(self.spotfinder.pd['osc_start'].keys())
    self.image_centers = inputs.image_centers
    print("IMAGE CENTERS", self.image_centers)

    # initial resolution from DISTL
    resolution_est = float(self.inputpd['resolution_inspection'])
    print("initial resolution from DISTL", resolution_est)

    # resolution limit of the strong spots used for indexing
    resolution_str = self.indexing_ai.high().reciprocal_spacing
    resolution = max(resolution_est, resolution_str)
    print("resolution limit of the strong spots used for indexing", resolution)
    self.limiting_resolution = resolution
    # print("resolution: %.2f %.2f" % (resolution_est, resolution_str))

    self.pixel_size = inputs.pixel_size
    self.set_pixel_size(inputs.pixel_size)
    self.set_detector_size(int(self.inputpd["size1"]), int(self.inputpd["size2"]))
    self.pre2m = inputs.pre2m

    self.set_up_mask_focus()
    self.initialize_increments()

    T = Timer("concept")
    from cctbx import sgtbx
    self.integration_concept(
        image_number=0,
        cb_op_to_primitive=sgtbx.change_of_basis_op(),  # identity; supports only primitive lattices
        verbose_cv=self.horizons_phil.indexing.verbose_cv,
        background_factor=self.horizons_phil.integration.background_factor)
    T = Timer("proper")
    self.integration_proper()
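# Aside: a minimal standalone sketch of the libtbx Timer idiom used above.
# A Timer starts timing on construction and reports when the object is
# destroyed, so rebinding T ends the "concept" interval just as "proper"
# begins. The workload below is a stand-in, not part of the integration code.
from libtbx.development.timers import Timer

T = Timer("first phase")
_ = sum(i * i for i in range(10 ** 6))  # stand-in workload
T = Timer("second phase")  # rebinding finalizes the first timer
_ = sum(i * i for i in range(10 ** 6))
del T  # finalizes the second timer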
def get_frames_from_mysql(self, params):
    T = Timer("frames")
    CART = manager(params)
    db = CART.connection()
    cursor = db.cursor()
    cursor.execute("SELECT * FROM %s_frame;" % params.mysql.runtag)
    ALL = cursor.fetchall()
    from cctbx.crystal_orientation import crystal_orientation
    orientations = [crystal_orientation(
        (a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15], a[16]), False)
        for a in ALL]
    return dict(
        frame_id=flex.int([a[0] for a in ALL]),
        wavelength=flex.double([a[1] for a in ALL]),
        beam_x=flex.double([a[2] for a in ALL]),
        beam_y=flex.double([a[3] for a in ALL]),
        distance=flex.double([a[4] for a in ALL]),
        orientation=orientations,
        rotation100_rad=flex.double([a[17] for a in ALL]),
        rotation010_rad=flex.double([a[18] for a in ALL]),
        rotation001_rad=flex.double([a[19] for a in ALL]),
        half_mosaicity_deg=flex.double([a[20] for a in ALL]),
        wave_HE_ang=flex.double([a[21] for a in ALL]),
        wave_LE_ang=flex.double([a[22] for a in ALL]),
        domain_size_ang=flex.double([a[23] for a in ALL]),
        unique_file_name=[a[24] for a in ALL],
    )
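# Usage sketch for the loader above (hypothetical `params` with a populated
# mysql.runtag): every value in the returned dict has one entry per row of
# the %s_frame table, so frame attributes can be read side by side.
#
#   frames = self.get_frames_from_mysql(params)
#   for i in range(len(frames["frame_id"])):
#       print(frames["frame_id"][i], frames["wavelength"][i],
#             frames["unique_file_name"][i])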
def get_HKL(self, cursor, isoform, run_tags):
    name = self.db_experiment_tag
    if run_tags is not None:
        extrajoin = "JOIN %s_runs runs ON frames.runs_run_id = runs.run_id" % name
        for tag in run_tags.split():
            tag = tag.strip()
            extrajoin += " AND runs.tags LIKE '%%%s%%'" % tag
    else:
        extrajoin = ""
    if self.params.include_negatives:
        extrawhere = ""
    else:
        extrawhere = "WHERE obs.i >= 0"
    query = """SELECT hkls.h, hkls.k, hkls.l
               FROM %s_observations obs
               JOIN %s_hkls hkls ON obs.hkls_id = hkls.hkl_id
               JOIN %s_isoforms isos ON hkls.isoforms_isoform_id = isos.isoform_id
                 AND isos.isoform_id = %d
               JOIN %s_frames frames ON obs.frames_id = frames.frame_id
                 AND frames.trials_id = %d
               JOIN %s_trial_rungroups trg ON trg.trials_id = frames.trials_id
                 AND trg.rungroups_id = frames.rungroups_id AND trg.active
               %s
               %s""" % (name, name, name, self.isoforms[isoform]['isoform_id'],
                        name, self.trial_id, name, extrajoin, extrawhere)
    # print(query)
    if run_tags:
        print("%s, isoform %s" % (run_tags, isoform))
    else:
        print("isoform %s" % isoform)
    T = Timer("Reading db...")
    cursor.execute(query)
    del T
    T = Timer("Getting results...")
    ALL = cursor.fetchall()
    del T
    T = Timer("Parsing results...")
    indices = flex.miller_index([(a[0], a[1], a[2]) for a in ALL])
    del T
    print("ISOFORM %s: %d results" % (isoform, len(indices)))
    return indices
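# Usage sketch (hypothetical MySQLdb cursor and tag string): each
# space-separated entry in run_tags adds a LIKE clause to the join, so a
# frame is selected only if the runs.tags column matches every tag.
#
#   indices = self.get_HKL(cursor, isoform="A", run_tags="lysozyme batch2")
#   print(indices.size(), "Miller indices")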
def get_obs_from_mysql(self, params):
    T = Timer("database")
    CART = manager(params)
    db = CART.connection()
    cursor = db.cursor()
    cursor.execute("SELECT DISTINCT frame_id FROM %s_spotfinder;" % params.mysql.runtag)
    AAA = cursor.fetchall()
    print("From the CV log file text output there are %d distinct frames with spotfinder spots" % len(AAA))
    if params.max_frames == 0:
        cursor.execute("SELECT * FROM %s_spotfinder;" % params.mysql.runtag)
    else:
        cursor.execute("SELECT * FROM %s_spotfinder WHERE frame_id < %d;" % (
            params.mysql.runtag, params.max_frames))
    return cursor.fetchall()
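# Usage sketch (hypothetical params): max_frames == 0 is the "no limit"
# convention; a positive value restricts the fetch to frame_id < max_frames.
#
#   spots = self.get_obs_from_mysql(params)
#   print("fetched %d spotfinder rows" % len(spots))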
def do_GET(self):
    T = Timer("do_GET")
    parsed = urlparse(self.path)
    qs = parse_qs(parsed.query)

    expect = self.headers.getheaders("Expect")
    if len(expect) >= 1:
        if True in [item.find("200") >= 0 for item in expect]:
            self.send_response(200)  # untested; has no apparent effect on libcurl
            return

    log = self.do_GET_run(qs)

    ctype = 'text/plain'
    self.send_response(200)
    self.send_header("Content-type", ctype)
    self.send_header("Content-length", len(log))
    self.end_headers()
    self.wfile.write(log)
    self.opt_logging()
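# Client-side sketch for this handler (Python 2 standard library, matching
# the server code; host, port and filename are assumptions):
#
#   import urllib2
#   url = "http://localhost:8125/spotfinder?filename=/data/image_0001.img"
#   print(urllib2.urlopen(url).read())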
def get_spotfinder_url(file_object, host, port):
    testurl = "%s:%d" % (host, port)
    selector = "/spotfinder"
    start_index = 0
    stop_index = file_object.linearintdata.size()
    raw_string = file_object.linearintdata.slice_to_byte_str(start_index, stop_index)
    query_object = [
        ("moduleindex", file_object.__dict__.get("sliceindex", -1)),
        ("filename", file_object.filename),
        ("bin", 1),
        ("vendortype", file_object.vendortype),
        ("beam_center_reference_frame", file_object.beam_center_reference_frame),
        ("beam_center_convention", file_object.beam_center_convention),
        ("header", file_object.header),
        ("headerlines", ""),
    ]
    for item in ['DISTANCE', 'PHI', 'WAVELENGTH', 'TWOTHETA', 'OSC_RANGE',
                 'CCD_IMAGE_SATURATION', 'OSC_START', 'DETECTOR_SN',
                 'PIXEL_SIZE', 'SIZE1', 'SIZE2',
                 'BEAM_CENTER_X', 'BEAM_CENTER_Y']:
        query_object.append((item, file_object.parameters[item]))
    files = [("adsc_data", file_object.filename, raw_string)]
    print("length of data in ints", stop_index)
    print("length of data in bytes", len(raw_string))
    assert len(raw_string) == 4 * stop_index  # 4 bytes per int
    T = Timer("do_POST")
    Response = post_multipart(host=testurl, selector=selector,
                              fields=query_object, files=files)
    del T
    print(Response.getresponse().read())
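# Usage sketch: any detector image object exposing linearintdata, filename,
# vendortype, header and a parameters dict can be posted this way, e.g.
# (assumed entry point and paths):
#
#   from iotbx.detectors import ImageFactory
#   image = ImageFactory("/data/image_0001.img")
#   image.read()
#   get_spotfinder_url(image, host="localhost", port=8125)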
import random
from math import pi

from scitbx import matrix
from libtbx.test_utils import approx_equal
from libtbx.development.timers import Timer
from dials.algorithms.refinement.refinement_helpers import (
    dR_from_axis_and_angle as dR_from_axis_and_angle_cpp,
)
from dials.algorithms.refinement.refinement_helpers import dR_from_axis_and_angle_py

# generate random axes and angles
trials = []
for i in range(10000):
    trials.append((matrix.col.random(3, -1, 1).normalize(),
                   random.uniform(0, 2 * pi)))

t = Timer("dR_from_axis_and_angle in C++")
dR_0 = []
for trial in trials:
    dR_0.append(dR_from_axis_and_angle_cpp(trial[0], trial[1]))
del t
print()

t = Timer("dR_from_axis_and_angle in Python")
dR_1 = []
for trial in trials:
    dR_1.append(dR_from_axis_and_angle_py(trial[0], trial[1]))
del t
print()
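# Consistency check (sketch): with approx_equal already imported above, the
# two implementations can be compared element-wise; both are expected to
# return equivalent 3x3 derivative matrices for the same axis and angle.
for a, b in zip(dR_0, dR_1):
    assert approx_equal(a, b)
print("OK: C++ and Python implementations agree")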
def do_POST(self):
    T = Timer("do_POST")
    parsed = urlparse(self.path)
    qs = parse_qs(parsed.query)

    expect = self.headers.getheaders("Expect")
    if len(expect) >= 1:
        if True in [item.find("200") >= 0 for item in expect]:
            self.send_response(200)  # untested; has no apparent effect on libcurl
            return

    # Get arguments by reading the body of the request.
    # Read in chunks to avoid straining socket.read(); around the 10 or
    # 15 Mb mark, some platforms begin to have problems (bug #792570).
    max_chunk_size = 10 * 1024 * 1024
    size_remaining = int(self.headers["content-length"])
    L = []
    while size_remaining:
        chunk_size = min(size_remaining, max_chunk_size)
        L.append(self.rfile.read(chunk_size))
        size_remaining -= len(L[-1])
    data = ''.join(L)
    post_data = StringIO(data)

    # Parse the multipart/form-data
    contentTypeHeader = self.headers.getheaders('content-type').pop()

    # Extract the boundary parameter in the content-type header
    headerParameters = contentTypeHeader.split(";")
    boundary = headerParameters[1].split("=")
    boundary = boundary[1].strip()
    parts = cgi.parse_multipart(
        post_data,
        {"boundary": boundary,
         "content-disposition": self.headers.getheaders('content-disposition')})
    print("*****************************")
    for item in parts.keys():
        if len(parts[item][0]) < 1000:
            print(item, parts[item])
    print("*****************************")

    if parts["filename"][0].find("EXIT") >= 0:
        self.shutdown()
        return

    from spotfinder.diffraction.imagefiles import spotfinder_image_files as ImageFiles
    from spotfinder.diffraction.imagefiles import Spotspickle_argument_module

    response_params = copy.deepcopy(common_parameters_singleton).extract()

    Files = ImageFiles(Spotspickle_argument_module(parts["filename"][0]),
                       response_params)

    print("Final image object:")
    Files.images[0].show_header()
    print("beam_center_convention", Files.images[0].beam_center_convention)
    print("beam_center_reference_frame", Files.images[0].beam_center_reference_frame)

    logfile = StringIO()
    if response_params.distl.bins.verbose:
        sys.stdout = logfile

    from spotfinder.applications.wrappers import spotfinder_factory
    S = spotfinder_factory(None, Files, response_params)
    print()
    sys.stdout = sys.__stdout__

    frames = Files.frames()

    sys.stdout = logfile

    print("Image: %s" % parts["filename"][0])
    from spotfinder.applications.stats_distl import pretty_image_stats, notes
    for frame in frames:
        #pretty_image_stats(S, frame)
        #notes(S, frames[0])
        module_image_stats(S, frame)

    sys.stdout = sys.__stdout__
    log = logfile.getvalue()
    print(log)

    ctype = 'text/plain'
    self.send_response(200)
    self.send_header("Content-type", ctype)
    self.send_header("Content-length", len(log))
    self.end_headers()
    self.wfile.write(log)
    self.opt_logging()
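# Client sketch: a multipart POST whose "filename" field contains "EXIT"
# shuts the server down; any other value is treated as an image path on the
# server's filesystem. A client shaped like get_spotfinder_url() above
# exercises the normal path, e.g. (hypothetical host and path):
#
#   post_multipart(host="localhost:8125", selector="/spotfinder",
#                  fields=[("filename", "/data/image_0001.img")], files=[])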
def __init__(self, datadir, n_frame, transmittance, apply_noise,
             plot=False, esd_plot=False, half_data_flag=0):
    # read the ground truth values back in
    from six.moves import cPickle as pickle
    ordered_intensities = pickle.load(
        open(os.path.join(datadir, "intensities.pickle"), "rb"))
    frames = pickle.load(open(os.path.join(datadir, "frames.pickle"), "rb"))
    sim = pickle.load(
        open(os.path.join(datadir, "simulated%05d_0.pickle" % n_frame), "rb"))
    print("accepted obs %d" % len(sim["observed_intensity"]))

    FSIM = prepare_simulation_with_noise(
        sim,
        transmittance=transmittance,
        apply_noise=apply_noise,
        ordered_intensities=ordered_intensities,
        half_data_flag=half_data_flag)

    I, I_visited, G, G_visited = I_and_G_base_estimate(FSIM)
    model_I = ordered_intensities.data()[0:len(I)]
    model_G = frames["scale_factors"][0:len(G)]
    model_B = frames["B_factors"][0:len(G)]

    T = Timer("%d frames, %f transmittance, %s noise" % (
        n_frame, transmittance, {False: "NO", True: "YES"}[apply_noise]))

    mapper = mapper_factory(xscale6e)
    minimizer = mapper(I, G, I_visited, G_visited, FSIM)
    del T

    minimizer.show_summary()
    Fit = minimizer.e_unpack()
    show_correlation(Fit["G"], model_G, G_visited, "Correlation of G:")
    show_correlation(Fit["B"], model_B, G_visited, "Correlation of B:")
    show_correlation(Fit["I"], model_I, I_visited, "Correlation of I:")
    Fit_stddev = minimizer.e_unpack_stddev()

    if plot:
        plot_it(Fit["G"], model_G, mode="G")
        plot_it(Fit["B"], model_B, mode="B")
        plot_it(Fit["I"], model_I, mode="I")
    print()

    if esd_plot:
        minimizer.esd_plot()

    from cctbx.examples.merging.show_results import show_overall_observations
    table1, self.n_bins, self.d_min = show_overall_observations(
        Fit["I"], Fit_stddev["I"], I_visited,
        ordered_intensities, FSIM,
        title="Statistics for all reflections")

    self.FSIM = FSIM
    self.ordered_intensities = ordered_intensities
    self.Fit_I = Fit["I"]
    self.Fit_I_stddev = Fit_stddev["I"]
    self.I_visited = I_visited
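# Usage sketch (hypothetical call site; the three pickles must have been
# written to `datadir` by the companion simulation step, and <this class>
# stands for the class this __init__ belongs to):
#
#   job = <this class>(datadir="sim_out", n_frame=2000,
#                      transmittance=0.5, apply_noise=True)
#   print(job.n_bins, job.d_min)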
def do_POST(self):
    T = Timer("do_POST")
    parsed = urlparse(self.path)
    qs = parse_qs(parsed.query)

    expect = self.headers.getheaders("Expect")
    if len(expect) >= 1:
        if True in [item.find("100") >= 0 for item in expect]:
            self.send_response(100)  # untested; has no apparent effect on libcurl

    # Get arguments by reading the body of the request.
    # Read in chunks to avoid straining socket.read(); around the 10 or
    # 15 Mb mark, some platforms begin to have problems (bug #792570).
    max_chunk_size = 10 * 1024 * 1024
    size_remaining = int(self.headers["content-length"])
    L = []
    while size_remaining:
        chunk_size = min(size_remaining, max_chunk_size)
        L.append(self.rfile.read(chunk_size))
        size_remaining -= len(L[-1])
    data = ''.join(L)
    post_data = StringIO(data)

    # Parse the multipart/form-data
    contentTypeHeader = self.headers.getheaders('content-type').pop()

    # Extract the boundary parameter in the content-type header
    headerParameters = contentTypeHeader.split(";")
    boundary = headerParameters[1].split("=")
    boundary = boundary[1].strip()
    parts = cgi.parse_multipart(
        post_data,
        {"boundary": boundary,
         "content-disposition": self.headers.getheaders('content-disposition')})
    print("*****************************")
    for item in parts.keys():
        if len(parts[item][0]) < 1000:
            print(item, parts[item])
    print("*****************************")

    from iotbx.detectors.image_from_http_request import module_or_slice_from_http_request
    imgobj = module_or_slice_from_http_request(parts)
    imgobj.read()
    print("Final image object:")
    imgobj.show_header()

    from spotfinder.diffraction.imagefiles import image_files, file_names
    from spotfinder.diffraction.imagefiles import Spotspickle_argument_module
    from spotfinder.applications.overall_procedure import spotfinder_no_pickle

    class server_imagefiles(image_files):
        def __init__(self):
            pass

    Files = server_imagefiles()
    Files.filenames = file_names(Spotspickle_argument_module(imgobj.filename))
    Files.images = [imgobj]

    S = spotfinder_no_pickle(Files, s3_passthru="-s3 4", spot_convention=0)

    frames = Files.frames()

    logfile = StringIO()
    sys.stdout = logfile

    from spotfinder.applications.stats_distl import pretty_image_stats, notes
    for frame in frames:
        #pretty_image_stats(S, frame)
        #notes(S, frames[0])
        module_image_stats(S, frame)

    sys.stdout = sys.__stdout__
    log = logfile.getvalue()
    print(log)

    ctype = 'text/plain'
    self.send_response(200)
    self.send_header("Content-type", ctype)
    self.send_header("Content-length", len(log))
    self.end_headers()
    self.wfile.write(log)
    self.opt_logging()
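# Client sketch: unlike the do_POST() handler above, this variant receives
# the raw pixel data in the request body, so client and server need not
# share a filesystem. get_spotfinder_url() in this collection builds exactly
# such a request, e.g. (hypothetical image object and endpoint):
#
#   get_spotfinder_url(image, host="localhost", port=8125)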
def read_data(self, params):
    from os import listdir, path
    from libtbx import easy_pickle
    from cctbx.crystal_orientation import crystal_orientation  # XXX Necessary later?

    # The list of integration directories comes from the phil data parameter.
    dir_list = params.data

    T = Timer("populate C++ store with register line")

    itile = flex.int()
    self.spotfx = flex.double()
    self.spotfy = flex.double()
    self.spotcx = flex.double()
    self.spotcy = flex.double()
    self.observed_cntr_x = flex.double()
    self.observed_cntr_y = flex.double()
    self.refined_cntr_x = flex.double()
    self.refined_cntr_y = flex.double()
    self.HKL = flex.miller_index()
    self.radial = flex.double()
    self.azimut = flex.double()

    self.FRAMES = dict(
        frame_id=flex.int(),
        wavelength=flex.double(),
        beam_x=flex.double(),
        beam_y=flex.double(),
        distance=flex.double(),
        orientation=[],
        rotation100_rad=flex.double(),
        rotation010_rad=flex.double(),
        rotation001_rad=flex.double(),
        half_mosaicity_deg=flex.double(),
        wave_HE_ang=flex.double(),
        wave_LE_ang=flex.double(),
        domain_size_ang=flex.double(),
        unique_file_name=[]
    )

    self.frame_id = flex.int()
    for directory in dir_list:
        if self.params.max_frames is not None and \
           len(self.FRAMES['frame_id']) >= self.params.max_frames:
            break
        for entry in listdir(directory):
            tttd = d = easy_pickle.load(path.join(directory, entry))

            # XXX Hardcoded, should honour the phil! And should be verified
            # to be consistent for each correction vector later on!
            setting_id = d['correction_vectors'][0][0]['setting_id']
            if setting_id != self.params.bravais_setting_id:
                continue

            # Assert that effective_tiling is consistent, and a non-zero
            # multiple of eight (only whole sensors considered for now -- see
            # mark10.fit_translation4.print_table()). self.tiles is
            # initialised to zero-length in the C++ code. XXX Should now be
            # able to retire the "effective_tile_boundaries" parameter.
            #
            # XXX Other checks from the correction_vector plot, such as
            # consistent setting?
            if hasattr(self, 'tiles') and len(self.tiles) > 0:
                assert (self.tiles == d['effective_tiling']).count(False) == 0
            else:
                assert len(d['effective_tiling']) > 0 \
                    and len(d['effective_tiling']) % 8 == 0
                self.tiles = d['effective_tiling']

            if not self.standalone_check(self, setting_id, entry, d, params.diff_cutoff):
                continue

            # Reading the frame data. The frame ID is just the index of the image.
            self.FRAMES['frame_id'].append(len(self.FRAMES['frame_id']) + 1)  # XXX try zero-based here
            self.FRAMES['wavelength'].append(d['wavelength'])
            self.FRAMES['beam_x'].append(d['xbeam'])
            self.FRAMES['beam_y'].append(d['ybeam'])
            self.FRAMES['distance'].append(d['distance'])
            self.FRAMES['orientation'].append(d['current_orientation'][0])
            self.FRAMES['rotation100_rad'].append(0)  # XXX FICTION
            self.FRAMES['rotation010_rad'].append(0)  # XXX FICTION
            self.FRAMES['rotation001_rad'].append(0)  # XXX FICTION
            self.FRAMES['half_mosaicity_deg'].append(0)  # XXX FICTION
            # self.FRAMES['wave_HE_ang'].append(0.995 * d['wavelength'])  # XXX FICTION -- what does Nick use?
            # self.FRAMES['wave_LE_ang'].append(1.005 * d['wavelength'])  # XXX FICTION
            self.FRAMES['wave_HE_ang'].append(d['wavelength'])
            self.FRAMES['wave_LE_ang'].append(d['wavelength'])
            self.FRAMES['domain_size_ang'].append(5000)  # XXX FICTION
            self.FRAMES['unique_file_name'].append(path.join(directory, entry))

            print("added frame", self.FRAMES['frame_id'][-1], entry)

            for cv in d['correction_vectors'][0]:
                # Try to reproduce every prediction using the model from the
                # frame -- skip the correction vector if this fails. Could be
                # because of wrong HKLs?
                #
                # Copy these two images to the test directory to reproduce:
                #   int-s01-2011-02-20T21:27Z37.392_00000.pickle
                #   int-s01-2011-02-20T21:27Z37.725_00000.pickle
                from rstbx.bandpass import use_case_bp3, parameters_bp3
                from scitbx.matrix import col
                from math import hypot, pi

                indices = flex.miller_index()
                indices.append(cv['hkl'])

                parameters = parameters_bp3(
                    indices=indices,
                    orientation=self.FRAMES['orientation'][-1],
                    incident_beam=col(self.INCIDENT_BEAM),
                    packed_tophat=col((1., 1., 0.)),
                    detector_normal=col(self.DETECTOR_NORMAL),
                    detector_fast=col((0., 1., 0.)),
                    detector_slow=col((1., 0., 0.)),
                    pixel_size=col((0.11, 0.11, 0)),  # XXX hardcoded, twice!
                    pixel_offset=col((0., 0., 0.0)),
                    distance=self.FRAMES['distance'][-1],
                    detector_origin=col((-self.FRAMES['beam_x'][-1],
                                         -self.FRAMES['beam_y'][-1], 0)))
                ucbp3 = use_case_bp3(parameters=parameters)

                ucbp3.set_active_areas(self.tiles)
                integration_signal_penetration = 0.5
                ucbp3.set_sensor_model(
                    thickness_mm=0.5,
                    mu_rho=8.36644,  # CS-PAD detector at 1.3 Angstrom
                    signal_penetration=integration_signal_penetration)

                half_mosaicity_rad = self.FRAMES['half_mosaicity_deg'][-1] * pi / 180.
                ucbp3.set_mosaicity(half_mosaicity_rad)
                ucbp3.set_bandpass(self.FRAMES['wave_HE_ang'][-1],
                                   self.FRAMES['wave_LE_ang'][-1])
                ucbp3.set_orientation(self.FRAMES['orientation'][-1])
                ucbp3.set_domain_size(self.FRAMES['domain_size_ang'][-1])

                ucbp3.picture_fast_slow_force()
                ucbp3_prediction = 0.5 * (ucbp3.hi_E_limit + ucbp3.lo_E_limit)
                diff = hypot(ucbp3_prediction[0][0] - cv['predspot'][1],
                             ucbp3_prediction[0][1] - cv['predspot'][0])

                if diff > self.params.diff_cutoff:
                    print("HATTNE INDEXING SLIPUP")
                    continue

                # For some reason, the setting_id is recorded for each
                # correction vector as well -- assert that it is consistent.
                assert cv['setting_id'] == setting_id

                # For each observed spot, figure out what tile it is on, and
                # store it in itile. XXX This is probably not necessary here,
                # as correction_vector_store::register_line() does the same
                # thing.
                obstile = None
                for i in range(0, len(self.tiles), 4):
                    if cv['obsspot'][0] >= self.tiles[i + 0] \
                       and cv['obsspot'][0] <= self.tiles[i + 2] \
                       and cv['obsspot'][1] >= self.tiles[i + 1] \
                       and cv['obsspot'][1] <= self.tiles[i + 3]:
                        obstile = i
                        break
                assert obstile is not None
                itile.append(obstile)  # XXX unused variable?

                # ID of the current frame.
                self.frame_id.append(self.FRAMES['frame_id'][-1])

                self.spotfx.append(cv['obsspot'][0])
                self.spotfy.append(cv['obsspot'][1])
                self.spotcx.append(cv['predspot'][0])
                self.spotcy.append(cv['predspot'][1])

                self.observed_cntr_x.append(cv['obscenter'][0])
                self.observed_cntr_y.append(cv['obscenter'][1])
                self.refined_cntr_x.append(cv['refinedcenter'][0])
                self.refined_cntr_y.append(cv['refinedcenter'][1])

                self.HKL.append(cv['hkl'])

                self.azimut.append(cv['azimuthal'])
                self.radial.append(cv['radial'])

            # Honour the max_frames phil parameter.
            if self.params.max_frames is not None and \
               len(self.FRAMES['frame_id']) >= self.params.max_frames:
                break

    """
    For the first 5000 images:

      STATS FOR TILE 14
      sel_delx           -6.59755265524 -4.41676757746e-10 5.7773557278
      sel_dely           -6.30796620634 -8.3053734774e-10  6.3362200841
      symmetric_offset_x -6.5975526548  -2.73229417105e-15 5.77735572824
      symmetric_offset_y -6.30796620551  1.16406818748e-15 6.33622008493
      symmetric rsq       0.000255199593417 2.95803352999  56.1918083904
      rmsd                1.71989346472

    For the first 10000 images:

      STATS FOR TILE 14
      sel_delx           -6.92345292727  6.9094552919e-10  611.497770006
      sel_dely           -6.39690476093  1.1869355797e-09  894.691806871
      symmetric_offset_x -6.92345292796  1.28753258216e-14 611.497770005
      symmetric_offset_y -6.39690476212 -2.10251420168e-15 894.69180687
      symmetric rsq       1.58067791823e-05 30.3331143761  1174402.952
      rmsd                5.50755066941
    """

    # This is mark3.fit_translation2.nominal_tile_centers()
    self.To_x = flex.double(len(self.tiles) // 4)
    self.To_y = flex.double(len(self.tiles) // 4)
    for x in range(len(self.tiles) // 4):
        self.To_x[x] = (self.tiles[4 * x + 0] + self.tiles[4 * x + 2]) / 2
        self.To_y[x] = (self.tiles[4 * x + 1] + self.tiles[4 * x + 3]) / 2

    delx = self.spotcx - self.spotfx
    dely = self.spotcy - self.spotfy
    self.delrsq = self.delrsq_functional(calcx=self.spotcx, calcy=self.spotcy)

    self.initialize_per_tile_sums()
    self.tile_rmsd = [0.] * (len(self.tiles) // 4)
    self.asymmetric_tile_rmsd = [0.] * (len(self.tiles) // 4)

    # XXX Is (beam1x, beam1y) really the observed center and (beamrx,
    # beamry) the refined center? Nick thinks YES!
    #
    # itile2 = flex.int([self.register_line(a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9]) for a in ALL])
    itile2 = flex.int(
        [self.register_line(a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7])
         for a in zip(self.observed_cntr_x, self.observed_cntr_y,
                      self.refined_cntr_x, self.refined_cntr_y,
                      self.spotfx, self.spotfy,
                      self.spotcx, self.spotcy)])
    if params.show_consistency:
        consistency_controls(self, params)

    T = Timer("calcs based on C++ store")
    self.selections = []
    self.selection_counts = []
    for x in range(len(self.tiles) // 4):
        if self.tilecounts[x] == 0:
            self.radii[x] = 0
            self.mean_cv[x] = matrix.col((0, 0))
        else:
            self.radii[x] /= self.tilecounts[x]
            self.mean_cv[x] = matrix.col(self.mean_cv[x]) / self.tilecounts[x]

        selection = (self.master_tiles == x)
        self.selections.append(selection)
        selected_cv = self.master_cv.select(selection)
        self.selection_counts.append(selected_cv.size())  # for curvatures

        if len(selected_cv) > 0:
            self.asymmetric_tile_rmsd[x] = math.sqrt(
                flex.mean(self.delrsq.select(selection)))
            sel_delx = delx.select(selection)
            sel_dely = dely.select(selection)
            symmetric_offset_x = sel_delx - self.mean_cv[x][0]
            symmetric_offset_y = sel_dely - self.mean_cv[x][1]
            symmetricrsq = symmetric_offset_x * symmetric_offset_x + \
                           symmetric_offset_y * symmetric_offset_y
            self.tile_rmsd[x] = math.sqrt(flex.mean(symmetricrsq))
        else:
            self.asymmetric_tile_rmsd[x] = 0.
            self.tile_rmsd[x] = 0.

    self.overall_N = flex.sum(flex.int([int(t) for t in self.tilecounts]))
    self.overall_cv = matrix.col(self.overall_cv) / self.overall_N
    self.overall_rmsd = math.sqrt(self.sum_sq_cv / self.overall_N)

    # Master weights for the mark3 calculation; takes 0.3 seconds.
    self.master_weights = flex.double(len(self.master_tiles))
    self.largest_sample = max(self.tilecounts)
    for x in range(len(self.tiles) // 4):
        self.master_weights.set_selected(self.selections[x], self.tile_weight(x))

    print("AFTER read cx, cy", flex.mean(self.spotcx), flex.mean(self.spotcy))
    print("AFTER read fx, fy", flex.mean(self.spotfx), flex.mean(self.spotfy))
    print("AFTER read rmsd_x, rmsd_y",
          math.sqrt(flex.mean(flex.pow(self.spotcx - self.spotfx, 2))),
          math.sqrt(flex.mean(flex.pow(self.spotcy - self.spotfy, 2))))

    return
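# Standalone sketch of the per-axis rmsd printed above: the root of the mean
# squared calculated-minus-observed displacement, computed with flex (the
# values below are illustrative only).
#
#   from scitbx.array_family import flex
#   import math
#   calc = flex.double([100.2, 200.9, 150.4])
#   obs = flex.double([100.0, 201.0, 150.0])
#   rmsd = math.sqrt(flex.mean(flex.pow(calc - obs, 2)))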
def __init__(self, datadir, work_params, plot=False, esd_plot=False, half_data_flag=0):
    casetag = work_params.output.prefix

    # read the ground truth values back in
    from six.moves import cPickle as pickle

    # It is assumed (for now) that the reference millers contain a complete
    # asymmetric unit of indices, within the (d_max, d_min) region of
    # interest, and possibly outside the region.
    reference_millers = pickle.load(
        open(os.path.join(datadir, casetag + "_miller.pickle"), "rb"))
    experiment_manager = read_experiments(work_params)

    obs = pickle.load(
        open(os.path.join(datadir, casetag + "_observation.pickle"), "rb"))
    print("Read in %d observations" % len(obs["observed_intensity"]))
    reference_millers.show_summary(prefix="Miller index file ")

    print(len(obs["frame_lookup"]), len(obs["observed_intensity"]),
          flex.max(obs['miller_lookup']), flex.max(obs['frame_lookup']))
    max_frameno = flex.max(obs["frame_lookup"])

    from iotbx import mtz
    mtz_object = mtz.object(file_name=work_params.scaling.mtz_file)
    # for array in mtz_object.as_miller_arrays():
    #     this_label = array.info().label_string()
    #     print(this_label, array.observation_type())
    I_sim = mtz_object.as_miller_arrays()[0].as_intensity_array()
    I_sim.show_summary()

    MODEL_REINDEX_OP = work_params.model_reindex_op
    I_sim = I_sim.change_basis(MODEL_REINDEX_OP).map_to_asu()

    # Match up the isomorphous set (the simulated fake F's) with the
    # experimental unique set.
    matches = miller.match_multi_indices(
        miller_indices_unique=reference_millers.indices(),
        miller_indices=I_sim.indices())
    print("original unique", len(reference_millers.indices()))
    print("isomorphous set", len(I_sim.indices()))
    print("pairs", len(matches.pairs()))

    iso_data = flex.double(len(reference_millers.indices()))
    for pair in matches.pairs():
        iso_data[pair[0]] = I_sim.data()[pair[1]]

    reference_data = miller.array(miller_set=reference_millers, data=iso_data)
    reference_data.set_observation_type_xray_intensity()

    FOBS = prepare_observations_for_scaling(
        work_params,
        obs=obs,
        reference_intensities=reference_data,
        files=experiment_manager.get_files(),
        half_data_flag=half_data_flag)

    I, I_visited, G, G_visited = I_and_G_base_estimate(FOBS, params=work_params)
    print("I length", len(I), "G length", len(G),
          "(Reference set; entire asymmetric unit)")
    assert len(reference_data.data()) == len(I)
    # Presumably these assertions fail when half data are taken for CC1/2
    # or d_min is cut.
    model_I = reference_data.data()[0:len(I)]

    T = Timer("%d frames" % (len(G),))

    mapper = mapper_factory(xscale6e)
    minimizer = mapper(I, G, I_visited, G_visited, FOBS,
                       params=work_params,
                       experiments=experiment_manager.get_experiments())
    del T

    minimizer.show_summary()

    Fit = minimizer.e_unpack()
    Gstats = flex.mean_and_variance(Fit["G"].select(G_visited == 1))
    print("G mean and standard deviation:", Gstats.mean(),
          Gstats.unweighted_sample_standard_deviation())
    if "Bfactor" in work_params.levmar.parameter_flags:
        Bstats = flex.mean_and_variance(Fit["B"].select(G_visited == 1))
        print("B mean and standard deviation:", Bstats.mean(),
              Bstats.unweighted_sample_standard_deviation())
    show_correlation(Fit["I"], model_I, I_visited, "Correlation of I:")
    Fit_stddev = minimizer.e_unpack_stddev()

    # XXX FIXME known bug: the length of Fit["G"] could be smaller than the
    # length of experiment_manager.get_files(). Not sure if this has any
    # operational drawbacks; it is a result of half-dataset selection.
    if plot:
        plot_it(Fit["I"], model_I, mode="I")
        if "Rxy" in work_params.levmar.parameter_flags:
            show_histogram(Fit["Ax"], "Histogram of x rotation (degrees)")
            show_histogram(Fit["Ay"], "Histogram of y rotation (degrees)")
    print()

    if esd_plot:
        minimizer.esd_plot()

    from cctbx.examples.merging.show_results import show_overall_observations
    table1, self.n_bins, self.d_min = show_overall_observations(
        Fit["I"], Fit_stddev["I"], I_visited,
        reference_data, FOBS,
        title="Statistics for all reflections",
        work_params=work_params)

    self.FSIM = FOBS
    self.ordered_intensities = reference_data
    self.reference_millers = reference_millers
    self.Fit_I = Fit["I"]
    self.Fit_I_stddev = Fit_stddev["I"]
    self.I_visited = I_visited
    self.Fit = Fit
    self.experiments = experiment_manager
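# Downstream usage sketch: after construction, the fitted per-reflection
# intensities and their estimated standard deviations are exposed as
# attributes (hypothetical call site; <this class> stands for the class
# this __init__ belongs to):
#
#   job = <this class>(datadir, work_params)
#   fitted = job.Fit_I.select(job.I_visited == 1)
#   print(job.n_bins, job.d_min, fitted.size())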