def update_run_progress(self, run_position=0, image_name="", run_data=None):
    """Publish run status to the Redis database for Remote project's benefit

    run_position -- index of the current image within the run
    image_name   -- full name of the image being collected
    run_data     -- run metadata dict with keys such as "start", "total",
                    "directory", "omega", "width", "time", "distance",
                    "run_id"; may be None
    """
    self.logger.debug("Remote.update_run_progress")

    # Templated representation of the image name (sequence digits -> # marks).
    # Fall back to the raw name for unrecognized extensions; previously
    # my_repr was referenced before assignment in that case (NameError).
    if image_name.endswith(".cbf"):
        my_repr = re.sub(r"\d{4}\.cbf", r"####.cbf", image_name)
    elif image_name.endswith(".img"):
        my_repr = re.sub(r"\d{3}\.img", r"###.img", image_name)
    else:
        my_repr = image_name

    if run_data is None:
        run_data = {}

    assembled_data = {
        "beamline": self.beamline,
        "run_id": run_data.get("run_id", 0),
        "run_position": run_position,
        "run_start": run_data.get("start", 1),
        "run_total": run_data.get("total", 0),
        "directory": run_data.get("directory", ""),
        "current_image": image_name,
        "omega_start": run_data.get("omega", 0.0),
        "omega_delta": run_data.get("width", 0.0),
        "exposure": run_data.get("time", 0.0),
        "distance": run_data.get("distance", 0.0),
        "date": datetime.datetime.now().isoformat(),
        "repr": my_repr
    }

    # Publish data through redis.
    # BUG FIX: the two lazy %-args were previously passed as a single tuple,
    # which makes the logging module raise a formatting error.
    self.logger.debug("Publishing run_update_%s %s",
                      self.settings.ID, json.dumps(assembled_data))
    self.redis.publish("run_update_%s" % self.settings.ID,
                       json.dumps(assembled_data))

    # Save to Mongo (best effort - failures are logged, never raised)
    try:
        self.mongo_remote.run_data.update(
            {"run_id": run_data.get("run_id")},
            assembled_data,
            upsert=True,
            multi=False)
    # TODO - handle mongoDB exceptions more specifically
    except Exception:
        if self.logger:
            self.logger.exception("Error writing run_data to MongoDB")
def run(self): """The core process of the Launcher instance""" # Set up overwatcher if self.overwatch_id: self.ow_registrar = Registrar(site=self.site, ow_type="launcher", ow_id=self.overwatch_id) self.ow_registrar.register({ "site_id": json.dumps(self.launcher.get('site_tag')), "job_list": self.job_list }) try: timer = 0 # This is the server portion of the code while self.running: # Have Registrar update status every second if round(timer % 1, 1) in (0.0, 1.0): if self.overwatch_id: #self.ow_registrar.update({"site_id":self.site.ID, self.ow_registrar.update({ "site_id": json.dumps(self.launcher.get('site_tag')), "job_list": self.job_list }) #self.ow_registrar.update({"job_list":self.job_list}) # Look for a new command # This will throw a redis.exceptions.ConnectionError if redis is unreachable #command = self.redis.brpop(["RAPD_JOBS",], 5) try: while self.redis.llen(self.job_list) != 0: command = self.redis.rpop(self.job_list) # Handle the message if command: self.handle_command(json.loads(command)) # Only run 1 command # self.running = False # break # sleep a little when jobs aren't coming in. time.sleep(0.2) timer += 0.2 except redis.exceptions.ConnectionError: if self.logger: self.logger.exception( "Remote Redis is not up. Waiting for Sentinal to switch to new host" ) time.sleep(1) except KeyboardInterrupt: self.stop()
def main(args): """ The main process docstring This function is called when this module is invoked from the commandline """ # print args results = run_phaser_module(args) if args.json: print json.dumps(results)
def main(args): """ The main process docstring This function is called when this module is invoked from the commandline """ # print args results = run_phaser_module(args) if args.json: print json.dumps(results)
def update_run_progress(self, run_position=0, image_name="", run_data=None):
    """Publish run status to the Redis database for Remote project's benefit

    run_position -- index of the current image within the run
    image_name   -- full name of the image being collected
    run_data     -- run metadata dict with keys such as 'start', 'total',
                    'directory', 'omega', 'width', 'time', 'distance',
                    'run_id'; may be None
    """
    if self.logger:
        self.logger.debug('Remote.update_run_progress')

    # Templated representation of the image name (sequence digits -> # marks).
    # Fall back to the raw name for unrecognized extensions; previously
    # my_repr was referenced before assignment in that case (NameError).
    if image_name.endswith('.cbf'):
        my_repr = re.sub(r'\d{4}\.cbf', r'####.cbf', image_name)
    elif image_name.endswith('.img'):
        my_repr = re.sub(r'\d{3}\.img', r'###.img', image_name)
    else:
        my_repr = image_name

    if run_data is None:
        run_data = {}

    assembled_data = {'beamline': self.beamline,
                      'run_id': run_data.get('run_id', 0),
                      'run_position': run_position,
                      'run_start': run_data.get('start', 1),
                      'run_total': run_data.get('total', 0),
                      'directory': run_data.get('directory', ''),
                      'current_image': image_name,
                      'omega_start': run_data.get('omega', 0.0),
                      'omega_delta': run_data.get('width', 0.0),
                      'exposure': run_data.get('time', 0.0),
                      'distance': run_data.get('distance', 0.0),
                      'date': datetime.datetime.now().isoformat(),
                      'repr': my_repr}

    # Publish data through redis
    if self.logger:
        self.logger.debug("Publishing run_update_%s %s" % (self.beamline,
                                                           json.dumps(assembled_data)))
    self.redis.publish("run_update_%s" % self.beamline,
                       json.dumps(assembled_data))

    # Save to Mongo (best effort - failures are logged, never raised)
    try:
        self.mongo_remote.run_data.update(
            {"run_id": run_data.get('run_id')},
            assembled_data,
            upsert=True,
            multi=False)
    # TODO - handle mongoDB exceptions more specifically
    except Exception:
        if self.logger:
            self.logger.exception("Error writing run_data to MongoDB")
def update_run_progress(self, run_position=0, image_name="", run_data=None):
    """Publish run status to the Redis database for Remote project's benefit

    run_position -- index of the current image within the run
    image_name   -- full name of the image being collected
    run_data     -- run metadata dict with keys such as "start", "total",
                    "directory", "omega", "width", "time", "distance",
                    "run_id"; may be None
    """
    self.logger.debug("Remote.update_run_progress")

    # Templated representation of the image name (sequence digits -> # marks).
    # Fall back to the raw name for unrecognized extensions; previously
    # my_repr was referenced before assignment in that case (NameError).
    if image_name.endswith(".cbf"):
        my_repr = re.sub(r"\d{4}\.cbf", r"####.cbf", image_name)
    elif image_name.endswith(".img"):
        my_repr = re.sub(r"\d{3}\.img", r"###.img", image_name)
    else:
        my_repr = image_name

    if run_data is None:
        run_data = {}

    assembled_data = {"beamline": self.beamline,
                      "run_id": run_data.get("run_id", 0),
                      "run_position": run_position,
                      "run_start": run_data.get("start", 1),
                      "run_total": run_data.get("total", 0),
                      "directory": run_data.get("directory", ""),
                      "current_image": image_name,
                      "omega_start": run_data.get("omega", 0.0),
                      "omega_delta": run_data.get("width", 0.0),
                      "exposure": run_data.get("time", 0.0),
                      "distance": run_data.get("distance", 0.0),
                      "date": datetime.datetime.now().isoformat(),
                      "repr": my_repr}

    # Publish data through redis.
    # BUG FIX: the two lazy %-args were previously passed as a single tuple,
    # which makes the logging module raise a formatting error.
    self.logger.debug("Publishing run_update_%s %s",
                      self.settings.ID, json.dumps(assembled_data))
    self.redis.publish("run_update_%s" % self.settings.ID,
                       json.dumps(assembled_data))

    # Save to Mongo (best effort - failures are logged, never raised)
    try:
        self.mongo_remote.run_data.update(
            {"run_id": run_data.get("run_id")},
            assembled_data,
            upsert=True,
            multi=False)
    # TODO - handle mongoDB exceptions more specifically
    except Exception:
        if self.logger:
            self.logger.exception("Error writing run_data to MongoDB")
def push_command(self, command):
    """
    Handle an incoming command

    Routes the job to the Redis list of a running launcher that can take
    it, or parks it on RAPD_JOBS_WAITING when none is available.

    Keyword arguments:
    command -- command from redis (a dict; expected to carry at least a
               'command' key, and for INTEGRATE jobs a
               preferences.xdsinp entry - TODO confirm full schema
               against callers)
    """
    print "push_command"
    #pprint(command)

    # Split up the command
    message = command

    if self.logger:
        self.logger.debug(
            "Command received channel:RAPD_JOBS message: %s", message)

    # get the site_tag from the image header to determine beamline where is was collected.
    site_tag = launch_tools.get_site_tag(message)

    # get the correct running launcher and launch_dir
    launcher, launch_dir = self.set_launcher(message['command'], site_tag)

    # Diagnostic echo of the XDS input for integration jobs
    if message['command'].startswith('INTEGRATE'):
        print 'type: %s...%s' % (message['preferences']['xdsinp'][:100],
                                 message['preferences']['xdsinp'][-100:])

    if launcher:
        # Update preferences to be in server run mode
        if not message.get("preferences"):
            message["preferences"] = {}
        message["preferences"]["run_mode"] = "server"

        # Pass along the Launch directory
        if not message.get("directories"):
            message["directories"] = {}
        message["directories"]["launch_dir"] = launch_dir

        # Push the job on the correct launcher job list
        self.redis.lpush(launcher, json.dumps(message))
        if self.logger:
            self.logger.debug("Command sent channel:%s message: %s",
                              launcher, message)
    else:
        # No launcher can accept the job right now - park it for later
        self.redis.lpush('RAPD_JOBS_WAITING', json.dumps(message))
        if self.logger:
            self.logger.debug(
                "Could not find a running launcher for this job. Putting job on RAPD_JOBS_WAITING list"
            )
def send_results(self):
    """Hand the accumulated results back over Redis.

    In "server" run mode the results dict is JSON-encoded and both pushed
    onto and published to the RAPD_RESULTS channel/list; in any other
    mode this is a no-op (beyond the debug log).
    """
    self.logger.debug("send_results")

    # Nothing to transmit unless we are serving results back
    if self.preferences.get("run_mode") != "server":
        return

    self.logger.debug("Sending back on redis")
    self.logger.debug(self.results)

    # Transcribe results
    encoded_results = json.dumps(self.results)

    # Lazily establish the Redis connection
    if not self.redis:
        self.connect_to_redis()

    # Send results back - list for pollers, channel for live listeners
    self.redis.lpush("RAPD_RESULTS", encoded_results)
    self.redis.publish("RAPD_RESULTS", encoded_results)
def publishDistl(image, parameters, beamline, logger=False):
    """Broadcast DISTL results for an image on the beamline's Redis channel.

    image      -- image identifier (used for logging only)
    parameters -- JSON-serializable DISTL result payload
    beamline   -- beamline tag; selects the '<beamline>:distl_result' channel
    logger     -- optional logger for debug output
    """
    if logger:
        logger.debug("publishDistl %s %s" % (image, beamline))
        logger.debug(parameters)

    channel = '%s:distl_result' % beamline
    _RedisClient1.publish(channel, json.dumps(parameters))
def handle_return(self): """Output data to consumer - still under construction""" self.tprint("handle_return") run_mode = self.preferences.get("run_mode") print "run_mode", run_mode # Print results to the terminal if run_mode == "interactive": self.print_results() # Traditional mode as at the beamline elif run_mode == "server": json_results = json.dumps(self.results) self.redis.lpush("RAPD_RESULTS", json_results) self.redis.publish("RAPD_RESULTS", json_results) # Run and return results to launcher elif run_mode == "subprocess": return self.results # A subprocess with terminal printing elif run_mode == "subprocess-interactive": self.print_results() return self.results
def publishDistl(image, parameters, beamline, logger=False):
    """Push DISTL analysis results out over the shared Redis client.

    image      -- image identifier (logged only)
    parameters -- JSON-serializable result payload
    beamline   -- beamline tag naming the '<beamline>:distl_result' channel
    logger     -- optional logger for debug output
    """
    if logger:
        logger.debug("publishDistl %s %s" % (image, beamline))
        logger.debug(parameters)

    target_channel = '%s:distl_result' % beamline
    _RedisClient1.publish(target_channel, json.dumps(parameters))
def send_results(self):
    """Announce this job's results over Redis when running in server mode.

    Encodes self.results as JSON and pushes it onto (and publishes to)
    RAPD_RESULTS; does nothing in other run modes.
    """
    self.logger.debug("send_results")

    # Only server mode reports back over Redis
    if self.preferences.get("run_mode") != "server":
        return

    self.logger.debug("Sending back on redis")
    self.logger.debug(self.results)

    # Transcribe results
    payload = json.dumps(self.results)

    # Get redis instance on first use
    if not self.redis:
        self.connect_to_redis()

    # Send results back
    self.redis.lpush("RAPD_RESULTS", payload)
    self.redis.publish("RAPD_RESULTS", payload)
def write_command_file(target_directory, command, message):
    """
    Serialize a command message into a uniquely-named file.

    Creates target_directory if needed and writes the JSON-encoded message
    to target_directory/{command}_{random chars}.rapd.

    Keyword arguments
    target_directory -- directory to write the command file in
    command          -- command type, used as the filename prefix
    message          -- contents of the command file

    Returns the full path of the file written.
    """
    # Create the destination directory on first use
    if not os.path.exists(target_directory):
        os.makedirs(target_directory)

    # NamedTemporaryFile with delete=False yields a collision-free name
    command_file = tempfile.NamedTemporaryFile(mode="w",
                                               dir=target_directory,
                                               prefix=command + "_",
                                               suffix=".rapd",
                                               delete=False)
    command_file.write(json.dumps(message))
    command_file.close()

    return command_file.name
def send_command(self, command, channel="RAPD_JOBS"): """Send a command over redis for processing""" print "send_command" pprint(command) self.redis.lpush(channel, json.dumps(command)) print "Command sent"
def send_command(self, command, channel="RAPD_JOBS"): """Send a command over redis for processing""" print "send_command" pprint(command) self.redis.lpush(channel, json.dumps(command)) print "Command sent"
def postprocess(self):
    """Hand the finished message back over Redis after adapter work.

    Publishes the JSON-encoded message for live listeners and pushes it
    onto the RAPD_RESULTS list for anyone polling later.
    """
    # Encode in JSON
    encoded_message = json.dumps(self.message)

    # Pass back result - channel first, then the durable list
    self.redis.publish("RAPD_RESULTS", encoded_message)
    self.redis.lpush("RAPD_RESULTS", encoded_message)
def postprocess(self):
    """Clean up after adapter functions by returning the result message.

    The message is serialized once and sent over Redis twice: published
    to the RAPD_RESULTS channel and pushed onto the RAPD_RESULTS list.
    """
    # Serialize the outgoing message
    payload = json.dumps(self.message)

    # Broadcast, then persist on the list for pollers
    self.redis.publish("RAPD_RESULTS", payload)
    self.redis.lpush("RAPD_RESULTS", payload)
def push_command(self, command):
    """
    Handle an incoming command

    Routes the job onto the Redis list of a running launcher that can
    accept it, or parks it on RAPD_JOBS_WAITING when none is available.

    Keyword arguments:
    command -- command from redis (a dict; expected to carry at least a
               'command' key - TODO confirm full schema against callers)
    """
    print "push_command"
    #pprint(command)

    # Split up the command
    message = command

    if self.logger:
        self.logger.debug("Command received channel:RAPD_JOBS message: %s",
                          message)

    # get the site_tag from the image header to determine beamline where is was collected.
    site_tag = launch_tools.get_site_tag(message)

    # get the correct running launcher and launch_dir
    launcher, launch_dir = self.set_launcher(message['command'], site_tag)

    # Diagnostic echo of the XDS input for integration jobs
    if message['command'].startswith('INTEGRATE'):
        print 'type: %s'%message['preferences']['xdsinp']

    if launcher:
        # Update preferences to be in server run mode
        if not message.get("preferences"):
            message["preferences"] = {}
        message["preferences"]["run_mode"] = "server"

        # Pass along the Launch directory
        if not message.get("directories"):
            message["directories"] = {}
        message["directories"]["launch_dir"] = launch_dir

        # Push the job on the correct launcher job list
        self.redis.lpush(launcher, json.dumps(message))
        if self.logger:
            self.logger.debug("Command sent channel:%s message: %s",
                              launcher, message)
    else:
        # No launcher can accept the job right now - park it for later
        self.redis.lpush('RAPD_JOBS_WAITING', json.dumps(message))
        if self.logger:
            self.logger.debug("Could not find a running launcher for this job. Putting job on RAPD_JOBS_WAITING list")
def run(self):
    """
    The while loop for watching the files

    Pops new run descriptions off the beamline's run_info list, enriches
    them via get_run_data, then publishes/pushes them onto the
    run_data/runs_data Redis channels while keeping the overwatch
    registration fresh.
    """
    self.logger.info("NecatGatherer.run")

    # Set up overwatcher
    self.ow_registrar = Registrar(site=self.site,
                                  ow_type="gatherer",
                                  ow_id=self.overwatch_id)
    #self.ow_registrar.register({"site_id":self.site.ID})
    self.ow_registrar.register({"site_id":self.tag})

    #self.logger.debug(" Will publish new images on filecreate:%s" % self.tag)
    #self.logger.debug(" Will push new images onto images_collected:%s" % self.tag)
    self.logger.debug(" Will publish new datasets on run_data:%s" % self.tag)
    self.logger.debug(" Will push new datasets onto runs_data:%s" % self.tag)

    # path prefix for RDMA folder location with Eiger
    #if self.tag == 'NECAT_E':
    #    path_prefix = '/epu/rdma'
    #else:
    #    path_prefix = ''

    try:
        while self.go:
            # Check if the run info changed in beamline Redis DB.
            #current_run = self.pipe.get("RUN_INFO_SV").set("RUN_INFO_SV", "").execute()
            # get run info passed from RAPD
            #current_run = self.redis.rpop('run_info_T')
            #current_run = self.redis.rpop('run_info_%s'%self.tag[-1])
            # NOTE(review): keyed on the last character of the tag -
            # presumably the beamline letter; confirm against producers.
            current_run_raw = self.redis.rpop('run_info_%s'%self.tag[-1])
            if current_run_raw not in (None, ""):
                current_run = json.loads(current_run_raw)
                # get the additional beamline params and put into nice dict.
                run_data = self.get_run_data(current_run)
                if self.ignored(run_data['directory']):
                    self.logger.debug("Directory %s is marked to be ignored - skipping", run_data['directory'])
                else:
                    #run_data['directory'] = dir
                    self.logger.debug("runs_data:%s %s", self.tag, run_data)
                    # Put into exchangable format
                    run_data_json = json.dumps(run_data)
                    # Publish to Redis
                    self.redis.publish("run_data:%s" % self.tag, run_data_json)
                    #self.redis.publish("run_data:%s" % self.tag, run_data)
                    # Push onto redis list in case no one is currently listening
                    self.redis.lpush("runs_data:%s" % self.tag, run_data_json)
                    #self.redis.lpush("runs_data:%s" % self.tag, run_data)
            time.sleep(0.2)
            # Have Registrar update status (every pass, ~5x per second)
            self.ow_registrar.update({"site_id":self.tag})
    except KeyboardInterrupt:
        self.stop()
def add_image(self, image_metadata):
    """Add an image to the remote system

    Annotates the metadata (basename, fresh _id, default image_id),
    publishes it over Redis and upserts it into MongoDB. Both sends are
    best-effort: failures are logged, never raised.
    NOTE: mutates the image_metadata dict passed in.

    Keyword argument:
    image_metadata -- dict containing image metadata; must include "fullname"

    Returns the generated "_id" string.
    """
    if self.logger:
        self.logger.debug("Remote.add_image image:%s" % image_metadata.get("fullname"))
        self.logger.debug(image_metadata)

    # Add useful info to the metadata
    image_metadata["name"] = os.path.basename(image_metadata["fullname"])
    image_metadata["_id"] = str(uuid.uuid1())

    # Filter for no image_id key
    if "image_id" not in image_metadata:
        image_metadata["image_id"] = 0

    # Publish image_metadata for subscribers.
    # BUG FIX: the debug call was previously unguarded - with no logger it
    # raised AttributeError inside the try and silently skipped the publish.
    try:
        if self.logger:
            self.logger.debug("Publishing %s %s" % (image_metadata["fullname"],
                                                    json.dumps(image_metadata)))
        self.redis.publish(image_metadata["fullname"], json.dumps(image_metadata))
    # TODO - handle redis exceptions more specifically
    except Exception:
        if self.logger:
            self.logger.exception("Error publishing image metadata to Redis")

    # Save to Mongo
    try:
        self.mongo_remote.image_metadata.update(
            {"_id": image_metadata["_id"]},
            image_metadata,
            upsert=True,
            multi=False)
    # TODO - handle mongoDB exceptions more specifically
    except Exception:
        if self.logger:
            self.logger.exception("Error writing image metadata to MongoDB")

    return image_metadata["_id"]
def run(self):
    """
    The while loop for watching the files

    Pops new run descriptions off the beamline's run_info list, enriches
    them via get_run_data, then publishes/pushes them onto the
    run_data/runs_data Redis channels while keeping the overwatch
    registration fresh.
    """
    self.logger.info("NecatGatherer.run")

    # Set up overwatcher
    self.ow_registrar = Registrar(site=self.site,
                                  ow_type="gatherer",
                                  ow_id=self.overwatch_id)
    self.ow_registrar.register({"site_id":self.site.ID})

    #self.logger.debug(" Will publish new images on filecreate:%s" % self.tag)
    #self.logger.debug(" Will push new images onto images_collected:%s" % self.tag)
    self.logger.debug(" Will publish new datasets on run_data:%s" % self.tag)
    self.logger.debug(" Will push new datasets onto runs_data:%s" % self.tag)

    # path prefix for RDMA folder location with Eiger
    # NOTE(review): path_prefix is computed but not referenced in the loop
    # below - presumably consumed elsewhere; confirm before removing.
    if self.tag == 'NECAT_E':
        path_prefix = '/epu2/rdma'
    else:
        path_prefix = ''

    try:
        while self.go:
            # Check if the run info changed in beamline Redis DB.
            #current_run = self.pipe.get("RUN_INFO_SV").set("RUN_INFO_SV", "").execute()
            # get run info passed from RAPD
            #current_run = self.redis.rpop('run_info_T')
            #current_run = self.redis.rpop('run_info_%s'%self.tag[-1])
            # NOTE(review): keyed on the last character of the tag -
            # presumably the beamline letter; confirm against producers.
            current_run_raw = self.redis.rpop('run_info_%s'%self.tag[-1])
            if current_run_raw not in (None, ""):
                current_run = json.loads(current_run_raw)
                # get the additional beamline params and put into nice dict.
                run_data = self.get_run_data(current_run)
                if self.ignored(run_data['directory']):
                    self.logger.debug("Directory %s is marked to be ignored - skipping", run_data['directory'])
                else:
                    #run_data['directory'] = dir
                    self.logger.debug("runs_data:%s %s", self.tag, run_data)
                    # Put into exchangable format
                    run_data_json = json.dumps(run_data)
                    # Publish to Redis
                    self.redis.publish("run_data:%s" % self.tag, run_data_json)
                    #self.redis.publish("run_data:%s" % self.tag, run_data)
                    # Push onto redis list in case no one is currently listening
                    self.redis.lpush("runs_data:%s" % self.tag, run_data_json)
                    #self.redis.lpush("runs_data:%s" % self.tag, run_data)
            time.sleep(0.2)
            # Have Registrar update status (every pass, ~5x per second)
            self.ow_registrar.update({"site_id":self.site.ID})
    except KeyboardInterrupt:
        self.stop()
def write_json(self): """Output JSON-formatted results to terminal""" json_results = json.dumps(self.results) # Write the results to a JSON-encoded file with open("result.json", "w") as out_file: out_file.write(json_results) # If running in JSON mode, print to terminal if self.preferences.get("run_mode") == "json": print json_results
def write_json(self): """Output JSON-formatted results to terminal""" json_results = json.dumps(self.results) # Write the results to a JSON-encoded file with open("result.json", "w") as out_file: out_file.write(json_results) # If running in JSON mode, print to terminal if self.preferences.get("run_mode") == "json": print json_results
def add_quick_analysis_result(self, result_data):
    """Publish a new quickanalysis result and cache it in Redis.

    result_data -- dict of analysis output for a single raster image;
                   also registered with the remote client via add_image.
    """
    if self.logger:
        self.logger.info("Remote.add_quick_analysis_result")
        self.logger.info(result_data)

    # Register the image with the remote client first
    node, image_uuid = self.add_image(result_data)

    # Assemble results to be published
    assemble = {
        "raster_uuid": result_data.get("raster_uuid", ""),
        "image_uuid": image_uuid,
        "node": node,
        "status": "success",
        "beamline": self.beamline,
        "crystal_image": result_data.get("crystal_image", ""),
        "fullname": result_data.get("fullname", ""),
        "image_id": result_data.get("image_id", 0),
        "image_number": result_data.get("image_number", 0),
        "md2_x": result_data.get("md2_x", 0),
        "md2_y": result_data.get("md2_y", 0),
        "md2_z": result_data.get("md2_z", 0),
    }

    # Serialize once for the channel, once for the cache entry
    results = json.dumps(assemble)
    json_assemble = json.dumps(assemble)
    a_key = ("quickanalysis_result:" + assemble.get("raster_uuid") +
             ":" + str(assemble.get("image_number")))

    # Publish to redis connection
    self.redis.publish("quickanalysis_result_%s" % self.beamline, results)

    # Save data in redis database, expiring in roughly a month
    self.redis.set(a_key, json_assemble)
    self.redis.expire(a_key, 8000000)
def add_quick_analysis_result(self, result_data):
    """Broadcast a quickanalysis result and store a月-long cached copy.

    result_data -- dict of analysis output for a single raster image;
                   registered with the remote client via add_image first.
    """
    if self.logger:
        self.logger.info('Remote.add_quick_analysis_result')
        self.logger.info(result_data)

    # Add the image to the remote client
    node, image_uuid = self.add_image(result_data)

    # Assemble results to be published
    assemble = {'raster_uuid': result_data.get('raster_uuid', ''),
                'image_uuid': image_uuid,
                'node': node,
                'status': 'success',
                'beamline': self.beamline,
                'crystal_image': result_data.get('crystal_image', ''),
                'fullname': result_data.get('fullname', ''),
                'image_id': result_data.get('image_id', 0),
                'image_number': result_data.get('image_number', 0),
                'md2_x': result_data.get('md2_x', 0),
                'md2_y': result_data.get('md2_y', 0),
                'md2_z': result_data.get('md2_z', 0)}

    # One serialization for the channel, one for the keyed cache entry
    results = json.dumps(assemble)
    json_assemble = json.dumps(assemble)
    a_key = ('quickanalysis_result:' + assemble.get('raster_uuid') +
             ':' + str(assemble.get('image_number')))

    # Publish to redis connection
    self.redis.publish('quickanalysis_result_%s' % self.beamline, results)

    # Save data in redis database
    self.redis.set(a_key, json_assemble)
    self.redis.expire(a_key, 8000000)  # Expire in a month
def add_quick_analysis_result(self, result_data):
    """Process a new quickanalysis result.

    Registers the image via add_image, publishes the assembled result on
    the beamline's quickanalysis channel, and caches a keyed copy in
    Redis for about a month.
    """
    if self.logger:
        self.logger.info("Remote.add_quick_analysis_result")
        self.logger.info(result_data)

    # Add the image to the remote client
    node, image_uuid = self.add_image(result_data)

    # Assemble results to be published
    assemble = {
        "raster_uuid": result_data.get("raster_uuid", ""),
        "image_uuid": image_uuid,
        "node": node,
        "status": "success",
        "beamline": self.beamline,
        "crystal_image": result_data.get("crystal_image", ""),
        "fullname": result_data.get("fullname", ""),
        "image_id": result_data.get("image_id", 0),
        "image_number": result_data.get("image_number", 0),
        "md2_x": result_data.get("md2_x", 0),
        "md2_y": result_data.get("md2_y", 0),
        "md2_z": result_data.get("md2_z", 0),
    }

    # Publish to redis connection
    results = json.dumps(assemble)
    json_assemble = json.dumps(assemble)
    a_key = ("quickanalysis_result:" + assemble.get("raster_uuid") +
             ":" + str(assemble.get("image_number")))
    self.redis.publish("quickanalysis_result_%s" % self.beamline, results)

    # Save data in redis database; expire in a month
    self.redis.set(a_key, json_assemble)
    self.redis.expire(a_key, 8000000)
def cell_search(self, search_params):
    """Search the PDBQ server for PDB entries within a unit-cell range.

    search_params -- dict of cell-parameter ranges, JSON-encoded and
                     POSTed to <server>/cell_search/

    Returns the decoded hit dict, with each entry's
    "struct.pdbx_descriptor" renamed to "description".
    """
    # POST the search to the PDBQ server
    request = urllib2.Request("%s/cell_search/" % self.server,
                              data=json.dumps(search_params))
    raw_response = urllib2.urlopen(request).read()

    # Decode search result
    search_results = json.loads(raw_response)

    # Create handy description key
    for pdb_code in search_results.keys():
        search_results[pdb_code]["description"] = \
            search_results[pdb_code].pop("struct.pdbx_descriptor")

    return search_results
def cell_search(self, search_params):
    """Query the PDBQ server for PDBs whose unit cell falls in a range.

    search_params -- dict of cell-parameter ranges, sent as JSON to
                     <server>/cell_search/

    Returns a dict of hits keyed by PDB code, each carrying a
    "description" field derived from "struct.pdbx_descriptor".
    """
    # Query server
    pdbq_request = urllib2.Request("%s/cell_search/" % self.server,
                                   data=json.dumps(search_params))
    response_text = urllib2.urlopen(pdbq_request).read()

    # Decode search result
    hits = json.loads(response_text)

    # Rename the descriptor field to something friendlier
    for code in hits.keys():
        hits[code]["description"] = hits[code].pop("struct.pdbx_descriptor")

    return hits
def write_json(self):
    """Print out JSON-formatted result

    Serializes self.results, optionally echoes it to the terminal in
    JSON run mode, and always writes it to result.json in the working
    directory (note: chdir's into self.working_dir).
    """
    json_string = json.dumps(self.results)

    # If running in JSON mode, print to terminal.
    # BUG FIX: previously printed the undefined name json_results (NameError).
    # print(...) with a single argument behaves identically on py2 and py3.
    if self.preferences.get("run_mode") == "json":
        print(json_string)

    # Output to terminal?
    #if self.preferences.get("json", False):
    #    print json_string

    # Always write a file
    os.chdir(self.working_dir)
    with open("result.json", "w") as outfile:
        outfile.writelines(json_string)
def write_json(self):
    """Print out JSON-formatted result

    Serializes self.results, echoes it to the terminal when run_mode is
    "json", and always writes it to result.json inside self.working_dir
    (note: chdir's into that directory).
    """
    json_string = json.dumps(self.results)

    # If running in JSON mode, print to terminal.
    # BUG FIX: previously referenced the undefined name json_results
    # (NameError). print(...) with one argument is valid py2 and py3.
    if self.preferences.get("run_mode") == "json":
        print(json_string)

    # Output to terminal?
    #if self.preferences.get("json", False):
    #    print json_string

    # Always write a file
    os.chdir(self.working_dir)
    with open("result.json", "w") as outfile:
        outfile.writelines(json_string)
def connect_pdbq(inp):
    """Query the PDBQ server

    Accumulates cell-search hits into the dict passed as *inp* and
    returns it, with each hit's "struct.pdbx_descriptor" renamed "Name".

    NOTE(review): end, l2, self.cell, self.percent and PDBQ_SERVER are
    free variables here - this function appears to be defined inside an
    enclosing method/scope not visible in this chunk; confirm before
    moving or reusing it.
    """
    _d0_ = inp
    # Unit-cell edge/angle names, in the order expected by the server
    l1 = ["a", "b", "c", "alpha", "beta", "gamma"]
    for y in range(end):
        _d_ = {}
        for x in range(len(l1)):
            # Window each parameter by +/- (percent/2) around its value
            _d_[l1[x]] = [self.cell[l2[y][x]] - self.cell[l2[y][x]] * self.percent/2,
                          self.cell[l2[y][x]] + self.cell[l2[y][x]] * self.percent/2]
        # Query server
        response = urllib2.urlopen(urllib2.Request("%s/cell_search/" % \
                   PDBQ_SERVER, data=json.dumps(_d_))).read()
        j = json.loads(response)
        # Expose the PDB descriptor under the "Name" key
        for k in j.keys():
            j[k]["Name"] = j[k].pop("struct.pdbx_descriptor")
        # Fold this round of hits into the accumulator
        _d0_.update(j)
    return _d0_
def run(self):
    """Start the thread

    Connects to the cluster process (retrying up to 10 times), sends
    self.command JSON-encoded inside <rapd_start>/<rapd_end> tags, then
    closes the connection.

    Raises RuntimeError if no connection could be made or the socket
    breaks mid-send.
    """
    self.logger.debug("PerformAction::run")

    # Connect to the cluster process, retrying on failure
    attempts = 0
    while attempts < 10:
        attempts += 1
        self.logger.debug("Cluster connection attempt %d", attempts)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect(self.settings["CLUSTER_ADDRESS"])
            break
        except socket.error:
            self.logger.exception("Failed to initialize socket to cluster")
            s.close()  # BUG FIX: don't leak a socket per failed attempt
            time.sleep(1)
    else:
        # while-else: no break means every attempt failed.
        # BUG FIX: the count was previously passed as a spare constructor
        # argument instead of being %-formatted into the message.
        raise RuntimeError(
            "Failed to initialize socket to cluster after %d attempts" % attempts)

    # Put the command in rapd server-speak
    message = "<rapd_start>" + json.dumps(self.command) + "<rapd_end>"
    # Bytes for the socket; JSON output is ASCII-safe, works on py2 and py3
    message_bytes = message.encode("utf-8")
    MSGLEN = len(message_bytes)

    # Send the message, resuming after partial sends
    total_sent = 0
    while total_sent < MSGLEN:
        sent = s.send(message_bytes[total_sent:])
        if sent == 0:
            raise RuntimeError("socket connection broken")
        total_sent += sent
    self.logger.debug("Message sent to cluster total_sent:%d", total_sent)

    # Close connection to cluster
    s.close()
    self.logger.debug("Connection to cluster closed")
def run(self):
    """Start the thread

    Connects to the launcher instance (retrying up to 10 times), sends
    self.command JSON-encoded inside <rapd_start>/<rapd_end> tags, then
    closes the connection.

    Raises RuntimeError if no connection could be made or the socket
    breaks mid-send.
    """
    self.logger.debug("Attempting to send launch action to %s:%s",
                      self.launcher_address[0],
                      self.launcher_address[1])

    # Put the command in rapd server-speak
    message = "<rapd_start>" + json.dumps(self.command) + "<rapd_end>"
    # Bytes for the socket; JSON output is ASCII-safe, works on py2 and py3
    message_bytes = message.encode("utf-8")
    MSGLEN = len(message_bytes)

    # Connect to launcher instance, retrying on failure
    attempts = 0
    while attempts < 10:
        attempts += 1
        self.logger.debug("Launcher connection attempt %d", attempts)
        _socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            _socket.connect(self.launcher_address)
            break
        except socket.error:
            self.logger.exception("Failed to initialize socket to launcher")
            _socket.close()  # BUG FIX: don't leak a socket per failed attempt
            time.sleep(1)
    else:
        # while-else: no break means every attempt failed.
        # BUG FIX: the count was previously passed as a spare constructor
        # argument instead of being %-formatted into the message.
        raise RuntimeError(
            "Failed to initialize socket to launcher after %d attempts" % attempts)

    # Send the message, resuming after partial sends
    total_sent = 0
    while total_sent < MSGLEN:
        sent = _socket.send(message_bytes[total_sent:])
        if sent == 0:
            raise RuntimeError("socket connection broken")
        total_sent += sent
    self.logger.debug("Message sent to launcher total_sent:%d", total_sent)

    # Close connection to cluster
    _socket.close()
    self.logger.debug("Connection to cluster closed")
def run(self):
    """Start the thread

    Connects to the launcher (retrying up to 10 times), sends
    self.command JSON-encoded inside <rapd_start>/<rapd_end> tags, then
    closes the connection.

    Raises RuntimeError if no connection could be made or the socket
    breaks mid-send.
    """
    self.logger.debug("PerformAction::run")

    # Connect to the launcher process, retrying on failure
    attempts = 0
    while attempts < 10:
        attempts += 1
        self.logger.debug("Launcher connection attempt %d", attempts)
        _socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            _socket.connect(self.settings["LAUNCHER_ADDRESS"])
            break
        except socket.error:
            self.logger.exception("Failed to initialize socket to cluster")
            _socket.close()  # BUG FIX: don't leak a socket per failed attempt
            time.sleep(1)
    else:
        # while-else: no break means every attempt failed.
        # BUG FIX: the count was previously passed as a spare constructor
        # argument instead of being %-formatted into the message.
        raise RuntimeError(
            "Failed to initialize socket to cluster after %d attempts" % attempts)

    # Put the command in rapd server-speak
    message = "<rapd_start>" + json.dumps(self.command) + "<rapd_end>"
    # Bytes for the socket; JSON output is ASCII-safe, works on py2 and py3
    message_bytes = message.encode("utf-8")
    message_length = len(message_bytes)

    # Send the message, resuming after partial sends
    total_sent = 0
    while total_sent < message_length:
        sent = _socket.send(message_bytes[total_sent:])
        if sent == 0:
            raise RuntimeError("socket connection broken")
        total_sent += sent
    self.logger.debug("Message sent to cluster total_sent:%d", total_sent)

    # Close connection to cluster
    _socket.close()
    self.logger.debug("Connection to cluster closed")
def connect_pdbq(inp):
    """Query the PDBQ server

    Accumulates cell-search hits into the dict passed as *inp* and
    returns it, with each hit's "struct.pdbx_descriptor" renamed "Name".

    NOTE(review): end, l2, self.cell, self.percent and PDBQ_SERVER are
    free variables here - this function appears to be defined inside an
    enclosing method/scope not visible in this chunk; confirm before
    moving or reusing it.
    """
    _d0_ = inp
    # Unit-cell edge/angle names, in the order expected by the server
    l1 = ["a", "b", "c", "alpha", "beta", "gamma"]
    for y in range(end):
        _d_ = {}
        for x in range(len(l1)):
            # Window each parameter by +/- (percent/2) around its value
            _d_[l1[x]] = [
                self.cell[l2[y][x]] - self.cell[l2[y][x]] * self.percent / 2,
                self.cell[l2[y][x]] + self.cell[l2[y][x]] * self.percent / 2
            ]
        # Query server
        response = urllib2.urlopen(urllib2.Request("%s/cell_search/" % \
                   PDBQ_SERVER, data=json.dumps(_d_))).read()
        j = json.loads(response)
        # Expose the PDB descriptor under the "Name" key
        for k in j.keys():
            j[k]["Name"] = j[k].pop("struct.pdbx_descriptor")
        # Fold this round of hits into the accumulator
        _d0_.update(j)
    return _d0_
def rapd_send(controller_address, message, logger=False):
    """
    Use standard socket-based message used in RAPD

    Serializes *message* to JSON, wraps it in <rapd_start>/<rapd_end>
    tags and ships it to controller_address over a fresh TCP connection.

    controller_address -- (host, port) tuple or list,
                          ex. ("164.54.212.165", 50001)
    message            -- JSON-serializable object to send
    logger             -- optional logger for error reporting

    Returns True on success, False if the connection was refused.
    Raises RuntimeError if the socket breaks mid-send.
    """
    # Connect
    _socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        _socket.connect(tuple(controller_address))
    except socket.error:
        if logger:
            logger.error("Connection refused to %s", controller_address)
        _socket.close()  # BUG FIX: don't leak the descriptor on failure
        return False

    # Encode message as JSON and add the start and end tags
    message_encoded = "<rapd_start>" + json.dumps(message) + "<rapd_end>"
    # Bytes for the socket; JSON output is ASCII-safe, works on py2 and py3
    message_bytes = message_encoded.encode("utf-8")

    # Send, resuming from the last byte the kernel accepted.
    # BUG FIX: the whole buffer was previously re-sent after a partial
    # send, corrupting the stream (and no broken-socket guard existed).
    message_length = len(message_bytes)
    total_sent = 0
    while total_sent < message_length:
        sent = _socket.send(message_bytes[total_sent:])
        if sent == 0:
            raise RuntimeError("socket connection broken")
        total_sent += sent

    # Close the socket connection
    _socket.close()

    # Return
    return True
def rapd_send(controller_address, message, logger=False):
    """
    Use standard socket-based message used in RAPD

    JSON-encodes *message*, wraps it in <rapd_start>/<rapd_end> tags and
    delivers it to controller_address over a one-shot TCP connection.

    controller_address -- (host, port) tuple or list,
                          ex. ("164.54.212.165", 50001)
    message            -- JSON-serializable object to send
    logger             -- optional logger for error reporting

    Returns True on success, False if the connection was refused.
    Raises RuntimeError if the socket breaks mid-send.
    """
    # Connect
    _socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        _socket.connect(tuple(controller_address))
    except socket.error:
        if logger:
            logger.error("Connection refused to %s", controller_address)
        _socket.close()  # BUG FIX: don't leak the descriptor on failure
        return False

    # Encode message as JSON and add the start and end tags
    message_encoded = "<rapd_start>" + json.dumps(message) + "<rapd_end>"
    # Bytes for the socket; JSON output is ASCII-safe, works on py2 and py3
    message_bytes = message_encoded.encode("utf-8")

    # Send, resuming from the last byte the kernel accepted.
    # BUG FIX: the whole buffer was previously re-sent after a partial
    # send, corrupting the stream (and no broken-socket guard existed).
    message_length = len(message_bytes)
    total_sent = 0
    while total_sent < message_length:
        sent = _socket.send(message_bytes[total_sent:])
        if sent == 0:
            raise RuntimeError("socket connection broken")
        total_sent += sent

    # Close the socket connection
    _socket.close()

    # Return
    return True
def update_image_stats(self, result_db, wedges_db):
    """Publish image statistics to the Redis database for Remote project's benefit

    Arguments:
    result_db -- dict of autoindexing results (labelit_* keys, status flags)
    wedges_db -- iterable of strategy wedge dicts, each carrying a
                 'strategy_type' of 'normal' or 'anomalous'

    Publishes the assembled stats on channel image_stats_<beamline> and
    inserts them into the remote MongoDB autoindex_data collection.
    """
    if self.logger:
        self.logger.debug('Remote.update_image_stats')
        self.logger.debug(result_db)
        self.logger.debug(wedges_db)

    # Data will go in here
    indexing_data = {}
    # Default so .get() calls below are safe even if no wedge matches
    wedge = {}

    # If the indexing worked
    if result_db['labelit_status'] == 'SUCCESS':
        indexing_data.update({'status' : 'indexed',
                              'beamline' : self.beamline,
                              'fullname' : result_db.get('fullname', 0),
                              'image_id' : result_db.get('image_id', 0),
                              'pointgroup' : result_db.get('labelit_spacegroup'),
                              'a' : result_db.get('labelit_a'),
                              'b' : result_db.get('labelit_b'),
                              'c' : result_db.get('labelit_c'),
                              'alpha' : result_db.get('labelit_alpha'),
                              'beta' : result_db.get('labelit_beta'),
                              'gamma' : result_db.get('labelit_gamma'),
                              'resolution' : result_db.get('distl_labelit_res'),
                              'mosaicity' : result_db.get('labelit_mosaicity'),
                              'overloads' : result_db.get('distl_overloads')})

        # If we have a normal strategy
        if result_db["best_norm_status"] == "SUCCESS":
            # Get the normal strategy wedge
            # NOTE(review): if no wedge has strategy_type 'normal', the loop
            # falls through leaving `wedge` as the LAST wedge in wedges_db
            # (or the {} default) -- confirm this is intended
            for wedge in wedges_db:
                if wedge['strategy_type'] == 'normal':
                    break
            indexing_data.update({'status' : 'normal',
                                  'normal_omega_start' : wedge.get('phi_start', -1),
                                  'normal_omega_step' : wedge.get('delta_phi', -1),
                                  'normal_number_images' : wedge.get('number_images', -1)
                                 })

        # If we have an anomalous strategy
        if result_db['best_anom_status'] == 'SUCCESS':
            # Get the anomalous strategy wedge (same fall-through caveat)
            for wedge in wedges_db:
                if wedge['strategy_type'] == 'anomalous':
                    break
            indexing_data.update({'status' : 'all',
                                  'anom_omega_start' : wedge.get('phi_start', -1),
                                  'anom_omega_step' : wedge.get('delta_phi', -1),
                                  'anom_number_images' : wedge.get('number_images', -1)
                                 })

    # No indexing solution
    else:
        indexing_data.update({'status' : 'failure',
                              'beamline' : self.beamline,
                              'fullname' : result_db.get('fullname', 0),
                              'image_id' : result_db.get('image_id', 0)
                             })

    # Publish to redis connectioni
    if self.logger:
        self.logger.debug("Publishing image_stats_%s %s" % (self.beamline, json.dumps(indexing_data)))
    self.redis.publish("image_stats_%s" % self.beamline, json.dumps(indexing_data))

    # Save to Mongo
    try:
        self.mongo_remote.autoindex_data.insert(indexing_data)
    # TODO - handle mongoDB exceptions
    except:
        if self.logger:
            self.logger.exception("Error writing run_data to MongoDB")
def run(self):
    """Main monitor loop: watch beamline Redis for new runs/directories and
    republish them to the control Redis, reconnecting on failure.

    NOTE(review): reconstructed formatting -- the statement order matches the
    original; verify indentation of the beamline-T branch against history.
    """
    if self.logger:
        self.logger.info('ConsoleRedisMonitor.run')

    # Create redis connections
    # Where beamline information is coming from
    redis_database = importlib.import_module('database.redis_adapter')
    bl_database = redis_database.Database(
        settings=self.site.SITE_ADAPTER_SETTINGS)
    self.bl_redis = bl_database.connect_redis_pool()
    # Pipeline batches the per-loop GET/SET round trips into one request
    pipe = self.bl_redis.pipeline()

    # Where information will be published to
    #self.pub = BLspec.connect_redis_manager_HA()
    self.pub_database = redis_database.Database(
        settings=self.site.CONTROL_DATABASE_SETTINGS)
    self.pub = self.pub_database.connect_redis_manager_HA()

    # For beamline T
    #self.pubsub = self.pub.pubsub()
    #self.pubsub.subscribe('run_info_T')

    try:
        # Initial check of the db on startup
        run_data = self.pub.hgetall("current_run_" + self.beamline)
        if (run_data):
            # alert the media
            self.pub.publish('newdir:' + self.beamline, self.current_dir)
            # save the info
            self.pub.set('datadir_' + self.beamline, self.current_dir)

        # Main loop
        count = 1
        saved_adsc_state = False
        while (self.Go):

            # Check the redis db for a new run; each beamline stores its run
            # info under different keys
            if self.beamline == "C":
                current_run, current_dir, current_adsc_state, test = pipe.get(
                    'RUN_INFO_SV').get("ADX_DIRECTORY_SV").get(
                        "ADSC_SV").set('RUN_INFO_SV', '').execute()
            elif self.beamline == "E":
                current_run, current_dir, current_adsc_state, test = pipe.get(
                    "RUN_INFO_SV").get("EIGER_DIRECTORY_SV").get(
                        "EIGER_SV").set('RUN_INFO_SV', '').execute()
            elif self.beamline == "T":
                #current_dir renamed below, but gets rid of error
                current_dir, current_adsc_state = pipe.get(
                    "EIGER_DIRECTORY_SV").get("EIGER_SV").execute()
                # T run info arrives on a list rather than a plain key
                current_run = self.pub.rpop('run_info_T')
                #print self.pub.llen('run_info_T')
                #print self.pub.lrange('run_info_T', 0, -1)
                #current_run = self.pubsub.get_message()['data']
                #print current_run
                if current_run == None:
                    current_run = ''

            if (len(current_run) > 0):
                # NOTE(review): pushing back onto run_info_T for beamline E
                # looks inverted (T data re-queued only when on E) -- confirm
                if self.beamline == "E":
                    self.pub.lpush('run_info_T', current_run)
                    #self.pub.publish('run_info_T', current_run)
                # Set variable
                self.current_run = current_run
                # Split it
                cur_run = current_run.split("_") #runid,first#,total#,dist,energy,transmission,omega_start,deltaomega,time,timestamp
                # Arbitrary wait for Console to update Redis database
                time.sleep(0.01)
                # Get extra run data
                extra_data = self.getRunData()
                if self.beamline == "T":
                    # RDMA path is derived from dir + prefix + run + image num
                    current_dir = "/epu2/rdma%s%s_%d_%06d" % (
                        current_dir, extra_data['prefix'], int(cur_run[0]),
                        int(cur_run[1]))
                # Compose the run_data object
                run_data = {
                    'directory': current_dir,
                    'prefix': extra_data['prefix'],
                    'run_number': int(cur_run[0]),
                    'start': int(cur_run[1]),
                    'total': int(cur_run[2]),
                    'distance': float(cur_run[3]),
                    'twotheta': extra_data['twotheta'],
                    'phi': extra_data['phi'],
                    'kappa': extra_data['kappa'],
                    'omega': float(cur_run[6]),
                    'axis': 'omega',
                    "width": float(cur_run[7]),
                    "time": float(cur_run[8]),
                    "beamline": self.beamline,
                    "file_source": beamline_settings[self.beamline]['file_source'],
                    "status": "STARTED"
                }
                # Logging
                self.logger.info(run_data)
                #Save data into db
                self.pub.hmset('current_run_' + self.beamline, run_data)
                self.pub.publish('current_run_' + self.beamline,
                                 json.dumps(run_data))
                #Signal the main thread
                if (self.notify):
                    self.notify(
                        ("%s RUN" % beamline_settings[self.beamline]['file_source'],
                         run_data))

            # Check if the data collection directory is new
            if (self.current_dir != current_dir):
                self.logger.debug("New directory")
                #save the new dir
                self.current_dir = current_dir
                #alert the media
                self.logger.debug(
                    "Publish %s %s" %
                    ('newdir:' + self.beamline, self.current_dir))
                self.pub.publish('newdir:' + self.beamline, self.current_dir)
                #save the info
                self.pub.set('datadir_' + self.beamline, current_dir)

            # Watch for run aborting; only fire once per ABORTED transition
            if (current_adsc_state == "ABORTED"
                    and current_adsc_state != saved_adsc_state):
                # Keep track of the detector state
                saved_adsc_state = current_adsc_state
                # Alert the media
                if (self.notify):
                    self.notify(
                        ("%s_ABORT" % beamline_settings[self.beamline]['file_source'],
                         None))
            else:
                saved_adsc_state = current_adsc_state

            """ #### Turned off, so I dont screw up IDE
            #send test data for rastersnap heartbeat
            if (count % 100 == 0):
                #reset the counter
                count = 1
                # Logging
                self.logger.info('Publishing filecreate:%s, %s' % (self.beamline, beamline_settings[self.beamline]['rastersnap_test_image']))
                # Publish the test image
                self.pub.publish('filecreate:%s'%self.beamline, beamline_settings[self.beamline]['rastersnap_test_image'])
            # Watch the crystal & distl params
            if (count % 60) == 0:
                try:
                    crystal_request,distl_request,best_request = pipe.get("CP_REQUESTOR_SV").get("DP_REQUESTOR_SV").get("BEST_REQUESTOR_SV").execute()
                    if (distl_request):
                        #if (distl_request != self.current_dpreq):
                        if (distl_request not in self.dpreqs):
                            self.dpreqs.append(distl_request)
                            self.logger.debug(self.dpreqs)
                            self.current_dpreq = distl_request
                            if self.logger:
                                self.logger.debug('ConsoleRedisMonitor New distl parameters request for %s' % distl_request)
                            if (self.notify):
                                self.notify(("DISTL_PARMS_REQUEST",distl_request))
                    if (crystal_request):
                        #if (crystal_request != self.current_cpreq):
                        if (crystal_request not in self.cpreqs):
                            self.cpreqs.append(crystal_request)
                            self.current_cpreq = crystal_request
                            if self.logger:
                                self.logger.debug('ConsoleRedisMonitor New crystal parameters request for %s' % crystal_request)
                            if (self.notify):
                                self.notify(("CRYSTAL_PARMS_REQUEST",crystal_request))
                    if (best_request):
                        if (best_request != self.current_breq):
                            self.current_breq = best_request
                            if self.logger:
                                self.logger.debug('ConsoleRedisMonitor New best parameters request')
                            if (self.notify):
                                self.notify(("BEST_PARMS_REQUEST",best_request))
                except:
                    self.logger.debug('ConsoleRedisMonitor Exception in querying for tracker requests')
            """

            # Increment the counter
            count += 1

            # Sleep before checking again
            time.sleep(0.1)

    except redis.exceptions.ConnectionError:
        if self.logger:
            self.logger.debug(
                'ConsoleRedisMonitor failure to connect - will reconnect')
        time.sleep(10)
        # Try to re-establish both connections for up to 1000 attempts
        reconnect_counter = 0
        while (reconnect_counter < 1000):
            try:
                try:
                    self.red.ping()
                except:
                    self.red = redis.Redis(
                        beamline_settings[self.beamline]['redis_ip'])
                try:
                    self.pub.ping()
                except:
                    """
                    #self.pub = redis.Redis(beamline_settings[self.beamline]['remote_redis_ip'])
                    self.pub = pysent.RedisManager(sentinel_host="remote.nec.aps.anl.gov",
                              sentinel_port=26379,
                              master_name="remote_master")
                    """
                    self.pub = BLspec.connect_redis_manager_HA()

                #test connections
                self.red.ping()

                if self.logger:
                    self.logger.debug(
                        'Reconnection to redis server successful')
                break
            except:
                reconnect_counter += 1
                if self.logger:
                    self.logger.debug(
                        'Reconnection attempt %d failed, will try again'
                        % reconnect_counter)
                time.sleep(10)
def register(self, custom_vars=None):
    """
    Register the process with the central redis database.

    Keyword arguments:
    custom_vars -- dict containing custom elements to put in redis database
                   (default None, treated as an empty dict)
    """
    # None default avoids the shared-mutable-default-argument pitfall of
    # the previous `custom_vars={}` signature; passing {} still works.
    if custom_vars is None:
        custom_vars = {}

    # Get connection
    red = self.redis

    # Identify this host; fall back gracefully if DNS resolution fails
    hostname = socket.gethostname()
    try:
        host_ip = socket.gethostbyname(socket.gethostname())
    except socket.gaierror:
        host_ip = "unknown"

    # Create an entry
    entry = {"ow_type": self.ow_type,
             "host_ip": host_ip,
             "hostname": hostname,
             "id": self.uuid,
             "ow_id": self.ow_id,
             "start_time": time.time(),
             "status": "initializing",
             "timestamp": time.time()}

    # If custom_vars have been passed, add them
    entry.update(custom_vars)

    # Check for launchers
    launcher = entry.get('job_list', False)

    # Wrap potential redis down
    try:
        # Put entry in the redis db
        red.hmset("OW:" + self.uuid, entry)

        # Expire the current entry in N seconds
        red.expire("OW:" + self.uuid, OVERWATCH_TIMEOUT)

        # Announce by publishing
        red.publish("OW:registering", json.dumps(entry))

        # If this process has an overwatcher, mirror the entry under it
        if self.ow_id is not None:
            # Put entry in the redis db
            red.hmset("OW:" + self.uuid + ":" + self.ow_id, entry)

            # Expire the current entry in N seconds
            red.expire("OW:" + self.uuid + ":" + self.ow_id, OVERWATCH_TIMEOUT)

        # Used to monitor which launchers are running.
        if launcher:
            # Put entry in the redis db
            red.set("OW:" + launcher, 1)

            # Expire the current entry in N seconds
            red.expire("OW:" + launcher, OVERWATCH_TIMEOUT)

    # Redis is down
    except redis.exceptions.ConnectionError:
        # Parenthesized print works identically on Python 2 and 3
        print("Redis appears to be down")
def update_image_stats(self, result_db, wedges_db):
    """Publish image statistics to the Redis database for Remote project"s benefit

    Arguments:
    result_db -- dict of autoindexing results (labelit_* keys, status flags)
    wedges_db -- iterable of strategy wedge dicts, each carrying a
                 "strategy_type" of "normal" or "anomalous"

    Publishes on channel image_stats_<beamline> and inserts into the remote
    MongoDB autoindex_data collection.
    """
    if self.logger:
        self.logger.debug("Remote.update_image_stats")
        self.logger.debug(result_db)
        self.logger.debug(wedges_db)

    # Data will go in here
    indexing_data = {}
    # Default so .get() calls below are safe even if no wedge matches
    wedge = {}

    # If the indexing worked
    if result_db["labelit_status"] == "SUCCESS":
        indexing_data.update({
            "status": "indexed",
            "beamline": self.beamline,
            "fullname": result_db.get("fullname", 0),
            "image_id": result_db.get("image_id", 0),
            "pointgroup": result_db.get("labelit_spacegroup"),
            "a": result_db.get("labelit_a"),
            "b": result_db.get("labelit_b"),
            "c": result_db.get("labelit_c"),
            "alpha": result_db.get("labelit_alpha"),
            "beta": result_db.get("labelit_beta"),
            "gamma": result_db.get("labelit_gamma"),
            "resolution": result_db.get("distl_labelit_res"),
            "mosaicity": result_db.get("labelit_mosaicity"),
            "overloads": result_db.get("distl_overloads")
        })

        # If we have a normal strategy
        if result_db["best_norm_status"] == "SUCCESS":
            # Get the normal strategy wedge
            # NOTE(review): if no wedge has strategy_type "normal", the loop
            # falls through leaving `wedge` as the LAST wedge (or {} default)
            for wedge in wedges_db:
                if wedge["strategy_type"] == "normal":
                    break
            indexing_data.update({
                "status": "normal",
                "normal_omega_start": wedge.get("phi_start", -1),
                "normal_omega_step": wedge.get("delta_phi", -1),
                "normal_number_images": wedge.get("number_images", -1)
            })

        # If we have an anomalous strategy
        if result_db["best_anom_status"] == "SUCCESS":
            # Get the anomalous strategy wedge (same fall-through caveat)
            for wedge in wedges_db:
                if wedge["strategy_type"] == "anomalous":
                    break
            indexing_data.update({
                "status": "all",
                "anom_omega_start": wedge.get("phi_start", -1),
                "anom_omega_step": wedge.get("delta_phi", -1),
                "anom_number_images": wedge.get("number_images", -1)
            })

    # No indexing solution
    else:
        indexing_data.update({
            "status": "failure",
            "beamline": self.beamline,
            "fullname": result_db.get("fullname", 0),
            "image_id": result_db.get("image_id", 0)
        })

    # Publish to redis connectioni
    if self.logger:
        self.logger.debug("Publishing image_stats_%s %s" %
                          (self.beamline, json.dumps(indexing_data)))
    self.redis.publish("image_stats_%s" % self.beamline,
                       json.dumps(indexing_data))

    # Save to Mongo
    try:
        self.mongo_remote.autoindex_data.insert(indexing_data)
    # TODO - handle mongoDB exceptions
    except:
        if self.logger:
            self.logger.exception("Error writing run_data to MongoDB")
def register_OLD(self, custom_vars=None):
    """
    Register the process with the central redis database (legacy variant;
    unlike register(), this entry carries no "id" field).

    Keyword arguments:
    custom_vars -- dict containing custom elements to put in redis database
                   (default None, treated as an empty dict)
    """
    # None default avoids the shared-mutable-default-argument pitfall
    if custom_vars is None:
        custom_vars = {}

    # Get connection
    red = self.redis

    # Identify this host; fall back gracefully if DNS resolution fails
    hostname = socket.gethostname()
    try:
        host_ip = socket.gethostbyname(socket.gethostname())
    except socket.gaierror:
        host_ip = "unknown"

    # Create an entry
    entry = {
        "ow_type": self.ow_type,
        "host_ip": host_ip,
        "hostname": hostname,
        "ow_id": self.ow_id,
        "start_time": time.time(),
        "status": "initializing",
        "timestamp": time.time()
    }

    # If custom_vars have been passed, add them
    entry.update(custom_vars)

    # Check for launchers
    launcher = entry.get('job_list', False)
    #print launcher

    # Wrap potential redis down
    try:
        # Put entry in the redis db
        red.hmset("OW:" + self.uuid, entry)

        # Expire the current entry in N seconds
        red.expire("OW:" + self.uuid, OVERWATCH_TIMEOUT)

        # Announce by publishing
        red.publish("OW:registering", json.dumps(entry))

        # If this process has an overwatcher, mirror the entry under it
        if self.ow_id is not None:
            # Put entry in the redis db
            red.hmset("OW:" + self.uuid + ":" + self.ow_id, entry)

            # Expire the current entry in N seconds
            red.expire("OW:" + self.uuid + ":" + self.ow_id,
                       OVERWATCH_TIMEOUT)

        # Used to monitor which launchers are running.
        if launcher:
            # Put entry in the redis db
            red.set("OW:" + launcher, 1)

            # Expire the current entry in N seconds
            red.expire("OW:" + launcher, OVERWATCH_TIMEOUT)

    # Redis is down
    except redis.exceptions.ConnectionError:
        # Parenthesized print works identically on Python 2 and 3
        print("Redis appears to be down")
def update_run_stats(self, result_db, wedges):
    """
    Publish run statistics to the Redis database for Remote project's benefit

    Arguments:
    result_db -- dict of integration results for the run
    wedges    -- iterable of resolution-shell dicts ('overall', 'outer',
                 'inner' shell_type entries are used)

    Publishes on channel run_stats_<beamline> and upserts into the remote
    MongoDB integration_data collection keyed by run_id.
    """
    if self.logger:
        self.logger.debug('Remote.update_run_stats')
        self.logger.debug(result_db)
        self.logger.debug(wedges)

    # Derive the session_id
    # NOTE(review): session_id is computed but never used below
    session_id = result_db["data_root_dir"].split("_")[-1]

    #
    integration_data = {}

    # Assign the basic data
    integration_data.update({'status' : result_db.get('integrate_status').lower(),
                             'beamline' : self.beamline,
                             'run_id' : result_db.get('run_id', -1),
                             'spacegroup' : result_db.get('spacegroup', 'P0'),
                             'a' : result_db.get('a', -1),
                             'b' : result_db.get('b', -1),
                             'c' : result_db.get('c', -1),
                             'alpha' : result_db.get('alpha', -1),
                             'beta' : result_db.get('beta', -1),
                             'gamma' : result_db.get('gamma', -1),
                             'image_start' : result_db.get('image_start', -1),
                             'image_end' : result_db.get('image_end', -1)})

    # Organize the wedges: flatten each shell's stats under <stat>_<shell> keys
    if wedges:
        for wedge in wedges:
            wtag = wedge.get('shell_type')
            if wtag in ('overall', 'outer', 'inner'):
                integration_data.update(
                    {'res_low_'+wtag : wedge.get('low_res', -1),
                     'res_high_'+wtag : wedge.get('high_res', -1),
                     'completeness_'+wtag : wedge.get('completeness', -1),
                     'multiplicity_'+wtag : wedge.get('multiplicity', -1),
                     'i_sigi_'+wtag : wedge.get('i_sigma', -1),
                     'rmeas_'+wtag : wedge.get('r_meas', -1),
                     'rpim_'+wtag : wedge.get('r_pim', -1),
                     'anom_completeness_'+wtag : wedge.get('anom_completeness', -1),
                     'anom_multiplicity_'+wtag : wedge.get('anom_multiplicity', -1),
                     'anom_rmeas_'+wtag : wedge.get('r_meas_pm', -1),
                     'anom_rpim_'+wtag : wedge.get('r_pim_pm', -1),
                     'anom_corr_'+wtag : wedge.get('anom_correlation', -1),
                     'anom_slope_'+wtag : wedge.get('anom_slope', -1),
                     'ref_total_'+wtag : wedge.get('total_obs', -1),
                     'ref_unique_'+wtag : wedge.get('unique_obs', -1)
                    })

    if self.logger:
        self.logger.debug('Sending integration_data to remote')
        self.logger.debug(integration_data)

    # Publish to redis connection
    #self.redis.publish('run_stats_'+self.beamline,data)

    # Save the data to redis
    j_data = json.dumps(integration_data)

    # Publish to redis connections
    self.redis.publish('run_stats_'+self.beamline, j_data)

    # Save to Mongo
    try:
        # NOTE(review): this debug call is not guarded by `if self.logger`,
        # unlike the others; a falsy logger would raise here and be swallowed
        # by the bare except below
        self.logger.debug('Saving to mongoDB')
        self.mongo_remote.integration_data.update(
            {"run_id":result_db.get("run_id", -1)},
            integration_data,
            upsert=True,
            multi=False)
    # TODO - handle mongoDB exceptions
    except:
        if self.logger:
            self.logger.exception("Error writing run_data to MongoDB")
def Q315ReadHeader(image, run_id=None, place_in_run=None, logger=False):
    """
    Given a full file name for an ADSC image (as a string), read the header
    and return a dict with all the header info.

    Arguments:
    image        -- full path of the ADSC .img file
    run_id       -- optional run identifier carried into the result
    place_in_run -- optional position of this image within its run
    logger       -- optional logger instance (False disables logging)

    Returns a dict of parsed header values plus filename-derived metadata,
    or None if the header cannot be read/parsed.
    """
    if logger:
        logger.debug('Q315ReadHeader %s' % image)

    # item: (regex pattern, transform applied to the captured group).
    # Patterns are raw strings; the originals were not, which made \d etc.
    # invalid escape sequences on modern Python.
    header_items = {
        'header_bytes' : (r"^HEADER_BYTES=\s*(\d+)\;", lambda x: int(x)),
        'dim'          : (r"^DIM=\s*(\d+)\;", lambda x: int(x)),
        'byte_order'   : (r"^BYTE_ORDER=\s*([\w_]+)\;", lambda x: str(x)),
        'type'         : (r"^TYPE=\s*([\w_]+)\;", lambda x: str(x)),
        'size1'        : (r"^SIZE1=\s*(\d+)\;", lambda x: int(x)),
        'size2'        : (r"^SIZE2=\s*(\d+)\;", lambda x: int(x)),
        'pixel_size'   : (r"^PIXEL_SIZE=\s*([\d\.]+)\;", lambda x: float(x)),
        'bin'          : (r"^BIN=\s*(\w*)\;", lambda x: str(x)),
        'adc'          : (r"^ADC=\s*(\w+)\;", lambda x: str(x)),
        'detector_sn'  : (r"^DETECTOR_SN=\s*(\d+)\;", lambda x: int(x)),
        'collect_mode' : (r"^COLLECT_MODE=\s*(\w*)\;", lambda x: str(x)),
        'beamline'     : (r"^BEAMLINE=\s*(\w+)\;", lambda x: str(x)),
        'date'         : (r"^DATE=\s*([\w\d\s\:]*)\;", date_adsc_to_sql),
        'time'         : (r"^TIME=\s*([\d\.]+)\;", lambda x: float(x)),
        'distance'     : (r"^DISTANCE=\s*([\d\.]+)\;", lambda x: float(x)),
        'osc_range'    : (r"^OSC_RANGE=\s*([\d\.]+)\;", lambda x: float(x)),
        'phi'          : (r"^PHI=\s*([\d\.]+)\;", lambda x: float(x)),
        'osc_start'    : (r"^OSC_START=\s*([\d\.]+)\;", lambda x: float(x)),
        'twotheta'     : (r"^TWOTHETA=\s*([\d\.]+)\;", lambda x: float(x)),
        'thetadistance': (r"^THETADISTANCE=\s*([\d\.]+)\;", lambda x: float(x)),
        #'axis'        : (r"^AXIS=\s*(\w+)\;", lambda x: str(x)),
        'wavelength'   : (r"^WAVELENGTH=\s*([\d\.]+)\;", lambda x: float(x)),
        'beam_center_x': (r"^BEAM_CENTER_X=\s*([\d\.]+)\;", lambda x: float(x)),
        'beam_center_y': (r"^BEAM_CENTER_Y=\s*([\d\.]+)\;", lambda x: float(x)),
        'transmission' : (r"^TRANSMISSION=\s*([\d\.]+)\;", lambda x: float(x)),
        'puck'         : (r"^PUCK=\s*(\w+)\;", lambda x: str(x)),
        'sample'       : (r"^SAMPLE=\s*([\d\w]+)\;", lambda x: str(x)),
        'ring_cur'     : (r"^RING_CUR=\s*([\d\.]+)\;", lambda x: float(x)),
        'ring_mode'    : (r"^RING_MODE=\s*(.*)\;", lambda x: str(x)),
        'aperture'     : (r"^MD2_APERTURE=\s*(\d+)\;", lambda x: int(x)),
        'period'       : (r"^# Exposure_period\s*([\d\.]+) s", lambda x: float(x)),
        'count_cutoff' : (r"^# Count_cutoff\s*(\d+) counts", lambda x: int(x))}

    # The detector may still be writing the file - retry up to 10 times
    header = None
    count = 0
    while (count < 10):
        try:
            # `with` guarantees the file handle is closed; the original
            # open(...).read() leaked the descriptor on non-CPython runtimes
            with open(image, "rb") as raw_file:
                rawdata = raw_file.read()
            headeropen = rawdata.index("{")
            headerclose = rawdata.index("}")
            # NOTE: end offset preserved from the original code; for ADSC
            # files the "{" is at offset 0, making this equal to headerclose
            header = rawdata[headeropen+1:headerclose-headeropen]
            break
        except Exception:
            count += 1
            if logger:
                logger.exception('Error opening %s' % image)
            time.sleep(0.1)

    try:
        # Tease out the info from the file name: prefix_run_imagenumber.img
        base = os.path.basename(image)
        # Remove only the literal suffix. The original base.rstrip(".img")
        # strips any trailing run of the characters {., i, m, g}, which can
        # eat into prefixes/numbers ending in those characters.
        if base.endswith(".img"):
            base = base[:-len(".img")]

        # The parameters
        parameters = {'fullname'     : image,
                      'detector'     : 'ADSC-Q315',
                      'directory'    : os.path.dirname(image),
                      'image_prefix' : "_".join(base.split("_")[0:-2]),
                      'run_number'   : int(base.split("_")[-2]),
                      'image_number' : int(base.split("_")[-1]),
                      'axis'         : 'omega',
                      'run_id'       : run_id,
                      'place_in_run' : place_in_run}

        # Apply each pattern; missing keys become None
        for label, pat in header_items.items():
            pattern = re.compile(pat[0], re.MULTILINE)
            matches = pattern.findall(header)
            if len(matches) > 0:
                parameters[label] = pat[1](matches[-1])
            else:
                parameters[label] = None

        # If twotheta is in use, distance = twothetadist
        try:
            if (parameters['twotheta'] > 0 and
                    parameters['thetadistance'] > 100):
                parameters['distance'] = parameters['thetadistance']
        except Exception:
            if logger:
                logger.exception('Error handling twotheta for image %s' % image)

        # Look for bad text in certain entries NECAT-code
        try:
            json.dumps(parameters['ring_mode'])
        except Exception:
            parameters['ring_mode'] = 'Error'

        # Return parameters to the caller
        return(parameters)

    except Exception:
        # Preserves original best-effort contract: log and return None
        if logger:
            logger.exception('Error reading the header for image %s' % image)
def run(self):
    """
    The while loop for watching the files

    Polls disk-backed run/image info and republishes anything new to Redis
    (publish for live listeners, lpush so offline consumers can catch up).
    SERCAT_BM additionally polls for individual collected images.
    """
    print "run"
    self.logger.info("SercatGatherer.run")

    # Set up overwatcher
    self.ow_registrar = Registrar(site=self.site,
                                  ow_type="gatherer",
                                  ow_id=self.overwatch_id)
    self.ow_registrar.register({"site_id":self.site.ID})

    # Get redis connection
    red = redis.Redis(connection_pool=self.redis_pool)

    print " Will publish new runs on run_data:%s" % self.tag
    print " Will push new runs onto runs_data:%s" % self.tag
    self.logger.debug(" Will publish new runs on run_data:%s" % self.tag)
    self.logger.debug(" Will push new runs onto runs_data:%s" % self.tag)

    if self.tag == 'SERCAT_BM':
        print " Will publish new images on image_collected:%s" % self.tag
        print " Will push new images onto images_collected:%s" % self.tag
        self.logger.debug(" Will publish new images on image_collected:%s" % self.tag)
        self.logger.debug(" Will push new images onto images_collected:%s" % self.tag)

    try:
        while self.go:
            #print "go"
            if self.tag == 'SERCAT_BM':
                # 5 rounds of checking for run info per outer iteration
                for ___ in range(5):
                    # Check if the run info has changed on the disk
                    if self.check_for_run_info():
                        run_data = self.get_run_data()
                        if run_data:
                            self.logger.debug("run(s)_data:%s %s",
                                              self.tag, run_data)
                            # Put into exchangable format
                            run_data_json = json.dumps(run_data)
                            # Publish to Redis
                            red.publish("run_data:%s" % self.tag,
                                        run_data_json)
                            # Push onto redis list in case no one is
                            # currently listening
                            red.lpush("runs_data:%s" % self.tag,
                                      run_data_json)
                    # 20 image checks (break out as soon as one is seen)
                    for __ in range(20):
                        # Check if the image file has changed
                        if self.check_for_image_collected():
                            image_name = self.get_image_data()
                            if image_name:
                                self.logger.debug("image_collected:%s %s",
                                                  self.tag, image_name)
                                # Publish to Redis
                                red.publish("image_collected:%s" % self.tag,
                                            image_name)
                                # Push onto redis list in case no one is
                                # currently listening
                                red.lpush("images_collected:%s" % self.tag,
                                          image_name)
                            break
                        else:
                            time.sleep(0.05)
            # For SERCAT_ID
            else:
                # Check if the run info has changed on the disk
                if self.check_for_run_info():
                    run_data = self.get_run_data()
                    if run_data:
                        self.logger.debug("run(s)_data:%s %s",
                                          self.tag, run_data)
                        # Put into exchangable format
                        run_data_json = json.dumps(run_data)
                        # Publish to Redis
                        red.publish("run_data:%s" % self.tag, run_data_json)
                        # Push onto redis list in case no one is currently
                        # listening
                        red.lpush("runs_data:%s" % self.tag, run_data_json)
                else:
                    time.sleep(1.0)

            # Have Registrar update status
            self.ow_registrar.update({"site_id":self.site.ID})

    except KeyboardInterrupt:
        self.stop()
def read_header(image, run_id=None, place_in_run=None):
    """
    Given a full file name for an ADSC image (as a string), read the header
    and return a dict with all the header info

    NB - This code was developed for the NE-CAT ADSC Q315, so the header
    is bound to be very specific.

    Arguments:
    image        -- full path of the ADSC .img file
    run_id       -- optional run identifier carried into the result
    place_in_run -- optional position of this image within its run

    Returns a dict of parsed header values plus filename-derived metadata.
    Header keys absent from the file come back as None.
    """
    # item: (regex pattern, transform applied to the captured group)
    header_items = {
        "header_bytes" : (r"^HEADER_BYTES=\s*(\d+)\;", lambda x: int(x)),
        "dim"          : (r"^DIM=\s*(\d+)\;", lambda x: int(x)),
        "byte_order"   : (r"^BYTE_ORDER=\s*([\w_]+)\;", lambda x: str(x)),
        "type"         : (r"^TYPE=\s*([\w_]+)\;", lambda x: str(x)),
        "size1"        : (r"^SIZE1=\s*(\d+)\;", lambda x: int(x)),
        "size2"        : (r"^SIZE2=\s*(\d+)\;", lambda x: int(x)),
        "pixel_size"   : (r"^PIXEL_SIZE=\s*([\d\.]+)\;", lambda x: float(x)),
        "bin"          : (r"^BIN=\s*(\w*)\;", lambda x: str(x)),
        "adc"          : (r"^ADC=\s*(\w+)\;", lambda x: str(x)),
        "detector_sn"  : (r"^DETECTOR_SN=\s*(\d+)\;", lambda x: int(x)),
        "collect_mode" : (r"^COLLECT_MODE=\s*(\w*)\;", lambda x: str(x)),
        "site"         : (r"^BEAMLINE=\s*(\w+)\;", lambda x: str(x)),
        "date"         : (r"^DATE=\s*([\w\d\s\:]*)\;", date_adsc_to_sql),
        "time"         : (r"^TIME=\s*([\d\.]+)\;", lambda x: float(x)),
        "distance"     : (r"^DISTANCE=\s*([\d\.]+)\;", lambda x: float(x)),
        "osc_range"    : (r"^OSC_RANGE=\s*([\d\.]+)\;", lambda x: float(x)),
        "sweeps"       : (r"^SWEEPS=\s*([\d]+)\;", lambda x: int(x)),
        "phi"          : (r"^PHI=\s*([\d\.]+)\;", lambda x: float(x)),
        "osc_start"    : (r"^OSC_START=\s*([\d\.]+)\;", lambda x: float(x)),
        "twotheta"     : (r"^TWOTHETA=\s*([\d\.]+)\;", lambda x: float(x)),
        "thetadistance": (r"^THETADISTANCE=\s*([\d\.]+)\;", lambda x: float(x)),
        "axis"         : (r"^AXIS=\s*(\w+)\;", lambda x: str(x)),
        "wavelength"   : (r"^WAVELENGTH=\s*([\d\.]+)\;", lambda x: float(x)),
        "beam_center_x": (r"^BEAM_CENTER_X=\s*([\d\.]+)\;", lambda x: float(x)),
        "beam_center_y": (r"^BEAM_CENTER_Y=\s*([\d\.]+)\;", lambda x: float(x)),
        "transmission" : (r"^TRANSMISSION=\s*([\d\.]+)\;", lambda x: float(x)),
        "puck"         : (r"^PUCK=\s*(\w+)\;", lambda x: str(x)),
        "sample"       : (r"^SAMPLE=\s*([\d\w]+)\;", lambda x: str(x)),
        "ring_cur"     : (r"^RING_CUR=\s*([\d\.]+)\;", lambda x: float(x)),
        "ring_mode"    : (r"^RING_MODE=\s*(.*)\;", lambda x: str(x)),
        "aperture"     : (r"^MD2_APERTURE=\s*(\d+)\;", lambda x: int(x))}
        # "period"       : (r"^# Exposure_period\s*([\d\.]+) s", lambda x: float(x)),
        # "count_cutoff" : (r"^# Count_cutoff\s*(\d+) counts", lambda x: int(x))}

    # The detector may still be writing the file - retry up to 10 times
    header = None
    count = 0
    while count < 10:
        try:
            # `with` guarantees the file handle is closed; the original
            # open(...).read() leaked the descriptor on non-CPython runtimes
            with open(image, "rb") as raw_file:
                rawdata = raw_file.read()
            headeropen = rawdata.index("{")
            headerclose = rawdata.index("}")
            # NOTE: end offset preserved from original; "{" sits at offset 0
            # in ADSC files, making this equal to headerclose
            header = rawdata[headeropen+1:headerclose-headeropen]
            break
        except Exception:
            count += 1
            time.sleep(0.1)

    # Tease out the info from the file name: prefix_run_imagenumber.img
    base = os.path.basename(image)
    # Remove only the literal suffix. The original base.rstrip(".img")
    # strips any trailing run of the characters {., i, m, g}, which can
    # corrupt names ending in those characters.
    if base.endswith(".img"):
        base = base[:-len(".img")]

    # The parameters
    parameters = {"fullname"     : image,
                  "detector"     : "ADSC-Q315",
                  # directory of the image file
                  "directory"    : os.path.dirname(image),
                  # image name without directory or image suffix
                  "basename"     : base,
                  # image name without directory, run_number, image_number
                  # or image suffix
                  "image_prefix" : "_".join(base.split("_")[0:-2]),
                  #"run_number"  : False,
                  "run_number"   : int(base.split("_")[-2]),
                  "image_number" : int(base.split("_")[-1]),
                  "axis"         : "omega",
                  "run_id"       : run_id,
                  "place_in_run" : place_in_run,
                  # Default saturation value; overwritten if present in header
                  "count_cutoff": 65535}

    # Apply each pattern; missing keys become None
    for label, pat in header_items.items():
        pattern = re.compile(pat[0], re.MULTILINE)
        matches = pattern.findall(header)
        if len(matches) > 0:
            parameters[label] = pat[1](matches[-1])
        else:
            parameters[label] = None

    # Translate the wavelength to energy E = hc/lambda.
    # Guard against a missing or zero WAVELENGTH entry; the original
    # divided unconditionally and raised TypeError/ZeroDivisionError.
    if parameters["wavelength"]:
        parameters["energy"] = 1239.84193 / parameters["wavelength"]
    else:
        parameters["energy"] = None

    # If twotheta is in use, distance = twothetadist
    try:
        if parameters["twotheta"] > 0 and parameters["thetadistance"] > 100:
            parameters["distance"] = parameters["thetadistance"]
    except Exception:
        # Best effort - missing values simply leave distance unchanged
        pass

    # Look for bad text in certain entries NECAT-code
    try:
        json.dumps(parameters["ring_mode"])
    except Exception:
        parameters["ring_mode"] = "Error"

    # Return parameters to the caller
    return parameters
def update_run_stats(self, result_db, wedges):
    """
    Publish run statistics to the Redis database for Remote project"s benefit

    Arguments:
    result_db -- dict of integration results for the run
    wedges    -- iterable of resolution-shell dicts ("overall", "outer",
                 "inner" shell_type entries are used)

    Publishes on channel run_stats_<beamline> and upserts into the remote
    MongoDB integration_data collection keyed by run_id.
    """
    if self.logger:
        self.logger.debug("Remote.update_run_stats")
        self.logger.debug(result_db)
        self.logger.debug(wedges)

    # Derive the session_id
    # NOTE(review): session_id is computed but never used below
    session_id = result_db["data_root_dir"].split("_")[-1]

    #
    integration_data = {}

    # Assign the basic data
    integration_data.update({"status" : result_db.get("integrate_status").lower(),
                             "beamline" : self.beamline,
                             "run_id" : result_db.get("run_id", -1),
                             "spacegroup" : result_db.get("spacegroup", "P0"),
                             "a" : result_db.get("a", -1),
                             "b" : result_db.get("b", -1),
                             "c" : result_db.get("c", -1),
                             "alpha" : result_db.get("alpha", -1),
                             "beta" : result_db.get("beta", -1),
                             "gamma" : result_db.get("gamma", -1),
                             "image_start" : result_db.get("image_start", -1),
                             "image_end" : result_db.get("image_end", -1)})

    # Organize the wedges: flatten each shell's stats under <stat>_<shell> keys
    if wedges:
        for wedge in wedges:
            wtag = wedge.get("shell_type")
            if wtag in ("overall", "outer", "inner"):
                integration_data.update(
                    {"res_low_"+wtag : wedge.get("low_res", -1),
                     "res_high_"+wtag : wedge.get("high_res", -1),
                     "completeness_"+wtag : wedge.get("completeness", -1),
                     "multiplicity_"+wtag : wedge.get("multiplicity", -1),
                     "i_sigi_"+wtag : wedge.get("i_sigma", -1),
                     "rmeas_"+wtag : wedge.get("r_meas", -1),
                     "rpim_"+wtag : wedge.get("r_pim", -1),
                     "anom_completeness_"+wtag : wedge.get("anom_completeness", -1),
                     "anom_multiplicity_"+wtag : wedge.get("anom_multiplicity", -1),
                     "anom_rmeas_"+wtag : wedge.get("r_meas_pm", -1),
                     "anom_rpim_"+wtag : wedge.get("r_pim_pm", -1),
                     "anom_corr_"+wtag : wedge.get("anom_correlation", -1),
                     "anom_slope_"+wtag : wedge.get("anom_slope", -1),
                     "ref_total_"+wtag : wedge.get("total_obs", -1),
                     "ref_unique_"+wtag : wedge.get("unique_obs", -1)
                    })

    if self.logger:
        self.logger.debug("Sending integration_data to remote")
        self.logger.debug(integration_data)

    # Publish to redis connection
    #self.redis.publish("run_stats_"+self.beamline,data)

    # Save the data to redis
    j_data = json.dumps(integration_data)

    # Publish to redis connections
    self.redis.publish("run_stats_"+self.beamline, j_data)

    # Save to Mongo
    try:
        # NOTE(review): this debug call is not guarded by `if self.logger`,
        # unlike the others; a falsy logger would raise here and be swallowed
        # by the bare except below
        self.logger.debug("Saving to mongoDB")
        self.mongo_remote.integration_data.update(
            {"run_id":result_db.get("run_id", -1)},
            integration_data,
            upsert=True,
            multi=False)
    # TODO - handle mongoDB exceptions
    except:
        if self.logger:
            self.logger.exception("Error writing run_data to MongoDB")
def run(self):
    """
    The while loop for watching the files

    Pops run descriptors off the beamline's run_info_T Redis list, augments
    them with beamline parameters, and republishes on run_data:<tag>.

    NOTE(review): the publish block currently active is the "testing" loop
    that emits each run twice (/epu/ then /epu2/ paths); the single-shot
    version is parked in the triple-quoted string below.
    """
    self.logger.info("NecatGatherer.run")

    # Set up overwatcher
    self.ow_registrar = Registrar(site=self.site,
                                  ow_type="gatherer",
                                  ow_id=self.overwatch_id)
    self.ow_registrar.register({"site_id":self.site.ID})

    # Get redis connection
    #self.logger.debug("  Will publish new images on filecreate:%s" % self.tag)
    #self.logger.debug("  Will push new images onto images_collected:%s" % self.tag)
    self.logger.debug("  Will publish new datasets on run_data:%s" % self.tag)
    self.logger.debug("  Will push new datasets onto run_data:%s" % self.tag)

    try:
        while self.go:
            # Check if the run info changed in beamline Redis DB.
            #current_run = self.bl_redis.get("RUN_INFO_SV")
            current_run = self.redis.rpop('run_info_T')
            if current_run not in (None, ""):
                # Split it
                #cur_run = current_run.split("_") #runid,first#,total#,dist,energy,transmission,omega_start,deltaomega,time,timestamp
                #1_1_23_400.00_12661.90_30.00_45.12_0.20_0.50_
                # Reset it back to an empty string if beamline is E.
                #self.bl_redis.set("RUN_INFO_SV", "")

                # get the additional beamline params and put into nice dict.
                run_data = self.get_run_data(current_run)

                # Get rid of trailing slash from beamline Redis.
                #dir = run_data['directory']
                # Have to remove trailing slash
                #if dir[-1] == '/':
                #    run_data['directory'] = dir[:-1]

                # Build the RDMA path for the dataset
                dir = "/epu/rdma%s%s_%d_%06d" % (
                    run_data['directory'], run_data['image_prefix'],
                    int(run_data['run_number']),
                    int(run_data['start_image_number']))

                if self.ignored(dir):
                    self.logger.debug(
                        "Directory %s is marked to be ignored - skipping", dir)
                else:
                    """
                    run_data['directory'] = dir
                    self.logger.debug("run_data:%s %s", self.tag, run_data)
                    # Put into exchangable format
                    run_data_json = json.dumps(run_data)
                    # Publish to Redis
                    self.redis.publish("run_data:%s" % self.tag, run_data_json)
                    # Push onto redis list in case no one is currently listening
                    self.redis.lpush("run_data:%s" % self.tag, run_data_json)
                    """
                    ## This loop is for testing##
                    # Emits the run twice: once for /epu/ and once for /epu2/
                    for i in range(2):
                        if i == 1:
                            dir = dir.replace('/epu/', '/epu2/')
                        run_data['directory'] = dir
                        self.logger.debug("run_data:%s %s", self.tag, run_data)
                        # Put into exchangable format
                        run_data_json = json.dumps(run_data)
                        # Publish to Redis
                        self.redis.publish("run_data:%s" % self.tag,
                                           run_data_json)
                        # Push onto redis list in case no one is currently
                        # listening
                        self.redis.lpush("run_data:%s" % self.tag,
                                         run_data_json)

            time.sleep(0.2)

            # Have Registrar update status
            self.ow_registrar.update({"site_id":self.site.ID})

    except KeyboardInterrupt:
        self.stop()
def update(self, custom_vars=None):
    """
    Update the status with the central db

    Keyword arguments:
    custom_vars -- dict containing custom elements to put in redis database
                   (default None, treated as an empty dict)
    """
    # None default avoids the shared-mutable-default-argument pitfall of
    # the previous `custom_vars={}` signature; passing {} still works.
    if custom_vars is None:
        custom_vars = {}

    # Get connection
    red = self.redis

    # Create entry
    entry = {
        "ow_type": self.ow_type,
        "id": self.uuid,
        "ow_id": self.ow_id,
        "timestamp": time.time()
    }

    # If custom_vars have been passed, add them
    entry.update(custom_vars)

    # Check for launchers
    launcher = entry.get('job_list', False)

    # Wrap potential redis down
    try:
        # Update timestamp
        red.hset("OW:" + self.uuid, "timestamp", time.time())

        # Update any custom_vars (.items() works on Python 2 and 3;
        # the original used the py2-only .iteritems())
        for k, v in custom_vars.items():
            red.hset("OW:" + self.uuid, k, v)

        # Expire the current entry in N seconds
        red.expire("OW:" + self.uuid, OVERWATCH_TIMEOUT)

        # Announce by publishing
        red.publish("OW:updating", json.dumps(entry))

        # If this process has an overwatcher, mirror the update under it
        if self.ow_id is not None:
            # Put entry in the redis db
            red.hset("OW:" + self.uuid + ":" + self.ow_id, "timestamp",
                     time.time())

            # Update any custom_vars
            for k, v in custom_vars.items():
                red.hset("OW:" + self.uuid + ":" + self.ow_id, k, v)

            # Expire the current entry in N seconds
            red.expire("OW:" + self.uuid + ":" + self.ow_id,
                       OVERWATCH_TIMEOUT)

        # Used by launch_manager to see which launchers are running.
        if launcher:
            # Put entry in the redis db
            red.set("OW:" + launcher, 1)

            # Expire the current entry in N seconds
            red.expire("OW:" + launcher, OVERWATCH_TIMEOUT)

    # Redis is down
    except redis.exceptions.ConnectionError:
        # Parenthesized print works identically on Python 2 and 3
        print("Redis appears to be down")
def update(self, custom_vars=None):
    """Update the status with the central db.

    Writes the "OW:<uuid>" hash (plus the "OW:<uuid>:<ow_id>" hash when an
    overwatcher id is set), refreshes their TTLs, and publishes the entry
    on the "OW:updating" channel.

    Keyword arguments:
    custom_vars -- dict containing custom elements to put in redis database
                   (default None, treated as empty; None replaces the
                   mutable {} default, which is shared across calls)
    """
    if custom_vars is None:
        custom_vars = {}

    # Get connection
    red = self.redis

    # Create entry
    entry = {
        "ow_type": self.ow_type,
        "id": self.uuid,
        "ow_id": self.ow_id,
        "timestamp": time.time()
    }

    # If custom_vars have been passed, add them
    entry.update(custom_vars)

    # Check for launchers
    launcher = entry.get('job_list', False)

    # Build the redis keys once instead of concatenating repeatedly
    own_key = "OW:" + self.uuid

    # Wrap potential redis down
    try:
        # Update timestamp
        red.hset(own_key, "timestamp", time.time())

        # Update any custom_vars (items() is Python 2/3 compatible;
        # iteritems() does not exist on Python 3)
        for k, v in custom_vars.items():
            red.hset(own_key, k, v)

        # Expire the current entry in N seconds
        red.expire(own_key, OVERWATCH_TIMEOUT)

        # Announce by publishing
        red.publish("OW:updating", json.dumps(entry))

        # If this process has an overwatcher
        if self.ow_id is not None:
            watched_key = own_key + ":" + self.ow_id

            # Put entry in the redis db
            red.hset(watched_key, "timestamp", time.time())

            # Update any custom_vars
            for k, v in custom_vars.items():
                red.hset(watched_key, k, v)

            # Expire the current entry in N seconds
            red.expire(watched_key, OVERWATCH_TIMEOUT)

        # Used to monitor which launchers are running.
        if launcher:
            # Put entry in the redis db
            red.set("OW:" + launcher, 1)

            # Expire the current entry in N seconds
            red.expire("OW:" + launcher, OVERWATCH_TIMEOUT)

    # Redis is down
    except redis.exceptions.ConnectionError:
        # Parenthesized single-argument print works on Python 2 and 3
        print("Redis appears to be down")
def run(self):
    """Main watch loop of the gatherer.

    Pops run descriptions from the beamline 'run_info_T' redis list,
    augments them via get_run_data(), rewrites the directory to the rdma
    path, and publishes/pushes the JSON-encoded run onto
    "run_data:<tag>". Also keeps the overwatch Registrar entry fresh on
    every pass. Runs until self.go goes False or KeyboardInterrupt.
    """
    self.logger.info("NecatGatherer.run")

    # Set up overwatcher so this gatherer is visible to the overwatch system
    self.ow_registrar = Registrar(site=self.site,
                                  ow_type="gatherer",
                                  ow_id=self.overwatch_id)
    self.ow_registrar.register({"site_id": self.site.ID})

    # Get redis connection
    #self.logger.debug(" Will publish new images on filecreate:%s" % self.tag)
    #self.logger.debug(" Will push new images onto images_collected:%s" % self.tag)
    self.logger.debug(" Will publish new datasets on run_data:%s" % self.tag)
    self.logger.debug(" Will push new datasets onto run_data:%s" % self.tag)

    try:
        while self.go:
            # Check if the run info changed in beamline Redis DB.
            #current_run = self.bl_redis.get("RUN_INFO_SV")
            current_run = self.redis.rpop('run_info_T')
            if current_run not in (None, ""):
                # Split it
                #cur_run = current_run.split("_")
                #runid,first#,total#,dist,energy,transmission,omega_start,deltaomega,time,timestamp
                #1_1_23_400.00_12661.90_30.00_45.12_0.20_0.50_
                # Reset it back to an empty string if beamline is E.
                #self.bl_redis.set("RUN_INFO_SV", "")

                # get the additional beamline params and put into nice dict.
                run_data = self.get_run_data(current_run)

                # Get rid of trailing slash from beamline Redis.
                #dir = run_data['directory']
                # Have to remove trailing slash
                #if dir[-1] == '/':
                #    run_data['directory'] = dir[:-1]

                # Build the rdma-side image directory path.
                # NOTE(review): 'dir' shadows the builtin; left as-is to keep
                # this doc-only change token-identical.
                dir = "/epu/rdma%s%s_%d_%06d" % (
                    run_data['directory'],
                    run_data['image_prefix'],
                    int(run_data['run_number']),
                    int(run_data['start_image_number']))

                if self.ignored(dir):
                    self.logger.debug(
                        "Directory %s is marked to be ignored - skipping", dir)
                else:
                    # Production publish path, currently disabled in favor of
                    # the test loop below (kept as an inert string literal).
                    """
                    run_data['directory'] = dir
                    self.logger.debug("run_data:%s %s", self.tag, run_data)
                    # Put into exchangable format
                    run_data_json = json.dumps(run_data)
                    # Publish to Redis
                    self.redis.publish("run_data:%s" % self.tag, run_data_json)
                    # Push onto redis list in case no one is currently listening
                    self.redis.lpush("run_data:%s" % self.tag, run_data_json)
                    """
                    ## This loop is for testing##
                    # Publishes the run twice: once with the /epu/ path and
                    # once rewritten to /epu2/.
                    for i in range(2):
                        if i == 1:
                            dir = dir.replace('/epu/', '/epu2/')
                        run_data['directory'] = dir
                        self.logger.debug("run_data:%s %s", self.tag, run_data)
                        # Put into exchangable format
                        run_data_json = json.dumps(run_data)
                        # Publish to Redis
                        self.redis.publish("run_data:%s" % self.tag, run_data_json)
                        # Push onto redis list in case no one is currently listening
                        self.redis.lpush("run_data:%s" % self.tag, run_data_json)

            # Poll interval between list pops
            time.sleep(0.2)

            # Have Registrar update status
            self.ow_registrar.update({"site_id": self.site.ID})
    except KeyboardInterrupt:
        self.stop()
def update_run_stats(self, result_db, wedges):
    """Publish run (integration) statistics to Redis and MongoDB for the
    Remote project's benefit.

    Arguments:
    result_db -- dict of integration results (status, cell parameters,
                 image range, run_id, ...)
    wedges    -- iterable of per-shell statistics dicts; shells tagged
                 "overall", "inner" or "outer" are flattened into the
                 published record
    """
    if self.logger:
        self.logger.debug("Remote.update_run_stats")
        self.logger.debug(result_db)
        self.logger.debug(wedges)

    # Assign the basic data in one literal. Defaulting integrate_status
    # to "" prevents an AttributeError (None.lower()) when the key is
    # absent. (The formerly computed session_id was never used - removed.)
    integration_data = {
        "status": result_db.get("integrate_status", "").lower(),
        "beamline": self.beamline,
        "run_id": result_db.get("run_id", -1),
        "spacegroup": result_db.get("spacegroup", "P0"),
        "a": result_db.get("a", -1),
        "b": result_db.get("b", -1),
        "c": result_db.get("c", -1),
        "alpha": result_db.get("alpha", -1),
        "beta": result_db.get("beta", -1),
        "gamma": result_db.get("gamma", -1),
        "image_start": result_db.get("image_start", -1),
        "image_end": result_db.get("image_end", -1)
    }

    # Organize the wedges - flatten the recognized shells into suffixed keys
    if wedges:
        for wedge in wedges:
            wtag = wedge.get("shell_type")
            if wtag in ("overall", "outer", "inner"):
                integration_data.update({
                    "res_low_" + wtag: wedge.get("low_res", -1),
                    "res_high_" + wtag: wedge.get("high_res", -1),
                    "completeness_" + wtag: wedge.get("completeness", -1),
                    "multiplicity_" + wtag: wedge.get("multiplicity", -1),
                    "i_sigi_" + wtag: wedge.get("i_sigma", -1),
                    "rmeas_" + wtag: wedge.get("r_meas", -1),
                    "rpim_" + wtag: wedge.get("r_pim", -1),
                    "anom_completeness_" + wtag: wedge.get("anom_completeness", -1),
                    "anom_multiplicity_" + wtag: wedge.get("anom_multiplicity", -1),
                    "anom_rmeas_" + wtag: wedge.get("r_meas_pm", -1),
                    "anom_rpim_" + wtag: wedge.get("r_pim_pm", -1),
                    "anom_corr_" + wtag: wedge.get("anom_correlation", -1),
                    "anom_slope_" + wtag: wedge.get("anom_slope", -1),
                    "ref_total_" + wtag: wedge.get("total_obs", -1),
                    "ref_unique_" + wtag: wedge.get("unique_obs", -1)
                })

    if self.logger:
        self.logger.debug("Sending integration_data to remote")
        self.logger.debug(integration_data)

    # Publish to redis connection
    #self.redis.publish("run_stats_"+self.beamline,data)

    # Serialize once, then publish
    j_data = json.dumps(integration_data)
    self.redis.publish("run_stats_" + self.beamline, j_data)

    # Save to Mongo
    try:
        self.logger.debug("Saving to mongoDB")
        self.mongo_remote.integration_data.update(
            {"run_id": result_db.get("run_id", -1)},
            integration_data,
            upsert=True,
            multi=False)
    # Catch Exception (not bare except) so KeyboardInterrupt/SystemExit
    # still propagate. TODO - handle specific mongoDB exceptions
    except Exception:
        if self.logger:
            self.logger.exception("Error writing run_data to MongoDB")
def run(self):
    """Main polling loop of the console Redis monitor.

    Connects to the beamline redis (source of console state variables) and
    to the publish-side redis manager, then loops: detects new runs from
    the beamline-specific state variables, composes and publishes a
    run_data record, tracks data-directory changes and detector aborts,
    and notifies the owning thread via self.notify. On a lost connection
    it enters a bounded reconnect loop.
    """
    if self.logger:
        self.logger.info('ConsoleRedisMonitor.run')

    # Create redis connections
    # Where beamline information is coming from
    redis_database = importlib.import_module('database.redis_adapter')
    bl_database = redis_database.Database(settings=self.site.SITE_ADAPTER_SETTINGS)
    self.bl_redis = bl_database.connect_redis_pool()
    pipe = self.bl_redis.pipeline()

    # Where information will be published to
    #self.pub = BLspec.connect_redis_manager_HA()
    self.pub_database = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS)
    self.pub = self.pub_database.connect_redis_manager_HA()

    # For beamline T
    #self.pubsub = self.pub.pubsub()
    #self.pubsub.subscribe('run_info_T')

    try:
        # Initial check of the db on startup
        run_data = self.pub.hgetall("current_run_" + self.beamline)
        if (run_data):
            # alert the media
            self.pub.publish('newdir:' + self.beamline, self.current_dir)
            # save the info
            self.pub.set('datadir_' + self.beamline, self.current_dir)

        # Main loop
        count = 1
        saved_adsc_state = False
        while (self.Go):
            # Check the redis db for a new run - each beamline keeps its
            # state in differently named console variables
            if self.beamline == "C":
                current_run, current_dir, current_adsc_state, test = pipe.get('RUN_INFO_SV').get("ADX_DIRECTORY_SV").get("ADSC_SV").set('RUN_INFO_SV', '').execute()
            elif self.beamline == "E":
                current_run, current_dir, current_adsc_state, test = pipe.get("RUN_INFO_SV").get("EIGER_DIRECTORY_SV").get("EIGER_SV").set('RUN_INFO_SV', '').execute()
            elif self.beamline == "T":
                # current_dir renamed below, but gets rid of error
                current_dir, current_adsc_state = pipe.get("EIGER_DIRECTORY_SV").get("EIGER_SV").execute()
                current_run = self.pub.rpop('run_info_T')
                #print self.pub.llen('run_info_T')
                #print self.pub.lrange('run_info_T', 0, -1)
                #current_run = self.pubsub.get_message()['data']
                #print current_run
                if current_run == None:
                    current_run = ''

            if (len(current_run) > 0):
                if self.beamline == "E":
                    self.pub.lpush('run_info_T', current_run)
                    #self.pub.publish('run_info_T', current_run)
                # Set variable
                self.current_run = current_run
                # Split it
                cur_run = current_run.split("_")  #runid,first#,total#,dist,energy,transmission,omega_start,deltaomega,time,timestamp
                # Arbitrary wait for Console to update Redis database
                time.sleep(0.01)
                # Get extra run data
                extra_data = self.getRunData()
                if self.beamline == "T":
                    # Rewrite to the rdma-side path for beamline T
                    current_dir = "/epu2/rdma%s%s_%d_%06d" % (
                        current_dir,
                        extra_data['prefix'],
                        int(cur_run[0]),
                        int(cur_run[1]))
                # Compose the run_data object
                run_data = {'directory': current_dir,
                            'prefix': extra_data['prefix'],
                            'run_number': int(cur_run[0]),
                            'start': int(cur_run[1]),
                            'total': int(cur_run[2]),
                            'distance': float(cur_run[3]),
                            'twotheta': extra_data['twotheta'],
                            'phi': extra_data['phi'],
                            'kappa': extra_data['kappa'],
                            'omega': float(cur_run[6]),
                            'axis': 'omega',
                            "width": float(cur_run[7]),
                            "time": float(cur_run[8]),
                            "beamline": self.beamline,
                            "file_source": beamline_settings[self.beamline]['file_source'],
                            "status": "STARTED"}
                # Logging
                self.logger.info(run_data)
                # Save data into db
                self.pub.hmset('current_run_' + self.beamline, run_data)
                self.pub.publish('current_run_' + self.beamline, json.dumps(run_data))
                # Signal the main thread
                if (self.notify):
                    self.notify(("%s RUN" % beamline_settings[self.beamline]['file_source'], run_data))

            # Check if the data collection directory is new
            if (self.current_dir != current_dir):
                self.logger.debug("New directory")
                # save the new dir
                self.current_dir = current_dir
                # alert the media
                self.logger.debug("Publish %s %s" % ('newdir:' + self.beamline, self.current_dir))
                self.pub.publish('newdir:' + self.beamline, self.current_dir)
                # save the info
                self.pub.set('datadir_' + self.beamline, current_dir)

            # Watch for run aborting
            if (current_adsc_state == "ABORTED" and current_adsc_state != saved_adsc_state):
                # Keep track of the detector state
                saved_adsc_state = current_adsc_state
                # Alert the media
                if (self.notify):
                    self.notify(("%s_ABORT" % beamline_settings[self.beamline]['file_source'], None))
            else:
                saved_adsc_state = current_adsc_state

            # Heartbeat / parameter-watch code kept disabled as an inert
            # string literal (see original author's note inside)
            """ #### Turned off, so I dont screw up IDE
            #send test data for rastersnap heartbeat
            if (count % 100 == 0):
                #reset the counter
                count = 1
                # Logging
                self.logger.info('Publishing filecreate:%s, %s' % (self.beamline, beamline_settings[self.beamline]['rastersnap_test_image']))
                # Publish the test image
                self.pub.publish('filecreate:%s'%self.beamline, beamline_settings[self.beamline]['rastersnap_test_image'])

            # Watch the crystal & distl params
            if (count % 60) == 0:
                try:
                    crystal_request,distl_request,best_request = pipe.get("CP_REQUESTOR_SV").get("DP_REQUESTOR_SV").get("BEST_REQUESTOR_SV").execute()
                    if (distl_request):
                        #if (distl_request != self.current_dpreq):
                        if (distl_request not in self.dpreqs):
                            self.dpreqs.append(distl_request)
                            self.logger.debug(self.dpreqs)
                            self.current_dpreq = distl_request
                            if self.logger:
                                self.logger.debug('ConsoleRedisMonitor New distl parameters request for %s' % distl_request)
                            if (self.notify):
                                self.notify(("DISTL_PARMS_REQUEST",distl_request))
                    if (crystal_request):
                        #if (crystal_request != self.current_cpreq):
                        if (crystal_request not in self.cpreqs):
                            self.cpreqs.append(crystal_request)
                            self.current_cpreq = crystal_request
                            if self.logger:
                                self.logger.debug('ConsoleRedisMonitor New crystal parameters request for %s' % crystal_request)
                            if (self.notify):
                                self.notify(("CRYSTAL_PARMS_REQUEST",crystal_request))
                    if (best_request):
                        if (best_request != self.current_breq):
                            self.current_breq = best_request
                            if self.logger:
                                self.logger.debug('ConsoleRedisMonitor New best parameters request')
                            if (self.notify):
                                self.notify(("BEST_PARMS_REQUEST",best_request))
                except:
                    self.logger.debug('ConsoleRedisMonitor Exception in querying for tracker requests')
            """
            # Increment the counter
            count += 1

            # Sleep before checking again
            time.sleep(0.1)

    except redis.exceptions.ConnectionError:
        if self.logger:
            self.logger.debug('ConsoleRedisMonitor failure to connect - will reconnect')
        time.sleep(10)
        reconnect_counter = 0
        while (reconnect_counter < 1000):
            try:
                try:
                    # NOTE(review): self.red is not assigned anywhere in the
                    # try-path above - presumably set elsewhere or created by
                    # the except branch below on first failure; verify.
                    self.red.ping()
                except:
                    self.red = redis.Redis(beamline_settings[self.beamline]['redis_ip'])
                try:
                    self.pub.ping()
                except:
                    """
                    #self.pub = redis.Redis(beamline_settings[self.beamline]['remote_redis_ip'])
                    self.pub = pysent.RedisManager(sentinel_host="remote.nec.aps.anl.gov",
                                                   sentinel_port=26379,
                                                   master_name="remote_master")
                    """
                    self.pub = BLspec.connect_redis_manager_HA()
                # test connections
                self.red.ping()
                if self.logger:
                    self.logger.debug('Reconnection to redis server successful')
                break
            except:
                reconnect_counter += 1
                if self.logger:
                    self.logger.debug('Reconnection attempt %d failed, will try again' % reconnect_counter)
                time.sleep(10)
def update_image_stats(self, result_db, wedges_db):
    """Publish single-image (autoindexing) statistics to Redis and MongoDB
    for the Remote project's benefit.

    Arguments:
    result_db -- dict of autoindexing results for one image (labelit cell,
                 strategy statuses, ...)
    wedges_db -- list of strategy wedge dicts, each carrying a
                 "strategy_type" of "normal" or "anomalous"
    """
    if self.logger:
        self.logger.debug("Remote.update_image_stats")
        self.logger.debug(result_db)
        self.logger.debug(wedges_db)

    # Data will go in here
    indexing_data = {}

    # If the indexing worked
    if result_db["labelit_status"] == "SUCCESS":
        indexing_data.update({
            "status": "indexed",
            "beamline": self.beamline,
            "fullname": result_db.get("fullname", 0),
            "image_id": result_db.get("image_id", 0),
            "pointgroup": result_db.get("labelit_spacegroup"),
            "a": result_db.get("labelit_a"),
            "b": result_db.get("labelit_b"),
            "c": result_db.get("labelit_c"),
            "alpha": result_db.get("labelit_alpha"),
            "beta": result_db.get("labelit_beta"),
            "gamma": result_db.get("labelit_gamma"),
            "resolution": result_db.get("distl_labelit_res"),
            "mosaicity": result_db.get("labelit_mosaicity"),
            "overloads": result_db.get("distl_overloads")})

        # If we have a normal strategy
        if result_db["best_norm_status"] == "SUCCESS":
            # Get the normal strategy wedge. next() with a {} default fixes
            # the old for/break search, which silently left the loop
            # variable bound to the LAST wedge when no "normal" wedge
            # existed; with {} the .get() defaults of -1 apply instead.
            wedge = next(
                (w for w in wedges_db if w["strategy_type"] == "normal"), {})
            indexing_data.update({
                "status": "normal",
                "normal_omega_start": wedge.get("phi_start", -1),
                "normal_omega_step": wedge.get("delta_phi", -1),
                "normal_number_images": wedge.get("number_images", -1)})

        # If we have an anomalous strategy
        if result_db["best_anom_status"] == "SUCCESS":
            # Get the anomalous strategy wedge (same guarded lookup)
            wedge = next(
                (w for w in wedges_db if w["strategy_type"] == "anomalous"), {})
            indexing_data.update({
                "status": "all",
                "anom_omega_start": wedge.get("phi_start", -1),
                "anom_omega_step": wedge.get("delta_phi", -1),
                "anom_number_images": wedge.get("number_images", -1)})

    # No indexing solution
    else:
        indexing_data.update({
            "status": "failure",
            "beamline": self.beamline,
            "fullname": result_db.get("fullname", 0),
            "image_id": result_db.get("image_id", 0)})

    # Publish to redis connection
    if self.logger:
        self.logger.debug("Publishing image_stats_%s %s" % (
            self.beamline, json.dumps(indexing_data)))
    self.redis.publish("image_stats_%s" % self.beamline,
                       json.dumps(indexing_data))

    # Save to Mongo
    try:
        self.mongo_remote.autoindex_data.insert(indexing_data)
    # Catch Exception (not bare except) so KeyboardInterrupt/SystemExit
    # still propagate. TODO - handle mongoDB exceptions specifically
    except Exception:
        if self.logger:
            self.logger.exception("Error writing run_data to MongoDB")