Example #1
    def run(self):
        """The core process of the Launcher instance"""

        # Set up overwatcher
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="launcher",
                                          ow_id=self.overwatch_id)
            self.ow_registrar.register({
                "site_id": json.dumps(self.launcher.get('site_tag')),
                "job_list": self.job_list,
            })

        try:
            timer = 0
            # This is the server portion of the code
            while self.running:
                # Have Registrar update status every second
                if round(timer % 1, 1) in (0.0, 1.0):
                    if self.overwatch_id:
                        #self.ow_registrar.update({"site_id":self.site.ID,
                        self.ow_registrar.update({
                            "site_id": json.dumps(self.launcher.get('site_tag')),
                            "job_list": self.job_list,
                        })
                        #self.ow_registrar.update({"job_list":self.job_list})

                # Look for a new command
                # This will throw a redis.exceptions.ConnectionError if redis is unreachable
                #command = self.redis.brpop(["RAPD_JOBS",], 5)
                try:
                    while self.redis.llen(self.job_list) != 0:
                        command = self.redis.rpop(self.job_list)
                        # Handle the message
                        if command:
                            self.handle_command(json.loads(command))

                            # Only run 1 command
                            # self.running = False
                            # break
                    # sleep a little when jobs aren't coming in.
                    time.sleep(0.2)
                    timer += 0.2
                except redis.exceptions.ConnectionError:
                    if self.logger:
                        self.logger.exception(
                            "Remote Redis is not up. Waiting for Sentinal to switch to new host"
                        )
                    time.sleep(1)

        except KeyboardInterrupt:
            self.stop()
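
The launcher above drains its Redis job list with rpop and hands each JSON payload to handle_command. Below is a minimal sketch of the producing side, assuming the list is the "RAPD_JOBS" key mentioned in the commented-out brpop call; the command fields are assumptions for illustration, since the real payload is defined by the launcher adapters.

import json
import redis

connection = redis.Redis(host="localhost", port=6379)

# Hypothetical command payload; field names are placeholders
command = {"command": "ECHO", "directory": "/tmp/rapd_test"}

# The launcher rpop's the list, so lpush on this side gives FIFO ordering
connection.lpush("RAPD_JOBS", json.dumps(command))
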
Example #2
    def run(self):
        """
        The while loop for watching the files
        """
        self.logger.info("NecatGatherer.run")

        # Set up overwatcher
        self.ow_registrar = Registrar(site=self.site,
                                      ow_type="gatherer",
                                      ow_id=self.overwatch_id)
        #self.ow_registrar.register({"site_id":self.site.ID})
        self.ow_registrar.register({"site_id":self.tag})

        #self.logger.debug("  Will publish new images on filecreate:%s" % self.tag)
        #self.logger.debug("  Will push new images onto images_collected:%s" % self.tag)
        self.logger.debug("  Will publish new datasets on run_data:%s" % self.tag)
        self.logger.debug("  Will push new datasets onto runs_data:%s" % self.tag)
        
        # path prefix for RDMA folder location with Eiger
        #if self.tag == 'NECAT_E':
        #    path_prefix = '/epu/rdma'
        #else:
        #    path_prefix = ''

        try:
            while self.go:
                # Check if the run info changed in beamline Redis DB.
                #current_run = self.pipe.get("RUN_INFO_SV").set("RUN_INFO_SV", "").execute()
                # get run info passed from RAPD
                #current_run = self.redis.rpop('run_info_T')
                #current_run = self.redis.rpop('run_info_%s'%self.tag[-1])
                current_run_raw = self.redis.rpop('run_info_%s'%self.tag[-1])
                if current_run_raw not in (None, ""):
                    current_run = json.loads(current_run_raw)
                    # get the additional beamline params and put into nice dict.
                    run_data = self.get_run_data(current_run)
                    if self.ignored(run_data['directory']):
                        self.logger.debug("Directory %s is marked to be ignored - skipping", run_data['directory'])
                    else:
                        #run_data['directory'] = dir
                        self.logger.debug("runs_data:%s %s", self.tag, run_data)
                        # Put into exchangeable format
                        run_data_json = json.dumps(run_data)
                        # Publish to Redis
                        self.redis.publish("run_data:%s" % self.tag, run_data_json)
                        #self.redis.publish("run_data:%s" % self.tag, run_data)
                        # Push onto redis list in case no one is currently listening
                        self.redis.lpush("runs_data:%s" % self.tag, run_data_json)
                        #self.redis.lpush("runs_data:%s" % self.tag, run_data)

                time.sleep(0.2)
                # Have Registrar update status
                self.ow_registrar.update({"site_id":self.tag})
        except KeyboardInterrupt:
            self.stop()
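
The gatherer above both publishes each run on the run_data:<tag> channel and pushes it onto the runs_data:<tag> list so consumers that were offline can catch up. A minimal consumer sketch for that pattern follows; the host, port, and the "NECAT_E" tag are assumptions.

import json
import redis

connection = redis.Redis(host="localhost", port=6379)
tag = "NECAT_E"

# Live updates arrive over pub/sub ...
pubsub = connection.pubsub()
pubsub.subscribe("run_data:%s" % tag)

# ... while the list holds a backlog; the gatherer lpush's, so rpop
# consumes oldest-first
raw = connection.rpop("runs_data:%s" % tag)
if raw:
    run_data = json.loads(raw)
    print(run_data["directory"])
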
Example #3
    def run(self):
        """Orchestrate the monitoring for new images in redis db"""

        self.logger.debug("Running")

        # Connect to Redis
        self.connect_to_redis()

        # Create Overwatch Registrar instance
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="control",
                                          ow_id=self.overwatch_id)
            # Register
            self.ow_registrar.register()

        # Determine interval for overwatch update
        ow_round_interval = 50 # int((5 * len(self.image_lists)) / POLLING_REST)

        # If we are starting clean
        if self.clean_start:
            for tag in self.tags:
                self.redis.delete("images_collected:%s" % tag)

        while self.running:

            # ~5 seconds between overwatch updates
            for __ in range(ow_round_interval):

                for tag in self.tags:

                    # Try to pop the oldest image off the list
                    new_image = self.redis.rpop("images_collected:%s" % tag)
                    #new_image = self.redis.rpop("images_collected_%s" % tag)

                    # Have a new_image
                    if new_image:
                        # self.logger.debug("New image %s - %s", tag, new_image)

                        # Notify core thread that an image has been collected
                        self.notify({"message_type":"NEWIMAGE",
                                     "fullname":new_image,
                                     "site_tag":tag})

                        # self.logger.debug("New image data %s", new_image)

                    # Slow it down a little
                    time.sleep(POLLING_REST)

            # Have Registrar update status
            if self.overwatch_id:
                self.ow_registrar.update()

        self.logger.debug("Exit image monitor loop")
Example #4
    def run(self):
        """The core process of the Launcher instance"""

        # Set up overwatcher
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="launcher",
                                          ow_id=self.overwatch_id)
            self.ow_registrar.register({"site_id": self.site.ID})

        # Create socket to listen for commands
        _socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        _socket.settimeout(5)
        _socket.bind(("", self.specifications["port"]))

        # This is the server portion of the code
        while 1:
            try:
                # Have Registrar update status
                if self.overwatch_id:
                    self.ow_registrar.update({"site_id": self.site.ID})

                # Listen for connections
                _socket.listen(5)
                conn, address = _socket.accept()

                # Read the message from the socket
                message = ""
                while not message.endswith("<rapd_end>"):
                    try:
                        data = conn.recv(BUFFER_SIZE)
                        message += data
                    except:
                        pass
                    time.sleep(0.01)

                # Close the connection
                conn.close()

                # Handle the message
                self.handle_message(message)

            except socket.timeout:
                pass
                # print "5 seconds up"

        # If we exit...
        _socket.close()
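
The socket-based launcher above reads until it sees the <rapd_end> tag and strips both delivery tags in handle_message. A minimal client sketch; the port and the JSON payload are assumptions, since the actual command format is defined by the configured launcher adapter.

import json
import socket

payload = json.dumps({"command": "ECHO"})          # hypothetical command
message = "<rapd_start>" + payload + "<rapd_end>"  # delivery tags the server looks for

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(("localhost", 50000))               # port from LAUNCHER_SPECIFICATIONS (assumed)
client.sendall(message.encode())
client.close()
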
Example #5
    def run(self):
        """
        The while loop for watching the files
        """
        self.logger.info("NecatGatherer.run")

        # Set up overwatcher
        self.ow_registrar = Registrar(site=self.site,
                                      ow_type="gatherer",
                                      ow_id=self.overwatch_id)
        self.ow_registrar.register({"site_id":self.site.ID})

        #self.logger.debug("  Will publish new images on filecreate:%s" % self.tag)
        #self.logger.debug("  Will push new images onto images_collected:%s" % self.tag)
        self.logger.debug("  Will publish new datasets on run_data:%s" % self.tag)
        self.logger.debug("  Will push new datasets onto runs_data:%s" % self.tag)
        
        # path prefix for RDMA folder location with Eiger
        if self.tag == 'NECAT_E':
            path_prefix = '/epu2/rdma'
        else:
            path_prefix = ''

        try:
            while self.go:
                # Check if the run info changed in beamline Redis DB.
                #current_run = self.pipe.get("RUN_INFO_SV").set("RUN_INFO_SV", "").execute()
                # get run info passed from RAPD
                #current_run = self.redis.rpop('run_info_T')
                #current_run = self.redis.rpop('run_info_%s'%self.tag[-1])
                current_run_raw = self.redis.rpop('run_info_%s'%self.tag[-1])
                if current_run_raw not in (None, ""):
                    current_run = json.loads(current_run_raw)
                    # get the additional beamline params and put into nice dict.
                    run_data = self.get_run_data(current_run)
                    if self.ignored(run_data['directory']):
                        self.logger.debug("Directory %s is marked to be ignored - skipping", run_data['directory'])
                    else:
                        #run_data['directory'] = dir
                        self.logger.debug("runs_data:%s %s", self.tag, run_data)
                        # Put into exchangeable format
                        run_data_json = json.dumps(run_data)
                        # Publish to Redis
                        self.redis.publish("run_data:%s" % self.tag, run_data_json)
                        #self.redis.publish("run_data:%s" % self.tag, run_data)
                        # Push onto redis list in case no one is currently listening
                        self.redis.lpush("runs_data:%s" % self.tag, run_data_json)
                        #self.redis.lpush("runs_data:%s" % self.tag, run_data)

                time.sleep(0.2)
                # Have Registrar update status
                self.ow_registrar.update({"site_id":self.site.ID})
        except KeyboardInterrupt:
            self.stop()
Example #6
    def run(self):
        """The core process of the Launcher instance"""

        # Set up overwatcher
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="launcher",
                                          ow_id=self.overwatch_id)
            self.ow_registrar.register({"site_id":self.site.ID})

        # Create socket to listen for commands
        _socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        _socket.settimeout(5)
        _socket.bind(("", self.specifications["port"]))

        # This is the server portion of the code
        while 1:
            try:
                # Have Registrar update status
                if self.overwatch_id:
                    self.ow_registrar.update({"site_id":self.site.ID})

                # Listen for connections
                _socket.listen(5)
                conn, address = _socket.accept()

                # Read the message from the socket
                message = ""
                while not message.endswith("<rapd_end>"):
                    try:
                        data = conn.recv(BUFFER_SIZE)
                        message += data
                    except:
                        pass
                    time.sleep(0.01)

                # Close the connection
                conn.close()

                # Handle the message
                self.handle_message(message)

            except socket.timeout:
                pass
                # print "5 seconds up"

        # If we exit...
        _socket.close()
Example #7
    def run(self):
        self.logger.debug("Running")

        # Connect to Redis
        self.connect_to_redis()

        # Create Overwatch Registrar instance
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="control",
                                          ow_id=self.overwatch_id)
            # Register
            self.ow_registrar.register()

        # Determine interval for overwatch update
        # ow_round_interval = max(int((5 * len(self.run_lists)) / POLLING_REST), int(5/POLLING_REST))
        ow_round_interval = 10

        self.logger.debug("Finished registering %d", ow_round_interval)

        while self.running:

            # ~5 seconds between overwatch updates
            for __ in range(ow_round_interval):

                for run_list, site_tag in self.run_lists:
                    raw_run_data = self.redis.rpop(run_list)
                    # Have new run data
                    if raw_run_data not in (None, ""):
                        # Parse into python object
                        #print raw_run_data
                        run_data = json.loads(raw_run_data)

                        # Notify core thread that an image has been collected
                        self.notify({"message_type":"NEWRUN",
                                     "run_data":run_data,
                                     "site_tag":site_tag})

                        self.logger.debug("New run data %s", raw_run_data)

                    # Slow it down a little
                    time.sleep(POLLING_REST)
                time.sleep(POLLING_REST)

            # Have Registrar update status
            if self.overwatch_id:
                self.ow_registrar.update()
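
The hard-coded ow_round_interval of 10 stands in for the commented-out formula above, which sizes the round count so overwatch updates land roughly every five seconds. A small sketch of that arithmetic, assuming POLLING_REST = 0.5 and a single run list; both values are assumptions for illustration only.

POLLING_REST = 0.5
run_lists = [("runs_data:NECAT_E", "NECAT_E")]   # (redis list, site tag) pairs, assumed

ow_round_interval = max(int((5 * len(run_lists)) / POLLING_REST),
                        int(5 / POLLING_REST))
print(ow_round_interval)  # 10 polling rounds between overwatch updates
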
Example #8
    def run(self):
        """The core process of the Launcher instance"""

        # Set up overwatcher
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="launcher",
                                          ow_id=self.overwatch_id)
            self.ow_registrar.register({"site_id":self.site.ID,
                                        "job_list":self.job_list})

        try:
            # This is the server portion of the code
            while self.running:
                # Have Registrar update status
                if self.overwatch_id:
                    self.ow_registrar.update({"site_id":self.site.ID,
                                              "job_list":self.job_list})

                # Look for a new command
                # This will throw a redis.exceptions.ConnectionError if redis is unreachable
                #command = self.redis.brpop(["RAPD_JOBS",], 5)
                try:
                    while self.redis.llen(self.job_list) != 0:
                        command = self.redis.rpop(self.job_list)
                        # Handle the message
                        if command:
                            self.handle_command(json.loads(command))

                            # Only run 1 command
                            # self.running = False
                            # break
                    # sleep a little when jobs aren't coming in.
                    time.sleep(0.2)
                except redis.exceptions.ConnectionError:
                    if self.logger:
                        self.logger.exception("Remote Redis is not up. Waiting for Sentinel to switch to new host")
                    time.sleep(1)

        except KeyboardInterrupt:
            self.stop()
Example #9
class Monitor(threading.Thread):
    """Monitor for new data collection images to be submitted to a redis instance"""

    # Used for stopping/starting the loop
    running = True

    # The connection to the Redis database
    redis = None

    # Storage for where to look for information
    tags = []

    # Overwatch
    ow_registrar = None

    def __init__(self,
                 site,
                 clean_start=False,
                 notify=None,
                 overwatch_id=None):
        """
        Initialize the monitor

        Keyword arguments:
        site -- site description
        notify -- Function called when image is captured
        overwatch_id -- id for optional overwatcher wrapper
        """

        # Get the logger
        self.logger = logging.getLogger("RAPDLogger")

        # Initialize the thread
        threading.Thread.__init__(self)

        # Passed-in variables
        self.site = site
        self.clean_start = clean_start
        self.notify = notify
        self.overwatch_id = overwatch_id

        # Figure out tag(s)
        self.get_tags()

        # Start the thread
        # self.daemon = True
        self.start()

    def get_tags(self):
        """Transform site.ID into tag[s] for image monitor"""

        # A string is input - one tag
        if isinstance(self.site.ID, str):
            self.tags = [self.site.ID.upper()]

        # Tuple or list
        elif isinstance(self.site.ID, tuple) or isinstance(self.site.ID, list):
            for site_id in self.site.ID:
                self.tags.append(site_id.upper())

    def stop(self):
        """Stop the process of polling the redis instance"""

        self.logger.debug("Stopping")

        self.running = False
        self.redis_database.stop()

    def connect_to_redis(self):
        """Connect to the redis instance"""
        redis_database = importlib.import_module('database.redis_adapter')
        #self.redis_database = redis_database.Database(settings=self.site.IMAGE_MONITOR_SETTINGS)
        #self.redis = self.redis_database.connect_to_redis()
        self.redis = redis_database.Database(settings=self.site.IMAGE_MONITOR_SETTINGS)

    def run(self):
        """Orchestrate the monitoring for new images in redis db"""

        self.logger.debug("Running")

        # Connect to Redis
        self.connect_to_redis()

        # Create Overwatch Registrar instance
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="control",
                                          ow_id=self.overwatch_id)
            # Register
            self.ow_registrar.register()

        # Determine interval for overwatch update
        ow_round_interval = 50 # int((5 * len(self.image_lists)) / POLLING_REST)

        # If we are starting clean
        if self.clean_start:
            for tag in self.tags:
                self.redis.delete("images_collected:%s" % tag)

        while self.running:

            # ~5 seconds between overwatch updates
            for __ in range(ow_round_interval):

                for tag in self.tags:

                    # Try to pop the oldest image off the list
                    new_image = self.redis.rpop("images_collected:%s" % tag)
                    #new_image = self.redis.rpop("images_collected_%s" % tag)

                    # Have a new_image
                    if new_image:
                        # self.logger.debug("New image %s - %s", tag, new_image)

                        # Notify core thread that an image has been collected
                        self.notify({"message_type":"NEWIMAGE",
                                     "fullname":new_image,
                                     "site_tag":tag})

                        # self.logger.debug("New image data %s", new_image)

                    # Slow it down a little
                    time.sleep(POLLING_REST)

            # Have Registrar update status
            if self.overwatch_id:
                self.ow_registrar.update()

        self.logger.debug("Exit image monitor loop")
Example #10
class Launcher(object):
    """
    Runs a socket server and spawns new threads using defined launcher_adapter
    when connections are received
    """

    database = None
    adapter = None
    # address = None
    ip_address = None
    tag = None
    port = None
    job_types = None
    adapter_file = None
    launcher = None

    def __init__(self, site, tag="", logger=None, overwatch_id=False):
        """
        Initialize the Launcher instance

        Keyword arguments:
        site -- site object with relevant information to run
        tag -- optional string describing launcher. Defined in site.LAUNCHER_REGISTER
        logger -- logger instance (default = None)
        overwatch_id -- id for optional overwatcher instance
        """

        # Get the logger Instance
        self.logger = logger

        # Save passed-in variables
        self.site = site
        self.tag = tag
        self.overwatch_id = overwatch_id

        # Retrieve settings for this Launcher
        self.get_settings()

        # Load the adapter
        self.load_adapter()

        # Start listening for commands
        self.run()

    def run(self):
        """The core process of the Launcher instance"""

        # Set up overwatcher
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="launcher",
                                          ow_id=self.overwatch_id)
            self.ow_registrar.register({"site_id":self.site.ID})

        # Create socket to listen for commands
        _socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        _socket.settimeout(5)
        _socket.bind(("", self.specifications["port"]))

        # This is the server portion of the code
        while 1:
            try:
                # Have Registrar update status
                if self.overwatch_id:
                    self.ow_registrar.update({"site_id":self.site.ID})

                # Listen for connections
                _socket.listen(5)
                conn, address = _socket.accept()

                # Read the message from the socket
                message = ""
                while not message.endswith("<rapd_end>"):
                    try:
                        data = conn.recv(BUFFER_SIZE)
                        message += data
                    except:
                        pass
                    time.sleep(0.01)

                # Close the connection
                conn.close()

                # Handle the message
                self.handle_message(message)

            except socket.timeout:
                pass
                # print "5 seconds up"

        # If we exit...
        _socket.close()

    def handle_message(self, message):
        """
        Handle an incoming message

        Keyword arguments:
        message -- raw message from socket
        """

        self.logger.debug("Message received: %s", message)

        # Strip the message of its delivery tags
        message = message.rstrip().replace("<rapd_start>", "").replace("<rapd_end>", "")

        # Use the adapter to launch
        self.adapter(self.site, message, self.specifications)

    def get_settings(self):
        """
        Get the settings for this Launcher based on ip address and tag
        """

        # Save typing
        launchers = self.site.LAUNCHER_SETTINGS["LAUNCHER_REGISTER"]

        # Get IP Address
        self.ip_address = utils.site.get_ip_address()
        self.logger.debug("Found ip address to be %s", self.ip_address)

        # Look for the launcher matching this ip_address and the input tag
        possible_tags = []
        for launcher in launchers:
            if launcher[0] == self.ip_address and launcher[1] == self.tag:
                self.launcher = launcher
                break
            elif launcher[0] == self.ip_address:
                possible_tags.append(launcher[1])

        # No launcher adapter
        if self.launcher is None:

            # No launchers for this IP address
            if len(possible_tags) == 0:
                print "  There are no launcher adapters registered for this ip address"
            # IP Address in launchers, but not the input tag
            else:
                print text.error + ("There is a launcher adapter registered for "
                                    "this IP address (%s), but not for the input "
                                    "tag (%s)") % (self.ip_address, self.tag)
                print "  Available tags for this IP address:"
                for t in possible_tags:
                    print "    %s" % t
                print text.stop

            # Exit in error state
            sys.exit(9)
        else:
            # Unpack address
            self.ip_address, self.tag, self.launcher_id = self.launcher
            self.specifications = self.site.LAUNCHER_SETTINGS["LAUNCHER_SPECIFICATIONS"][self.launcher_id]
            # Tag launcher in self.site
            self.site.LAUNCHER_ID = self.launcher_id

    def load_adapter(self):
        """Find and load the adapter"""

        # Import the database adapter as database module
        self.adapter = load_module(
            seek_module=self.specifications["adapter"],
            directories=self.site.LAUNCHER_SETTINGS["RAPD_LAUNCHER_ADAPTER_DIRECTORIES"]).LauncherAdapter

        self.logger.debug(self.adapter)
Example #11
    def run(self):
        """
        The while loop for watching the files
        """
        self.logger.info("Gatherer.run")

        # Set up overwatcher
        self.ow_registrar = Registrar(site=self.site,
                                      ow_type="gatherer",
                                      ow_id=self.overwatch_id)
        self.ow_registrar.register({"site_id":self.site.ID})

        # A RUN & IMAGES EXAMPLE
        # Some logging
        self.logger.debug("  Will publish new images on filecreate:C")
        self.logger.debug("  Will publish new images on image_collected:C")
        self.logger.debug("  Will push new images onto images_collected:C")
        self.logger.debug("  Will publish new images on image_collected:%s" % self.tag)
        self.logger.debug("  Will push new images onto images_collected:%s" % self.tag)

        # Set up the WatchManager
        watch_manager = pyinotify.WatchManager()

        # Set up the notifier for files being made
        notifier = pyinotify.ThreadedNotifier(watch_manager, EventHandler(redis_rapd=self.redis_rapd,
                                                                          redis_remote=self.redis_remote,
                                                                          logger=self.logger))
        notifier.start()

        # Try exiting the pyinotify gracefully
        def exit_gracefully():
            """
            Exit pyinotify properly when program exits
            """
            self.logger.debug("Attempting to gracefully shut down")
            watch_manager.rm_watch(wdd.values())
            notifier.stop()
        atexit.register(exit_gracefully)

        # Start by adding the current dir in the beamline redis db
        DATA_DIR = "ADX_DIRECTORY_SV" # "datadir_%s" % self.tag

        # Listen for new directory
        current_dir = ""
        time.sleep(0.5)
        counter = 0
        try:
            while True:
                print counter
                newdir = self.redis_beamline.get(DATA_DIR)
                if (newdir != current_dir):
                    have = False
                    current_dir = newdir
                    self.logger.debug("New directory to watch %s'" % newdir)
                    DirectoryHandler(current_dir=newdir, 
                                     watch_manager=watch_manager,
                                     logger=self.logger)
                time.sleep(1)
                # Update overwatcher every 5 seconds
                if counter % 5 == 0:
                    self.ow_registrar.update({"site_id":self.site.ID})
                    counter = 0
                else:
                    counter += 1

        # Exited by keyboard
        except KeyboardInterrupt:
            self.stop()
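
A minimal pyinotify sketch of the pattern the gatherer above relies on: a ProcessEvent subclass receives create events, add_watch() returns the watch-descriptor dict (the "wdd" referenced by exit_gracefully), and rm_watch() tears the watches down. The watched path is an assumption.

import pyinotify

class CreateHandler(pyinotify.ProcessEvent):
    def process_IN_CREATE(self, event):
        # event.pathname is the full path of the newly created file
        print(event.pathname)

watch_manager = pyinotify.WatchManager()
notifier = pyinotify.ThreadedNotifier(watch_manager, CreateHandler())
notifier.start()

# add_watch returns {path: watch_descriptor}
wdd = watch_manager.add_watch("/tmp/watched_dir", pyinotify.IN_CREATE, rec=True)

# ... later, shut down cleanly
watch_manager.rm_watch(wdd.values())
notifier.stop()
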
Example #12
class Gatherer(object):
    """
    Watches the beamline and signals images and runs over redis
    """

    # For keeping track of file change times
    run_time = 0
    image_time = 0

    # Host computer detail
    ip_address = None

    def __init__(self, site, overwatch_id=None):
        """
        Set up and start the NecatGatherer
        """

        # Get the logger Instance
        self.logger = logging.getLogger("RAPDLogger")

        # Passed-in variables
        self.site = site
        self.overwatch_id = overwatch_id

        self.logger.info("NecatGatherer.__init__")

        # Get our bearings
        self.set_host()

        # Connect to redis
        self.connect()

        # Running conditions
        self.go = True

        # Now run
        self.run()

    def run(self):
        """
        The while loop for watching the files
        """
        self.logger.info("NecatGatherer.run")

        # Set up overwatcher
        self.ow_registrar = Registrar(site=self.site,
                                      ow_type="gatherer",
                                      ow_id=self.overwatch_id)
        #self.ow_registrar.register({"site_id":self.site.ID})
        self.ow_registrar.register({"site_id":self.tag})

        #self.logger.debug("  Will publish new images on filecreate:%s" % self.tag)
        #self.logger.debug("  Will push new images onto images_collected:%s" % self.tag)
        self.logger.debug("  Will publish new datasets on run_data:%s" % self.tag)
        self.logger.debug("  Will push new datasets onto runs_data:%s" % self.tag)
        
        # path prefix for RDMA folder location with Eiger
        #if self.tag == 'NECAT_E':
        #    path_prefix = '/epu/rdma'
        #else:
        #    path_prefix = ''

        try:
            while self.go:
                # Check if the run info changed in beamline Redis DB.
                #current_run = self.pipe.get("RUN_INFO_SV").set("RUN_INFO_SV", "").execute()
                # get run info passed from RAPD
                #current_run = self.redis.rpop('run_info_T')
                #current_run = self.redis.rpop('run_info_%s'%self.tag[-1])
                current_run_raw = self.redis.rpop('run_info_%s'%self.tag[-1])
                if current_run_raw not in (None, ""):
                    current_run = json.loads(current_run_raw)
                    # get the additional beamline params and put into nice dict.
                    run_data = self.get_run_data(current_run)
                    if self.ignored(run_data['directory']):
                        self.logger.debug("Directory %s is marked to be ignored - skipping", run_data['directory'])
                    else:
                        #run_data['directory'] = dir
                        self.logger.debug("runs_data:%s %s", self.tag, run_data)
                        # Put into exchangeable format
                        run_data_json = json.dumps(run_data)
                        # Publish to Redis
                        self.redis.publish("run_data:%s" % self.tag, run_data_json)
                        #self.redis.publish("run_data:%s" % self.tag, run_data)
                        # Push onto redis list in case no one is currently listening
                        self.redis.lpush("runs_data:%s" % self.tag, run_data_json)
                        #self.redis.lpush("runs_data:%s" % self.tag, run_data)

                time.sleep(0.2)
                # Have Registrar update status
                self.ow_registrar.update({"site_id":self.tag})
        except KeyboardInterrupt:
            self.stop()

    def stop(self):
        """
        Stop the loop
        """
        self.logger.debug("NecatGatherer.stop")

        self.go = False
        # Close the lock file
        close_lock_file()

    def connect(self):
        """Connect to redis host"""
        # Connect to control redis for publishing run data info
        redis_database = importlib.import_module('database.redis_adapter')
        self.redis = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS)

        # Connect to beamline Redis to monitor if run is launched
        self.bl_redis = redis_database.Database(settings=self.site.SITE_ADAPTER_SETTINGS[self.tag])

    def set_host(self):
        """
        Use the host's IP address to determine the beamline tag
        """
        self.logger.debug("NecatGatherer.set_host")

        # Figure out which host we are on
        self.ip_address = socket.gethostbyaddr(socket.gethostname())[-1][0]
        self.logger.debug(self.ip_address)

        # Now grab the file locations, beamline from settings
        if self.site.GATHERERS.has_key(self.ip_address):
            self.tag = self.site.GATHERERS[self.ip_address]
            # Make sure we enforce uppercase for tag
            self.tag = self.tag.upper()
        else:
            print "ERROR - no settings for this host"
            self.tag = "test"
            # sys.exit(9)

    def ignored(self, dir):
        """Check if folder is supposed to be ignored."""
        for d in self.site.IMAGE_IGNORE_DIRECTORIES:
            if dir.startswith(d):
                return True
        return False

    def get_run_data(self, run_info):
        """Put together info from run and pass it back."""
        # Split it
        cur_run = run_info.split("_") #runnumber,first#,total#,dist,energy,transmission,omega_start,deltaomega,time,timestamp
        #1_1_23_400.00_12661.90_30.00_45.12_0.20_0.50_
        pipe = self.bl_redis.pipeline()
        #pipe.get("DETECTOR_SV")
        if self.tag == 'NECAT_C':
            pipe.get("ADX_DIRECTORY_SV")
        else:
            pipe.get("EIGER_DIRECTORY_SV")
        pipe.get("RUN_PREFIX_SV")
        #pipe.get("DET_THETA_SV")        #two theta
        #pipe.get("MD2_ALL_AXES_SV")     #for kappa and phi
        return_array = pipe.execute()
        # Append '0_0' to the path for the Pilatus
        if self.tag == 'NECAT_C':
            #dir = os.path.join(return_array[0], "0_0")
            dir = '%s%s'%(return_array[0], "0_0")
        else:
            dir = return_array[0]
        # Get rid of trailing slash from beamline Redis.
        if dir[-1] == '/':
            dir = dir[:-1]
        # Standardize the run information
        run_data = {
            "anomalous":None,
            "beamline":self.tag,                                # Non-standard
            #"beam_size_x":float(raw_run_data.get("beamsize", 0.0)),     # Non-standard
            #"beam_size_y":float(raw_run_data.get("beamsize", 0.0)),     # Non-standard
            #"directory":return_array[0],
            "directory":dir,
            "distance":float(cur_run[3]),
            "energy":float(cur_run[4]),
            #"file_ctime":datetime.datetime.fromtimestamp(self.run_time).isoformat(),
            "image_prefix":return_array[1],
            "kappa":None,
            "number_images":int(cur_run[2]),
            "omega":None,
            "osc_axis":"phi",
            "osc_start":float(cur_run[6]),
            "osc_width":float(cur_run[7]),
            "phi": 0.0,
            "run_number":int(cur_run[0]),
            "site_tag":self.tag,
            "start_image_number":int(cur_run[1]),
            "time":float(cur_run[8]),
            "transmission":float(cur_run[5]),
            "twotheta":None,
        }

        return run_data
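
A small sketch of how the underscore-delimited run_info string documented in the comment above maps onto the run_data fields, using the sample value from that comment.

cur_run = "1_1_23_400.00_12661.90_30.00_45.12_0.20_0.50_".split("_")

print(int(cur_run[0]))    # run_number         -> 1
print(int(cur_run[1]))    # start_image_number -> 1
print(int(cur_run[2]))    # number_images      -> 23
print(float(cur_run[3]))  # distance           -> 400.0
print(float(cur_run[4]))  # energy             -> 12661.9
print(float(cur_run[5]))  # transmission       -> 30.0
print(float(cur_run[6]))  # osc_start          -> 45.12
print(float(cur_run[7]))  # osc_width          -> 0.2
print(float(cur_run[8]))  # time               -> 0.5
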
Example #13
class Launcher(object):
    """
    Runs a socket server and spawns new threads using defined launcher_adapter
    when connections are received
    """

    database = None
    adapter = None
    # address = None
    ip_address = None
    tag = None
    port = None
    job_types = None
    adapter_file = None
    launcher = None

    def __init__(self, site, tag="", logger=None, overwatch_id=False):
        """
        Initialize the Launcher instance

        Keyword arguments:
        site -- site object with relevant information to run
        tag -- optional string describing launcher. Defined in site.LAUNCHER_REGISTER
        logger -- logger instance (default = None)
        overwatch_id -- id for optional overwatcher instance
        """

        # Get the logger Instance
        self.logger = logger

        # Save passed-in variables
        self.site = site
        self.tag = tag
        self.overwatch_id = overwatch_id

        # Retrieve settings for this Launcher
        self.get_settings()

        # Load the adapter
        self.load_adapter()

        # Start listening for commands
        self.run()

    def run(self):
        """The core process of the Launcher instance"""

        # Set up overwatcher
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="launcher",
                                          ow_id=self.overwatch_id)
            self.ow_registrar.register({"site_id": self.site.ID})

        # Create socket to listen for commands
        _socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        _socket.settimeout(5)
        _socket.bind(("", self.specifications["port"]))

        # This is the server portion of the code
        while 1:
            try:
                # Have Registrar update status
                if self.overwatch_id:
                    self.ow_registrar.update({"site_id": self.site.ID})

                # Listen for connections
                _socket.listen(5)
                conn, address = _socket.accept()

                # Read the message from the socket
                message = ""
                while not message.endswith("<rapd_end>"):
                    try:
                        data = conn.recv(BUFFER_SIZE)
                        message += data
                    except:
                        pass
                    time.sleep(0.01)

                # Close the connection
                conn.close()

                # Handle the message
                self.handle_message(message)

            except socket.timeout:
                pass
                # print "5 seconds up"

        # If we exit...
        _socket.close()

    def handle_message(self, message):
        """
        Handle an incoming message

        Keyword arguments:
        message -- raw message from socket
        """

        self.logger.debug("Message received: %s", message)

        # Strip the message of its delivery tags
        message = message.rstrip().replace("<rapd_start>",
                                           "").replace("<rapd_end>", "")

        # Use the adapter to launch
        self.adapter(self.site, message, self.specifications)

    def get_settings(self):
        """
        Get the settings for this Launcher based on ip address and tag
        """

        # Save typing
        launchers = self.site.LAUNCHER_SETTINGS["LAUNCHER_REGISTER"]

        # Get IP Address
        self.ip_address = utils.site.get_ip_address()
        self.logger.debug("Found ip address to be %s", self.ip_address)

        # Look for the launcher matching this ip_address and the input tag
        possible_tags = []
        for launcher in launchers:
            if launcher[0] == self.ip_address and launcher[1] == self.tag:
                self.launcher = launcher
                break
            elif launcher[0] == self.ip_address:
                possible_tags.append(launcher[1])

        # No launcher adapter
        if self.launcher is None:

            # No launchers for this IP address
            if len(possible_tags) == 0:
                print "  There are no launcher adapters registered for this ip address"
            # IP Address in launchers, but not the input tag
            else:
                print text.error + ("There is a launcher adapter registered for "
                                    "this IP address (%s), but not for the input "
                                    "tag (%s)") % (self.ip_address, self.tag)
                print "  Available tags for this IP address:"
                for t in possible_tags:
                    print "    %s" % t
                print text.stop

            # Exit in error state
            sys.exit(9)
        else:
            # Unpack address
            self.ip_address, self.tag, self.launcher_id = self.launcher
            self.specifications = self.site.LAUNCHER_SETTINGS[
                "LAUNCHER_SPECIFICATIONS"][self.launcher_id]
            # Tag launcher in self.site
            self.site.LAUNCHER_ID = self.launcher_id

    def load_adapter(self):
        """Find and load the adapter"""

        # Import the database adapter as database module
        self.adapter = load_module(
            seek_module=self.specifications["adapter"],
            directories=self.site.LAUNCHER_SETTINGS[
                "RAPD_LAUNCHER_ADAPTER_DIRECTORIES"]).LauncherAdapter

        self.logger.debug(self.adapter)
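
A sketch of the site settings shape get_settings() and load_adapter() expect, inferred from how LAUNCHER_REGISTER entries are unpacked and LAUNCHER_SPECIFICATIONS is indexed; all concrete values here are assumptions.

LAUNCHER_SETTINGS = {
    # (ip_address, tag, launcher_id) tuples, matched against the local IP
    # and the tag passed to Launcher()
    "LAUNCHER_REGISTER": [
        ("192.168.1.10", "shell", 0),
    ],
    # Keyed by launcher_id; "port" feeds the socket bind, "adapter" names
    # the module that load_adapter() resolves
    "LAUNCHER_SPECIFICATIONS": {
        0: {"port": 50000,
            "adapter": "shell_simple"},
    },
    # Directories searched for launcher adapter modules
    "RAPD_LAUNCHER_ADAPTER_DIRECTORIES": ["launch/launcher_adapters"],
}
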
Example #14
class Gatherer(object):
    """
    Watches the beamline and signals images and runs over redis
    """

    # For keeping track of file change times
    run_time = 0
    image_time = 0

    # Host computer detail
    ip_address = None

    def __init__(self, site, overwatch_id=None):
        """
        Set up and start the NecatGatherer
        """

        # Get the logger Instance
        self.logger = logging.getLogger("RAPDLogger")

        # Passed-in variables
        self.site = site
        self.overwatch_id = overwatch_id

        self.logger.info("NecatGatherer.__init__")

        # Get our bearings
        self.set_host()

        # Connect to redis
        self.connect()

        # Running conditions
        self.go = True

        # Now run
        self.run()

    def run(self):
        """
        The while loop for watching the files
        """
        self.logger.info("NecatGatherer.run")

        # Set up overwatcher
        self.ow_registrar = Registrar(site=self.site,
                                      ow_type="gatherer",
                                      ow_id=self.overwatch_id)
        self.ow_registrar.register({"site_id":self.site.ID})

        #self.logger.debug("  Will publish new images on filecreate:%s" % self.tag)
        #self.logger.debug("  Will push new images onto images_collected:%s" % self.tag)
        self.logger.debug("  Will publish new datasets on run_data:%s" % self.tag)
        self.logger.debug("  Will push new datasets onto runs_data:%s" % self.tag)
        
        # path prefix for RDMA folder location with Eiger
        if self.tag == 'NECAT_E':
            path_prefix = '/epu2/rdma'
        else:
            path_prefix = ''

        try:
            while self.go:
                # Check if the run info changed in beamline Redis DB.
                #current_run = self.pipe.get("RUN_INFO_SV").set("RUN_INFO_SV", "").execute()
                # get run info passed from RAPD
                #current_run = self.redis.rpop('run_info_T')
                #current_run = self.redis.rpop('run_info_%s'%self.tag[-1])
                current_run_raw = self.redis.rpop('run_info_%s'%self.tag[-1])
                if current_run_raw not in (None, ""):
                    current_run = json.loads(current_run_raw)
                    # get the additional beamline params and put into nice dict.
                    run_data = self.get_run_data(current_run)
                    if self.ignored(run_data['directory']):
                        self.logger.debug("Directory %s is marked to be ignored - skipping", run_data['directory'])
                    else:
                        #run_data['directory'] = dir
                        self.logger.debug("runs_data:%s %s", self.tag, run_data)
                        # Put into exchangeable format
                        run_data_json = json.dumps(run_data)
                        # Publish to Redis
                        self.redis.publish("run_data:%s" % self.tag, run_data_json)
                        #self.redis.publish("run_data:%s" % self.tag, run_data)
                        # Push onto redis list in case no one is currently listening
                        self.redis.lpush("runs_data:%s" % self.tag, run_data_json)
                        #self.redis.lpush("runs_data:%s" % self.tag, run_data)

                time.sleep(0.2)
                # Have Registrar update status
                self.ow_registrar.update({"site_id":self.site.ID})
        except KeyboardInterrupt:
            self.stop()

    def stop(self):
        """
        Stop the loop
        """
        self.logger.debug("NecatGatherer.stop")

        self.go = False
        #self.redis_database.stop()
        #self.bl_database.stop()

    def connect(self):
        """Connect to redis host"""
        # Connect to control redis for publishing run data info
        redis_database = importlib.import_module('database.redis_adapter')

        #self.redis_database = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS)
        #self.redis = self.redis_database.connect_to_redis()
        self.redis = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS)

        # Connect to beamline Redis to monitor if run is launched
        #self.bl_database = redis_database.Database(settings=self.site.SITE_ADAPTER_SETTINGS[self.tag])
        #self.bl_redis = self.bl_database.connect_redis_pool()
        self.bl_redis = redis_database.Database(settings=self.site.SITE_ADAPTER_SETTINGS[self.tag])
        #self.pipe = self.bl_redis.pipeline()

    def set_host(self):
        """
        Use the host's IP address to determine the beamline tag
        """
        self.logger.debug("NecatGatherer.set_host")

        # Figure out which host we are on
        self.ip_address = socket.gethostbyaddr(socket.gethostname())[-1][0]
        self.logger.debug(self.ip_address)

        # Now grab the file locations, beamline from settings
        if self.site.GATHERERS.has_key(self.ip_address):
            self.tag = self.site.GATHERERS[self.ip_address]
            # Make sure we enforce uppercase for tag
            self.tag = self.tag.upper()
        else:
            print "ERROR - no settings for this host"
            self.tag = "test"
            # sys.exit(9)

    def ignored(self, dir):
        """Check if folder is supposed to be ignored."""
        for d in self.site.IMAGE_IGNORE_DIRECTORIES:
            if dir.startswith(d):
                return True
        return False

    def get_run_data(self, run_info):
        """Put together info from run and pass it back."""
        # Split it
        cur_run = run_info.split("_") #runnumber,first#,total#,dist,energy,transmission,omega_start,deltaomega,time,timestamp
        #1_1_23_400.00_12661.90_30.00_45.12_0.20_0.50_
        pipe = self.bl_redis.pipeline()
        #pipe.get("DETECTOR_SV")
        if self.tag == 'NECAT_C':
            pipe.get("ADX_DIRECTORY_SV")
        else:
            pipe.get("EIGER_DIRECTORY_SV")
        pipe.get("RUN_PREFIX_SV")
        #pipe.get("DET_THETA_SV")        #two theta
        #pipe.get("MD2_ALL_AXES_SV")     #for kappa and phi
        return_array = pipe.execute()
        # Append '0_0' to the path for the Pilatus
        if self.tag == 'NECAT_C':
            #dir = os.path.join(return_array[0], "0_0")
            dir = '%s%s'%(return_array[0], "0_0")
        else:
            dir = return_array[0]
        # Get rid of trailing slash from beamline Redis.
        if dir[-1] == '/':
            dir = dir[:-1]
        # Standardize the run information
        run_data = {
            "anomalous":None,
            "beamline":self.tag,                                # Non-standard
            #"beam_size_x":float(raw_run_data.get("beamsize", 0.0)),     # Non-standard
            #"beam_size_y":float(raw_run_data.get("beamsize", 0.0)),     # Non-standard
            #"directory":return_array[0],
            "directory":dir,
            "distance":float(cur_run[3]),
            "energy":float(cur_run[4]),
            #"file_ctime":datetime.datetime.fromtimestamp(self.run_time).isoformat(),
            "image_prefix":return_array[1],
            "kappa":None,
            "number_images":int(cur_run[2]),
            "omega":None,
            "osc_axis":"phi",
            "osc_start":float(cur_run[6]),
            "osc_width":float(cur_run[7]),
            "phi": 0.0,
            "run_number":int(cur_run[0]),
            "site_tag":self.tag,
            "start_image_number":int(cur_run[1]),
            "time":float(cur_run[8]),
            "transmission":float(cur_run[5]),
            "twotheta":None,
        }

        return run_data
Example #15
class Gatherer(object):
    """
    Watches the beamline and signals images and runs over redis
    """
    # For keeping track of file change times
    run_time = 0
    image_time = 0

    # Host computer detail
    ip_address = None

    def __init__(self, site, overwatch_id=None):
        """
        Set up and start the Gatherer
        """

        # Get the logger Instance
        self.logger = logging.getLogger("RAPDLogger")

        # Passed-in variables
        self.site = site
        self.overwatch_id = overwatch_id

        self.logger.info("Gatherer.__init__")

        # Get our bearings
        self.set_host()

        # Connect to redis
        self.connect()

        # Running conditions
        self.go = True

        # Now run
        self.run()

    def run(self):
        """
        The while loop for watching the files
        """
        self.logger.info("Gatherer.run")

        # Set up overwatcher
        self.ow_registrar = Registrar(site=self.site,
                                      ow_type="gatherer",
                                      ow_id=self.overwatch_id)
        self.ow_registrar.register({"site_id":self.site.ID})

        # A RUN & IMAGES EXAMPLE
        # Some logging
        self.logger.debug("  Will publish new images on filecreate:C")
        self.logger.debug("  Will publish new images on image_collected:C")
        self.logger.debug("  Will push new images onto images_collected:C")
        self.logger.debug("  Will publish new images on image_collected:%s" % self.tag)
        self.logger.debug("  Will push new images onto images_collected:%s" % self.tag)

        # Set up the WatchManager
        watch_manager = pyinotify.WatchManager()

        # Set up the notifier for files being made
        notifier = pyinotify.ThreadedNotifier(watch_manager, EventHandler(redis_rapd=self.redis_rapd,
                                                                          redis_remote=self.redis_remote,
                                                                          logger=self.logger))
        notifier.start()

        # Try exiting the pyinotify gracefully
        def exit_gracefully():
            """
            Exit pyinotify properly when program exits
            """
            self.logger.debug("Attempting to gracefully shut down")
            watch_manager.rm_watch(wdd.values())
            notifier.stop()
        atexit.register(exit_gracefully)

        # Start by adding the current dir in the beamline redis db
        DATA_DIR = "ADX_SUBDIR_SV" # "datadir_%s" % self.tag

        # Listen for new directory
        current_dir = ""
        time.sleep(0.5)
        counter = 0
        try:
            while True:
                print counter
                newdir = self.redis_beamline.get(DATA_DIR)
                if (newdir != current_dir):
                    have = False
                    current_dir = newdir
                    self.logger.debug("New directory to watch %s'" % newdir)
                    DirectoryHandler(current_dir=newdir, 
                                     watch_manager=watch_manager,
                                     logger=self.logger)
                time.sleep(1)
                # Update overwatcher every 5 seconds
                if counter % 5 == 0:
                    self.ow_registrar.update({"site_id":self.site.ID})
                    counter = 0
                else:
                    counter += 1

        # Exited by keyboard
        except KeyboardInterrupt:
            self.stop()

    def stop(self):
        """
        Stop the loop
        """
        self.logger.debug("Gatherer.stop")

        self.go = False


    def set_host(self):
        """
        Use the host's IP address to determine the beamline tag
        """
        self.logger.debug("Gatherer.set_host")

        # Figure out which host we are on
        self.ip_address = socket.gethostbyaddr(socket.gethostname())[-1][0]
        self.logger.debug("IP Address: %s" % self.ip_address)

        # Now grab the file locations, beamline from settings
        if self.site.GATHERERS.has_key(self.ip_address):
            self.tag = self.site.GATHERERS[self.ip_address]
            # Make sure we enforce uppercase for tag
            self.tag = self.tag.upper()
        else:
            print "ERROR - no settings for this host"
            self.tag = "test"

    def connect(self):
        """
        Connect to redis host
        """

        self.logger.debug("Gatherer.connect")

        # Connect to RAPD Redis
        self.redis_rapd = Database(settings=self.site.CONTROL_DATABASE_SETTINGS)

        # NECAT uses Redis to communicate with the beamline
        # Connect to beamline Redis to monitor if run is launched
        self.redis_beamline = Database(settings=self.site.SITE_ADAPTER_SETTINGS[self.tag])

        # NECAT uses Redis to communicate with the remote system
        # Connect to remote system Redis to monitor if run is launched
        self.redis_remote = Database(settings=self.site.REMOTE_ADAPTER_SETTINGS)
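
set_host() above keys site.GATHERERS by the host's IP address to pick a beamline tag. A sketch of that mapping's shape; the addresses and tags are assumptions for illustration.

GATHERERS = {
    "192.168.1.21": "NECAT_C",   # Pilatus side
    "192.168.1.22": "NECAT_E",   # Eiger side
}
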
Example #16
    def run(self):
        """
        The while loop for watching the files
        """
        print "run"
        self.logger.info("SercatGatherer.run")

        # Set up overwatcher
        self.ow_registrar = Registrar(site=self.site,
                                      ow_type="gatherer",
                                      ow_id=self.overwatch_id)
        self.ow_registrar.register({"site_id":self.site.ID})

        # Get redis connection
        red = redis.Redis(connection_pool=self.redis_pool)
        
        print "  Will publish new runs on run_data:%s" % self.tag
        print "  Will push new runs onto runs_data:%s" % self.tag
        self.logger.debug("  Will publish new runs on run_data:%s" % self.tag)
        self.logger.debug("  Will push new runs onto runs_data:%s" % self.tag)
        
        if self.tag == 'SERCAT_BM':
            print "  Will publish new images on image_collected:%s" % self.tag
            print "  Will push new images onto images_collected:%s" % self.tag
            self.logger.debug("  Will publish new images on image_collected:%s" % self.tag)
            self.logger.debug("  Will push new images onto images_collected:%s" % self.tag)

        try:
            while self.go:

                #print "go"
                if self.tag == 'SERCAT_BM':
                    # 5 rounds of checking
                    for ___ in range(5):
                        # Check if the run info has changed on the disk
                        if self.check_for_run_info():
                            run_data = self.get_run_data()
                            if run_data:
                                self.logger.debug("run(s)_data:%s %s", self.tag, run_data)
                                # Put into exchangeable format
                                run_data_json = json.dumps(run_data)
                                # Publish to Redis
                                red.publish("run_data:%s" % self.tag, run_data_json)
                                # Push onto redis list in case no one is currently listening
                                red.lpush("runs_data:%s" % self.tag, run_data_json)
                            # 20 image checks
                            for __ in range(20):
                                # Check if the image file has changed
                                if self.check_for_image_collected():
                                    image_name = self.get_image_data()
                                    if image_name:
                                        self.logger.debug("image_collected:%s %s",
                                                          self.tag,
                                                          image_name)
                                        # Publish to Redis
                                        red.publish("image_collected:%s" % self.tag, image_name)
                                        # Push onto redis list in case no one is currently listening
                                        red.lpush("images_collected:%s" % self.tag, image_name)
                                    break
                                else:
                                    time.sleep(0.05)
                # For SERCAT_ID
                else:
                    # Check if the run info has changed on the disk
                    if self.check_for_run_info():
                        run_data = self.get_run_data()
                        if run_data:
                            self.logger.debug("run(s)_data:%s %s", self.tag, run_data)
                            # Put into exchangeable format
                            run_data_json = json.dumps(run_data)
                            # Publish to Redis
                            red.publish("run_data:%s" % self.tag, run_data_json)
                            # Push onto redis list in case no one is currently listening
                            red.lpush("runs_data:%s" % self.tag, run_data_json)

                    else:
                        time.sleep(1.0)

                # Have Registrar update status
                self.ow_registrar.update({"site_id":self.site.ID})

        except KeyboardInterrupt:
            self.stop()
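
For context, and not part of the original examples: a consumer of the publish/lpush pattern used by SercatGatherer above could first drain the runs_data:<tag> backlog and then subscribe to the run_data:<tag> channel. The sketch below assumes plain redis-py; the host and tag defaults are illustrative.

import json
import redis

def iter_runs(host="localhost", tag="SERCAT_ID"):
    """Yield run_data dicts: queued ones first, then live announcements."""
    # host and tag are illustrative defaults, not taken from the examples
    red = redis.Redis(host=host)

    # Drain anything pushed while no consumer was listening
    while True:
        raw = red.rpop("runs_data:%s" % tag)
        if raw is None:
            break
        yield json.loads(raw)

    # Then listen on the pub/sub channel for new runs
    pubsub = red.pubsub()
    pubsub.subscribe("run_data:%s" % tag)
    for message in pubsub.listen():
        if message["type"] == "message":
            yield json.loads(message["data"])
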
Beispiel #18
0
class Gatherer(object):
    """
    Watches the beamline and signals images and runs over redis
    """

    # For keeping track of file change times
    run_time = 0
    image_time = 0

    # Host computer detail
    ip_address = None

    def __init__(self, site, overwatch_id=None):
        """
        Setup and start the SercatGatherer
        """
        print "__init__"
        # Get the logger Instance
        # self.logger = logging.getLogger("RAPDLogger")
        self.logger = utils.log.get_logger(logfile_dir=site.LOGFILE_DIR,
                                           logfile_id="rapd_gather",
                                           #level=log_level
                                           )

        # Passed-in variables
        self.site = site
        self.overwatch_id = overwatch_id

        self.logger.info("SercatGatherer.__init__")

        # Connect to redis
        self.connect()

        # Get our bearings
        self.set_host()

        # Running conditions
        self.go = True

        # Now run
        self.run()

    def run(self):
        """
        The while loop for watching the files
        """
        print "run"
        self.logger.info("SercatGatherer.run")

        # Set up overwatcher
        self.ow_registrar = Registrar(site=self.site,
                                      ow_type="gatherer",
                                      ow_id=self.overwatch_id)
        self.ow_registrar.register({"site_id":self.site.ID})

        # Get redis connection
        red = redis.Redis(connection_pool=self.redis_pool)
        
        print "  Will publish new runs on run_data:%s" % self.tag
        print "  Will push new runs onto runs_data:%s" % self.tag
        self.logger.debug("  Will publish new runs on run_data:%s" % self.tag)
        self.logger.debug("  Will push new runs onto runs_data:%s" % self.tag)
        
        if self.tag == 'SERCAT_BM':
            print "  Will publish new images on image_collected:%s" % self.tag
            print "  Will push new images onto images_collected:%s" % self.tag
            self.logger.debug("  Will publish new images on image_collected:%s" % self.tag)
            self.logger.debug("  Will push new images onto images_collected:%s" % self.tag)

        try:
            while self.go:

                #print "go"
                if self.tag == 'SERCAT_BM':
                    # 5 rounds of checking
                    for ___ in range(5):
                        # Check if the run info has changed on the disk
                        if self.check_for_run_info():
                            run_data = self.get_run_data()
                            if run_data:
                                self.logger.debug("run(s)_data:%s %s", self.tag, run_data)
                                # Put into exchangeable format
                                run_data_json = json.dumps(run_data)
                                # Publish to Redis
                                red.publish("run_data:%s" % self.tag, run_data_json)
                                # Push onto redis list in case no one is currently listening
                                red.lpush("runs_data:%s" % self.tag, run_data_json)
                            # 20 image checks
                            for __ in range(20):
                                # Check if the image file has changed
                                if self.check_for_image_collected():
                                    image_name = self.get_image_data()
                                    if image_name:
                                        self.logger.debug("image_collected:%s %s",
                                                          self.tag,
                                                          image_name)
                                        # Publish to Redis
                                        red.publish("image_collected:%s" % self.tag, image_name)
                                        # Push onto redis list in case no one is currently listening
                                        red.lpush("images_collected:%s" % self.tag, image_name)
                                    break
                                else:
                                    time.sleep(0.05)
                # For SERCAT_ID
                else:
                    # Check if the run info has changed on the disk
                    if self.check_for_run_info():
                        run_data = self.get_run_data()
                        if run_data:
                            self.logger.debug("run(s)_data:%s %s", self.tag, run_data)
                            # Put into exchangeable format
                            run_data_json = json.dumps(run_data)
                            # Publish to Redis
                            red.publish("run_data:%s" % self.tag, run_data_json)
                            # Push onto redis list in case no one is currently listening
                            red.lpush("runs_data:%s" % self.tag, run_data_json)

                    else:
                        time.sleep(1.0)

                # Have Registrar update status
                self.ow_registrar.update({"site_id":self.site.ID})

        except KeyboardInterrupt:
            self.stop()

    def stop(self):
        """
        Stop the loop
        """
        self.logger.debug("SercatGatherer.stop")

        self.go = False

    def connect(self):
        """Connect to redis host"""

        # Connect to redis
        self.redis_pool = redis.ConnectionPool(host=self.site.IMAGE_MONITOR_REDIS_HOST)

    def set_host(self):
        """
        Use the host's IP address to look up which files/beamline to watch
        """
        self.logger.debug("SercatGatherer.set_host")

        # Figure out which host we are on
        self.ip_address = socket.gethostbyaddr(socket.gethostname())[-1][0]
        self.logger.debug(self.ip_address)

        # Now grab the file locations, beamline from settings
        if self.ip_address in self.site.GATHERERS:
            self.image_data_file, self.run_data_file, self.tag = self.site.GATHERERS[self.ip_address]

            # Make sure we enforce uppercase for tag
            self.tag = self.tag.upper()
        else:
            print "ERROR - no settings for this host"
            self.tag = "test"
            # sys.exit(9)

    """
    Collected Image Information
    """
    def get_image_data(self):
        """
        Coordinates the retrieval of image data
        Called if image information file modification time is newer than the time in memory
        """

        # Get the image data line(s)
        image_lines = self.get_image_line()

        # Parse the lines
        image_data = self.parse_image_line(image_lines)

        # Return the parsed data
        return image_data

    def check_for_image_collected(self):
        """
        Returns True if image information file has new timestamp, False if not
        """
        tries = 0
        while tries < 5:
            try:
                statinfo = os.stat(self.image_data_file)
                break
            except OSError:
                if tries == 4:
                    return False
                time.sleep(0.01)
                tries += 1

        # The modification time has not changed
        if self.image_time == statinfo.st_mtime:
            return False
        # The file has changed
        else:
            self.image_time = statinfo.st_mtime
            return True

    def get_image_line(self):
        """
        return contents of xf_status
        """
        # Copy the file to prevent conflicts with other programs
        # HACK
        tmp_file = "/tmp/"+uuid.uuid4().hex
        shutil.copyfile(self.image_data_file, tmp_file)

        # Read in the lines of the file
        with open(tmp_file, "r") as tmp_handle:
            in_lines = tmp_handle.readlines()

        # Remove the temporary file
        os.unlink(tmp_file)

        return in_lines

    def parse_image_line(self, lines):
        """
        Parse the lines from the image information file and return a dict that
        is somewhat intelligible. Expect the file to look something like:
        8288 /data/BM_Emory_jrhorto.raw/xdc5/x13/x13_015/XDC-5_Pn13_r1_1.0400
        """

        try:
            for i in range(len(lines)):
                sline = lines[i].split()
                if len(sline) == 2:
                    if sline[1].strip() == "<none>":
                        self.logger.debug("image_data_file empty")
                        image_name = False
                        break
                    else:
                        # image_name = os.path.realpath(sline[1])
                        image_name = sline[1]
                        break

            self.logger.debug("SercatGatherer.parse_image_line - %s", image_name)
            return image_name
        except Exception:
            self.logger.exception("Failure to parse image data file - error in format?")
            return False

    """
    Run information methods
    """
    def check_for_run_info(self):
        """
        Returns True if run_data_file has been changed, False if not
        """

        # Make sure we have a file to check
        if self.run_data_file:
            tries = 0
            while tries < 5:
                try:
                    statinfo = os.stat(self.run_data_file)
                    break
                except OSError:
                    if tries == 4:
                        return False
                    time.sleep(0.01)
                    tries += 1

            # The modification time has not changed
            if self.run_time == statinfo.st_ctime:
                return False

            # The file has changed
            else:
                self.run_time = statinfo.st_ctime
                return True
        else:
            return False

    def get_run_data(self):
        """
        Return contents of run data file
        """

        if self.run_data_file:

            # Copy the file to prevent conflicts with other programs
            # Use the ramdisk if it is available
            if os.path.exists("/dev/shm"):
                tmp_dir = "/dev/shm/"
            else:
                tmp_dir = "/tmp/"

            tmp_file = tmp_dir+uuid.uuid4().hex
            shutil.copyfile(self.run_data_file, tmp_file)

            # Read in the pickled file
            f = open(tmp_file, "rb")
            raw_run_data = pickle.load(f)
            f.close()
            self.logger.debug(raw_run_data)

            # Remove the temporary file
            os.unlink(tmp_file)

            # Standardize the run information
            """
            The current fields saved into the SQL database adapter are:
                directory,
                image_prefix,
                run_number,
                start_image_number,
                number_images,
                distance,
                phi,
                kappa,
                omega,
                osc_axis,
                osc_start,
                osc_width,
                time,
                transmission,
                energy,
                anomalous
            """
            run_data = {
                "anomalous":None,
                "beamline":raw_run_data.get("beamline", None),              # Non-standard
                "beam_size_x":float(raw_run_data.get("beamsize", 0.0)),     # Non-standard
                "beam_size_y":float(raw_run_data.get("beamsize", 0.0)),     # Non-standard
                "directory":raw_run_data.get("directory", None),
                "distance":float(raw_run_data.get("dist", 0.0)),
                "energy":float(raw_run_data.get("energy", 0.0)),
                "file_ctime":datetime.datetime.fromtimestamp(self.run_time).isoformat(),
                "image_prefix":raw_run_data.get("image_prefix", None),
                "kappa":None,
                "number_images":int(float(raw_run_data.get("Nframes", 0))),
                "omega":None,
                "osc_axis":"phi",
                "osc_start":float(raw_run_data.get("start", 0.0)),
                "osc_width":float(raw_run_data.get("width", 0.0)),
                "phi":float(raw_run_data.get("start", 0.0)),
                "run_number":None,
                "site_tag":self.tag,
                "start_image_number":int(float(raw_run_data.get("first_image", 0))),
                "time":float(raw_run_data.get("time", 0.0)),
                "transmission":float(raw_run_data.get("trans", 0.0)),
                "twotheta":None
            }

        else:
            run_data = False

        return run_data
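
The check_for_run_info and check_for_image_collected methods above share one technique: stat the file (with a few retries in case it is mid-write) and compare the modification time against the last one seen. The same idea as a standalone helper, a sketch rather than the original code:

import os
import time

def file_changed(path, last_mtime, retries=5, delay=0.01):
    """Return (changed, mtime) for path, retrying briefly if stat fails."""
    # Sketch of the mtime polling used by the gatherers above (not the original)
    for attempt in range(retries):
        try:
            statinfo = os.stat(path)
            break
        except OSError:
            if attempt == retries - 1:
                return False, last_mtime
            time.sleep(delay)
    if statinfo.st_mtime == last_mtime:
        return False, last_mtime
    return True, statinfo.st_mtime

The gatherer keeps the last value in self.run_time / self.image_time and only re-reads the underlying file when this check reports a change.
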
Beispiel #19
0
import utils.site
from utils.text import json
from bson.objectid import ObjectId
from utils.overwatch import Registrar

def get_commandline():
    """Get the commandline variables and handle them"""

    # Parse the commandline arguments
    commandline_description = """Data gatherer for SERCAT ID beamline"""
    parser = argparse.ArgumentParser(parents=[utils.commandline.base_parser],
                                     description=commandline_description)

    return parser.parse_args()


if __name__ == "__main__":

    # Get the commandline args
    commandline_args = get_commandline()
    print commandline_args

    # Determine the site
    site_file = utils.site.determine_site(site_arg=commandline_args.site)

    # Import the site settings
    SITE = importlib.import_module(site_file)

    RG = Registrar(site=SITE, ow_type="test", ow_id=commandline_args.overwatch_id)
    RG.run()
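
The script above only relies on commandline_args.site and commandline_args.overwatch_id coming out of utils.commandline.base_parser, which is not shown. A hypothetical stand-in with just those two options (argparse parent parsers need add_help=False):

import argparse

# Hypothetical stand-in for utils.commandline.base_parser (not the real one)
base_parser = argparse.ArgumentParser(add_help=False)
base_parser.add_argument("--site", dest="site", default=None,
                         help="site tag used to pick the site settings module")
base_parser.add_argument("--overwatch_id", dest="overwatch_id", default=None,
                         help="id handed out by an overwatch wrapper, if any")
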
Beispiel #20
0
    def run(self):
        """
        The while loop for watching the files
        """
        self.logger.info("NecatGatherer.run")

        # Set up overwatcher
        self.ow_registrar = Registrar(site=self.site,
                                      ow_type="gatherer",
                                      ow_id=self.overwatch_id)
        self.ow_registrar.register({"site_id": self.site.ID})

        # Get redis connection

        #self.logger.debug("  Will publish new images on filecreate:%s" % self.tag)
        #self.logger.debug("  Will push new images onto images_collected:%s" % self.tag)
        self.logger.debug("  Will publish new datasets on run_data:%s" %
                          self.tag)
        self.logger.debug("  Will push new datasets onto run_data:%s" %
                          self.tag)

        try:
            while self.go:
                # Check if the run info changed in beamline Redis DB.
                #current_run = self.bl_redis.get("RUN_INFO_SV")
                current_run = self.redis.rpop('run_info_T')
                if current_run not in (None, ""):
                    # Split it
                    #cur_run = current_run.split("_") #runid,first#,total#,dist,energy,transmission,omega_start,deltaomega,time,timestamp
                    #1_1_23_400.00_12661.90_30.00_45.12_0.20_0.50_
                    # Reset it back to an empty string if beamline is E.
                    #self.bl_redis.set("RUN_INFO_SV", "")
                    # get the additional beamline params and put into nice dict.
                    run_data = self.get_run_data(current_run)
                    # Get rid of trailing slash from beamline Redis.
                    #dir = run_data['directory']
                    # Have to remove trailing slash
                    #if dir[-1] == '/':
                    #    run_data['directory'] = dir[:-1]
                    dir = "/epu/rdma%s%s_%d_%06d" % (
                        run_data['directory'], run_data['image_prefix'],
                        int(run_data['run_number']),
                        int(run_data['start_image_number']))
                    if self.ignored(dir):
                        self.logger.debug(
                            "Directory %s is marked to be ignored - skipping",
                            dir)
                    else:
                        """
                        run_data['directory'] = dir
                        self.logger.debug("run_data:%s %s", self.tag, run_data)
                        # Put into exchangable format
                        run_data_json = json.dumps(run_data)
                        # Publish to Redis
                        self.redis.publish("run_data:%s" % self.tag, run_data_json)
                        # Push onto redis list in case no one is currently listening
                        self.redis.lpush("run_data:%s" % self.tag, run_data_json)
                        """
                        ## This loop is for testing##
                        for i in range(2):
                            if i == 1:
                                dir = dir.replace('/epu/', '/epu2/')
                            run_data['directory'] = dir
                            self.logger.debug("run_data:%s %s", self.tag,
                                              run_data)
                            # Put into exchangeable format
                            run_data_json = json.dumps(run_data)
                            # Publish to Redis
                            self.redis.publish("run_data:%s" % self.tag,
                                               run_data_json)
                            # Push onto redis list in case no one is currently listening
                            self.redis.lpush("run_data:%s" % self.tag,
                                             run_data_json)
                time.sleep(0.2)
                # Have Registrar update status
                self.ow_registrar.update({"site_id": self.site.ID})
        except KeyboardInterrupt:
            self.stop()
Beispiel #21
0
class Gatherer(object):
    """
    Watches the beamline and signals images and runs over redis
    """

    # For keeping track of file change times
    run_time = 0
    image_time = 0

    # Host computer detail
    ip_address = None

    def __init__(self, site, overwatch_id=None):
        """
        Setup and start the NecatGatherer
        """

        # Get the logger Instance
        self.logger = logging.getLogger("RAPDLogger")

        # Passed-in variables
        self.site = site
        self.overwatch_id = overwatch_id

        self.logger.info("NecatGatherer.__init__")

        # Connect to redis
        self.connect()

        # Get our bearings
        self.set_host()

        # Running conditions
        self.go = True

        # Now run
        self.run()

    def run(self):
        """
        The while loop for watching the files
        """
        self.logger.info("NecatGatherer.run")

        # Set up overwatcher
        self.ow_registrar = Registrar(site=self.site,
                                      ow_type="gatherer",
                                      ow_id=self.overwatch_id)
        self.ow_registrar.register({"site_id": self.site.ID})

        # Get redis connection

        #self.logger.debug("  Will publish new images on filecreate:%s" % self.tag)
        #self.logger.debug("  Will push new images onto images_collected:%s" % self.tag)
        self.logger.debug("  Will publish new datasets on run_data:%s" %
                          self.tag)
        self.logger.debug("  Will push new datasets onto run_data:%s" %
                          self.tag)

        try:
            while self.go:
                # Check if the run info changed in beamline Redis DB.
                #current_run = self.bl_redis.get("RUN_INFO_SV")
                current_run = self.redis.rpop('run_info_T')
                if current_run not in (None, ""):
                    # Split it
                    #cur_run = current_run.split("_") #runid,first#,total#,dist,energy,transmission,omega_start,deltaomega,time,timestamp
                    #1_1_23_400.00_12661.90_30.00_45.12_0.20_0.50_
                    # Reset it back to an empty string if beamline is E.
                    #self.bl_redis.set("RUN_INFO_SV", "")
                    # get the additional beamline params and put into nice dict.
                    run_data = self.get_run_data(current_run)
                    # Get rid of trailing slash from beamline Redis.
                    #dir = run_data['directory']
                    # Have to remove trailing slash
                    #if dir[-1] == '/':
                    #    run_data['directory'] = dir[:-1]
                    dir = "/epu/rdma%s%s_%d_%06d" % (
                        run_data['directory'], run_data['image_prefix'],
                        int(run_data['run_number']),
                        int(run_data['start_image_number']))
                    if self.ignored(dir):
                        self.logger.debug(
                            "Directory %s is marked to be ignored - skipping",
                            dir)
                    else:
                        """
                        run_data['directory'] = dir
                        self.logger.debug("run_data:%s %s", self.tag, run_data)
                        # Put into exchangable format
                        run_data_json = json.dumps(run_data)
                        # Publish to Redis
                        self.redis.publish("run_data:%s" % self.tag, run_data_json)
                        # Push onto redis list in case no one is currently listening
                        self.redis.lpush("run_data:%s" % self.tag, run_data_json)
                        """
                        ## This loop is for testing##
                        for i in range(2):
                            if i == 1:
                                dir = dir.replace('/epu/', '/epu2/')
                            run_data['directory'] = dir
                            self.logger.debug("run_data:%s %s", self.tag,
                                              run_data)
                            # Put into exchangeable format
                            run_data_json = json.dumps(run_data)
                            # Publish to Redis
                            self.redis.publish("run_data:%s" % self.tag,
                                               run_data_json)
                            # Push onto redis list in case no one is currently listening
                            self.redis.lpush("run_data:%s" % self.tag,
                                             run_data_json)
                time.sleep(0.2)
                # Have Registrar update status
                self.ow_registrar.update({"site_id": self.site.ID})
        except KeyboardInterrupt:
            self.stop()

    def stop(self):
        """
        Stop the loop
        """
        self.logger.debug("NecatGatherer.stop")

        #self.go = False
        self.redis_database.stop()
        self.bl_database.stop()

    def connect(self):
        """Connect to redis host"""
        # Connect to control redis for publishing run data info
        redis_database = importlib.import_module('database.redis_adapter')

        self.redis_database = redis_database.Database(
            settings=self.site.CONTROL_DATABASE_SETTINGS)
        self.redis = self.redis_database.connect_to_redis()

        # Connect to beamline Redis to monitor if run is launched
        self.bl_database = redis_database.Database(
            settings=self.site.SITE_ADAPTER_SETTINGS)
        self.bl_redis = self.bl_database.connect_redis_pool()
        #pipe = self.bl_redis.pipeline()

    def set_host(self):
        """
        Use the host's IP address to look up which files/beamline to watch
        """
        self.logger.debug("NecatGatherer.set_host")

        # Figure out which host we are on
        self.ip_address = socket.gethostbyaddr(socket.gethostname())[-1][0]
        self.logger.debug(self.ip_address)

        # Now grab the file locations, beamline from settings
        if self.ip_address in self.site.GATHERERS:
            #self.image_data_file, self.run_data_file, self.tag = self.site.GATHERERS[self.ip_address]
            self.tag = self.site.GATHERERS[self.ip_address]
            # Make sure we enforce uppercase for tag
            self.tag = self.tag.upper()
        else:
            print "ERROR - no settings for this host"
            self.tag = "test"
            # sys.exit(9)

    def ignored(self, dir):
        """Check if folder is supposed to be ignored."""
        for d in self.site.IMAGE_IGNORE_DIRECTORIES:
            if dir.startswith(d):
                return True
        return False

    def get_run_data(self, run_info):
        """Put together info from run and pass it back."""
        # Split it
        cur_run = run_info.split(
            "_"
        )  #runnumber,first#,total#,dist,energy,transmission,omega_start,deltaomega,time,timestamp

        pipe = self.bl_redis.pipeline()
        #pipe.get("DETECTOR_SV")
        pipe.get("EIGER_DIRECTORY_SV")
        pipe.get("RUN_PREFIX_SV")
        #pipe.get("DET_THETA_SV")        #two theta
        #pipe.get("MD2_ALL_AXES_SV")     #for kappa and phi
        return_array = pipe.execute()
        """
        run_data = {'directory'   : current_dir,
                                'prefix'      : extra_data['prefix'],
                                'run_number'  : int(cur_run[0]),
                                'start'       : int(cur_run[1]),
                                'total'       : int(cur_run[2]),
                                'distance'    : float(cur_run[3]),
                                'twotheta'    : extra_data['twotheta'],
                                'phi'         : extra_data['phi'],
                                'kappa'       : extra_data['kappa'],
                                'omega'       : float(cur_run[6]),
                                'axis'        : 'omega',
                                "width"       : float(cur_run[7]),
                                "time"        : float(cur_run[8]),
                                "beamline"    : self.beamline,
                                "file_source" : beamline_settings[self.beamline]['file_source'],
                                "status"      : "STARTED"}
        """
        # Standardize the run information
        run_data = {
            "anomalous": None,
            "beamline": self.tag,  # Non-standard
            #"beam_size_x":float(raw_run_data.get("beamsize", 0.0)),     # Non-standard
            #"beam_size_y":float(raw_run_data.get("beamsize", 0.0)),     # Non-standard
            "directory": return_array[0],
            "distance": float(cur_run[3]),
            "energy": float(cur_run[4]),
            #"file_ctime":datetime.datetime.fromtimestamp(self.run_time).isoformat(),
            "image_prefix": return_array[1],
            "kappa": None,
            "number_images": int(cur_run[2]),
            "omega": None,
            "osc_axis": "phi",
            "osc_start": float(cur_run[6]),
            "osc_width": float(cur_run[7]),
            "phi": 0.0,
            "run_number": int(cur_run[0]),
            "site_tag": self.tag,
            "start_image_number": int(cur_run[1]),
            "time": float(cur_run[8]),
            "transmission": float(cur_run[5]),
            "twotheta": None
        }

        return run_data

    def get_run_data_OLD(self):
        """
        Return contents of run data file
        """

        if self.run_data_file:

            # Copy the file to prevent conflicts with other programs
            # Use the ramdisk if it is available
            if os.path.exists("/dev/shm"):
                tmp_dir = "/dev/shm/"
            else:
                tmp_dir = "/tmp/"

            tmp_file = tmp_dir + uuid.uuid4().hex
            shutil.copyfile(self.run_data_file, tmp_file)

            # Read in the pickled file
            f = open(tmp_file, "rb")
            raw_run_data = pickle.load(f)
            f.close()
            self.logger.debug(raw_run_data)

            # Remove the temporary file
            os.unlink(tmp_file)

            # Standardize the run information
            """
            The current fields saved into the SQL database adapter are:
                directory,
                image_prefix,
                run_number,
                start_image_number,
                number_images,
                distance,
                phi,
                kappa,
                omega,
                osc_axis,
                osc_start,
                osc_width,
                time,
                transmission,
                energy,
                anomalous
            """
            run_data = {
                "anomalous":
                None,
                "beamline":
                raw_run_data.get("beamline", None),  # Non-standard
                "beam_size_x":
                float(raw_run_data.get("beamsize", 0.0)),  # Non-standard
                "beam_size_y":
                float(raw_run_data.get("beamsize", 0.0)),  # Non-standard
                "directory":
                raw_run_data.get("directory", None),
                "distance":
                float(raw_run_data.get("dist", 0.0)),
                "energy":
                float(raw_run_data.get("energy", 0.0)),
                "file_ctime":
                datetime.datetime.fromtimestamp(self.run_time).isoformat(),
                "image_prefix":
                raw_run_data.get("image_prefix", None),
                "kappa":
                None,
                "number_images":
                int(float(raw_run_data.get("Nframes", 0))),
                "omega":
                None,
                "osc_axis":
                "phi",
                "osc_start":
                float(raw_run_data.get("start", 0.0)),
                "osc_width":
                float(raw_run_data.get("width", 0.0)),
                "phi":
                float(raw_run_data.get("start", 0.0)),
                "run_number":
                None,
                "site_tag":
                self.tag,
                "start_image_number":
                int(float(raw_run_data.get("first_image", 0))),
                "time":
                float(raw_run_data.get("time", 0.0)),
                "transmission":
                float(raw_run_data.get("trans", 0.0)),
                "twotheta":
                None
            }

        else:
            run_data = False

        return run_data
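
get_run_data above batches its beamline Redis reads through a pipeline so both *_SV keys come back in one round trip. Reduced to a standalone sketch, assuming bl_redis is a redis-py client and keeping the key names used above:

def fetch_run_extras(bl_redis):
    """Fetch directory and image prefix from beamline Redis in one round trip."""
    # bl_redis: assumed redis-py client; sketch, not the original helper
    pipe = bl_redis.pipeline()
    pipe.get("EIGER_DIRECTORY_SV")   # collection directory
    pipe.get("RUN_PREFIX_SV")        # image prefix
    directory, image_prefix = pipe.execute()
    return {"directory": directory, "image_prefix": image_prefix}
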
Beispiel #22
0
class Gatherer(object):
    """
    Watches the beamline and signals images and runs over redis
    """

    # For keeping track of file change times
    run_time = 0
    image_time = 0

    # Host computer detail
    ip_address = None

    def __init__(self, site, overwatch_id=None):
        """
        Setup and start the NecatGatherer
        """

        # Get the logger Instance
        self.logger = logging.getLogger("RAPDLogger")

        # Passed-in variables
        self.site = site
        self.overwatch_id = overwatch_id

        self.logger.info("NecatGatherer.__init__")

        # Connect to redis
        self.connect()

        # Get our bearings
        self.set_host()

        # Running conditions
        self.go = True

        # Now run
        self.run()

    def run(self):
        """
        The while loop for watching the files
        """
        self.logger.info("NecatGatherer.run")

        # Set up overwatcher
        self.ow_registrar = Registrar(site=self.site,
                                      ow_type="gatherer",
                                      ow_id=self.overwatch_id)
        self.ow_registrar.register({"site_id":self.site.ID})

        # Get redis connection

        #self.logger.debug("  Will publish new images on filecreate:%s" % self.tag)
        #self.logger.debug("  Will push new images onto images_collected:%s" % self.tag)
        self.logger.debug("  Will publish new datasets on run_data:%s" % self.tag)
        self.logger.debug("  Will push new datasets onto run_data:%s" % self.tag)

        try:
            while self.go:
                # Check if the run info changed in beamline Redis DB.
                #current_run = self.bl_redis.get("RUN_INFO_SV")
                current_run = self.redis.rpop('run_info_T')
                if current_run not in (None, ""):
                    # Split it
                    #cur_run = current_run.split("_") #runid,first#,total#,dist,energy,transmission,omega_start,deltaomega,time,timestamp
                    #1_1_23_400.00_12661.90_30.00_45.12_0.20_0.50_
                    # Reset it back to an empty string if beamline is E.
                    #self.bl_redis.set("RUN_INFO_SV", "")
                    # get the additional beamline params and put into nice dict.
                    run_data = self.get_run_data(current_run)
                    # Get rid of trailing slash from beamline Redis.
                    #dir = run_data['directory']
                    # Have to remove trailing slash
                    #if dir[-1] == '/':
                    #    run_data['directory'] = dir[:-1]
                    dir = "/epu/rdma%s%s_%d_%06d" % (
                                      run_data['directory'],
                                      run_data['image_prefix'],
                                      int(run_data['run_number']),
                                      int(run_data['start_image_number']))
                    if self.ignored(dir):
                        self.logger.debug("Directory %s is marked to be ignored - skipping", dir)
                    else:
                        """
                        run_data['directory'] = dir
                        self.logger.debug("run_data:%s %s", self.tag, run_data)
                        # Put into exchangable format
                        run_data_json = json.dumps(run_data)
                        # Publish to Redis
                        self.redis.publish("run_data:%s" % self.tag, run_data_json)
                        # Push onto redis list in case no one is currently listening
                        self.redis.lpush("run_data:%s" % self.tag, run_data_json)
                        """
                        ## This loop is for testing##
                        for i in range(2):
                            if i == 1:
                                dir = dir.replace('/epu/', '/epu2/')
                            run_data['directory'] = dir
                            self.logger.debug("run_data:%s %s", self.tag, run_data)
                            # Put into exchangeable format
                            run_data_json = json.dumps(run_data)
                            # Publish to Redis
                            self.redis.publish("run_data:%s" % self.tag, run_data_json)
                            # Push onto redis list in case no one is currently listening
                            self.redis.lpush("run_data:%s" % self.tag, run_data_json)
                time.sleep(0.2)
                # Have Registrar update status
                self.ow_registrar.update({"site_id":self.site.ID})
        except KeyboardInterrupt:
            self.stop()

    def stop(self):
        """
        Stop the loop
        """
        self.logger.debug("NecatGatherer.stop")

        #self.go = False
        self.redis_database.stop()
        self.bl_database.stop()

    def connect(self):
        """Connect to redis host"""
        # Connect to control redis for publishing run data info
        redis_database = importlib.import_module('database.redis_adapter')

        self.redis_database = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS)
        self.redis = self.redis_database.connect_to_redis()

        # Connect to beamline Redis to monitor if run is launched
        self.bl_database = redis_database.Database(settings=self.site.SITE_ADAPTER_SETTINGS)
        self.bl_redis = self.bl_database.connect_redis_pool()
        #pipe = self.bl_redis.pipeline()

    def set_host(self):
        """
        Use the host's IP address to look up which files/beamline to watch
        """
        self.logger.debug("NecatGatherer.set_host")

        # Figure out which host we are on
        self.ip_address = socket.gethostbyaddr(socket.gethostname())[-1][0]
        self.logger.debug(self.ip_address)

        # Now grab the file locations, beamline from settings
        if self.ip_address in self.site.GATHERERS:
            #self.image_data_file, self.run_data_file, self.tag = self.site.GATHERERS[self.ip_address]
            self.tag = self.site.GATHERERS[self.ip_address]
            # Make sure we enforce uppercase for tag
            self.tag = self.tag.upper()
        else:
            print "ERROR - no settings for this host"
            self.tag = "test"
            # sys.exit(9)

    def ignored(self, dir):
        """Check if folder is supposed to be ignored."""
        for d in self.site.IMAGE_IGNORE_DIRECTORIES:
            if dir.startswith(d):
                return True
        return False

    def get_run_data(self, run_info):
        """Put together info from run and pass it back."""
        # Split it
        cur_run = run_info.split("_") #runnumber,first#,total#,dist,energy,transmission,omega_start,deltaomega,time,timestamp

        pipe = self.bl_redis.pipeline()
        #pipe.get("DETECTOR_SV")
        pipe.get("EIGER_DIRECTORY_SV")
        pipe.get("RUN_PREFIX_SV")
        #pipe.get("DET_THETA_SV")        #two theta
        #pipe.get("MD2_ALL_AXES_SV")     #for kappa and phi
        return_array = pipe.execute()

        """
        run_data = {'directory'   : current_dir,
                                'prefix'      : extra_data['prefix'],
                                'run_number'  : int(cur_run[0]),
                                'start'       : int(cur_run[1]),
                                'total'       : int(cur_run[2]),
                                'distance'    : float(cur_run[3]),
                                'twotheta'    : extra_data['twotheta'],
                                'phi'         : extra_data['phi'],
                                'kappa'       : extra_data['kappa'],
                                'omega'       : float(cur_run[6]),
                                'axis'        : 'omega',
                                "width"       : float(cur_run[7]),
                                "time"        : float(cur_run[8]),
                                "beamline"    : self.beamline,
                                "file_source" : beamline_settings[self.beamline]['file_source'],
                                "status"      : "STARTED"}
        """
        # Standardize the run information
        run_data = {
            "anomalous":None,
            "beamline":self.tag,                                # Non-standard
            #"beam_size_x":float(raw_run_data.get("beamsize", 0.0)),     # Non-standard
            #"beam_size_y":float(raw_run_data.get("beamsize", 0.0)),     # Non-standard
            "directory":return_array[0],
            "distance":float(cur_run[3]),
            "energy":float(cur_run[4]),
            #"file_ctime":datetime.datetime.fromtimestamp(self.run_time).isoformat(),
            "image_prefix":return_array[1],
            "kappa":None,
            "number_images":int(cur_run[2]),
            "omega":None,
            "osc_axis":"phi",
            "osc_start":float(cur_run[6]),
            "osc_width":float(cur_run[7]),
            "phi": 0.0,
            "run_number":int(cur_run[0]),
            "site_tag":self.tag,
            "start_image_number":int(cur_run[1]),
            "time":float(cur_run[8]),
            "transmission":float(cur_run[5]),
            "twotheta":None
        }

        return run_data

    def get_run_data_OLD(self):
        """
        Return contents of run data file
        """

        if self.run_data_file:

            # Copy the file to prevent conflicts with other programs
            # Use the ramdisk if it is available
            if os.path.exists("/dev/shm"):
                tmp_dir = "/dev/shm/"
            else:
                tmp_dir = "/tmp/"

            tmp_file = tmp_dir+uuid.uuid4().hex
            shutil.copyfile(self.run_data_file, tmp_file)

            # Read in the pickled file
            f = open(tmp_file, "rb")
            raw_run_data = pickle.load(f)
            f.close()
            self.logger.debug(raw_run_data)

            # Remove the temporary file
            os.unlink(tmp_file)

            # Standardize the run information
            """
            The current fields saved into the SQL database adapter are:
                directory,
                image_prefix,
                run_number,
                start_image_number,
                number_images,
                distance,
                phi,
                kappa,
                omega,
                osc_axis,
                osc_start,
                osc_width,
                time,
                transmission,
                energy,
                anomalous
            """
            run_data = {
                "anomalous":None,
                "beamline":raw_run_data.get("beamline", None),              # Non-standard
                "beam_size_x":float(raw_run_data.get("beamsize", 0.0)),     # Non-standard
                "beam_size_y":float(raw_run_data.get("beamsize", 0.0)),     # Non-standard
                "directory":raw_run_data.get("directory", None),
                "distance":float(raw_run_data.get("dist", 0.0)),
                "energy":float(raw_run_data.get("energy", 0.0)),
                "file_ctime":datetime.datetime.fromtimestamp(self.run_time).isoformat(),
                "image_prefix":raw_run_data.get("image_prefix", None),
                "kappa":None,
                "number_images":int(float(raw_run_data.get("Nframes", 0))),
                "omega":None,
                "osc_axis":"phi",
                "osc_start":float(raw_run_data.get("start", 0.0)),
                "osc_width":float(raw_run_data.get("width", 0.0)),
                "phi":float(raw_run_data.get("start", 0.0)),
                "run_number":None,
                "site_tag":self.tag,
                "start_image_number":int(float(raw_run_data.get("first_image", 0))),
                "time":float(raw_run_data.get("time", 0.0)),
                "transmission":float(raw_run_data.get("trans", 0.0)),
                "twotheta":None
            }

        else:
            run_data = False

        return run_data
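
get_run_data_OLD above copies the beamline's pickled run file aside (preferring the /dev/shm ramdisk) before unpickling it, so a half-written file or a competing reader cannot interfere. The same idea as a small helper, sketched under those assumptions:

import os
import pickle
import shutil
import uuid

def read_run_pickle(run_data_file):
    """Copy the run file to a scratch location, unpickle it, and clean up."""
    # Sketch of get_run_data_OLD's copy-then-unpickle step (not the original)
    tmp_dir = "/dev/shm/" if os.path.exists("/dev/shm") else "/tmp/"
    tmp_file = tmp_dir + uuid.uuid4().hex
    shutil.copyfile(run_data_file, tmp_file)
    try:
        with open(tmp_file, "rb") as handle:
            return pickle.load(handle)
    finally:
        os.unlink(tmp_file)
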
Beispiel #23
0
    def run(self):
        """The core process of the Launcher instance"""

        # Set up overwatcher
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="launch_manager",
                                          ow_id=self.overwatch_id)
            self.ow_registrar.register()

        # Get the initial possible jobs lists
        full_job_list = [x.get('job_list') for x in self.site.LAUNCHER_SETTINGS["LAUNCHER_SPECIFICATIONS"]]

        try:
            # This is the server portion of the code
            while self.running:
                # Have Registrar update status
                #if self.overwatch_id:
                #    self.ow_registrar.update()

                # Get updated job list by checking which launchers are running
                # Reassign jobs if launcher(s) status changes
                if round(self.timer%TIMER,1) == 1.0:
                    try:
                        # Have Registrar update status
                        if self.overwatch_id:
                            self.ow_registrar.update()

                        # Check which launchers are running
                        temp = [l for l in full_job_list if self.redis.get("OW:"+l)]

                        # Determine which launcher(s) went offline
                        offline = [line for line in self.job_list if line not in temp]
                        if len(offline) > 0:
                            # Pop waiting jobs off their job_lists and push back in RAPD_JOBS for reassignment.
                            for _l in offline:
                                while self.redis.llen(_l) != 0:
                                    self.redis.rpoplpush(_l, 'RAPD_JOBS')

                        # Determine which launcher(s) came online (Also runs at startup!)
                        online = [line for line in temp if line not in self.job_list]
                        if len(online) > 0:
                            # Pop jobs off RAPD_JOBS_WAITING and push back onto RAPD_JOBS for reassignment.
                            while self.redis.llen('RAPD_JOBS_WAITING') != 0:
                                self.redis.rpoplpush('RAPD_JOBS_WAITING', 'RAPD_JOBS')

                        # Update the self.job_list
                        self.job_list = temp

                    except redis.exceptions.ConnectionError:
                        if self.logger:
                            self.logger.exception("Remote Redis is not up. Waiting for Sentinel to switch to new host")
                        time.sleep(1)

                # Look for a new command
                # This will throw a redis.exceptions.ConnectionError if redis is unreachable
                #command = self.redis.brpop(["RAPD_JOBS",], 5)
                try:
                    while self.redis.llen("RAPD_JOBS") != 0:
                        command = self.redis.rpop("RAPD_JOBS")
                        # Handle the message
                        if command:
                            #self.push_command(json.loads(command))
                            self.push_command(json.loads(command))
                            # Only run 1 command
                            # self.running = False
                            # break
                    # sleep a little when jobs aren't coming in.
                    time.sleep(0.2)
                    self.timer += 0.2
                except redis.exceptions.ConnectionError:
                    if self.logger:
                        self.logger.exception("Remote Redis is not up. Waiting for Sentinel to switch to new host")
                    time.sleep(1)

        except KeyboardInterrupt:
            self.stop()
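
The launch manager above reassigns work with RPOPLPUSH: jobs queued for a launcher that went offline, and jobs parked on RAPD_JOBS_WAITING, go back onto RAPD_JOBS so a live launcher can pick them up. A compact sketch of that step, assuming red is a redis-py client and keeping the list names used above:

def reassign_jobs(red, offline_job_lists):
    """Move orphaned and parked jobs back onto the main RAPD_JOBS list."""
    # red: assumed redis-py client; sketch of the reassignment step above
    # Jobs queued for launchers that have gone offline
    for job_list in offline_job_lists:
        while red.llen(job_list):
            red.rpoplpush(job_list, "RAPD_JOBS")
    # Jobs parked while no launcher was available
    while red.llen("RAPD_JOBS_WAITING"):
        red.rpoplpush("RAPD_JOBS_WAITING", "RAPD_JOBS")
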
Beispiel #24
0
    def run(self):
        """
        The while loop for watching the files
        """
        self.logger.info("NecatGatherer.run")

        # Set up overwatcher
        self.ow_registrar = Registrar(site=self.site,
                                      ow_type="gatherer",
                                      ow_id=self.overwatch_id)
        self.ow_registrar.register({"site_id":self.site.ID})

        # Get redis connection

        #self.logger.debug("  Will publish new images on filecreate:%s" % self.tag)
        #self.logger.debug("  Will push new images onto images_collected:%s" % self.tag)
        self.logger.debug("  Will publish new datasets on run_data:%s" % self.tag)
        self.logger.debug("  Will push new datasets onto run_data:%s" % self.tag)

        try:
            while self.go:
                # Check if the run info changed in beamline Redis DB.
                #current_run = self.bl_redis.get("RUN_INFO_SV")
                current_run = self.redis.rpop('run_info_T')
                if current_run not in (None, ""):
                    # Split it
                    #cur_run = current_run.split("_") #runid,first#,total#,dist,energy,transmission,omega_start,deltaomega,time,timestamp
                    #1_1_23_400.00_12661.90_30.00_45.12_0.20_0.50_
                    # Reset it back to an empty string if beamline is E.
                    #self.bl_redis.set("RUN_INFO_SV", "")
                    # get the additional beamline params and put into nice dict.
                    run_data = self.get_run_data(current_run)
                    # Get rid of trailing slash from beamline Redis.
                    #dir = run_data['directory']
                    # Have to remove trailing slash
                    #if dir[-1] == '/':
                    #    run_data['directory'] = dir[:-1]
                    dir = "/epu/rdma%s%s_%d_%06d" % (
                                      run_data['directory'],
                                      run_data['image_prefix'],
                                      int(run_data['run_number']),
                                      int(run_data['start_image_number']))
                    if self.ignored(dir):
                        self.logger.debug("Directory %s is marked to be ignored - skipping", dir)
                    else:
                        """
                        run_data['directory'] = dir
                        self.logger.debug("run_data:%s %s", self.tag, run_data)
                        # Put into exchangable format
                        run_data_json = json.dumps(run_data)
                        # Publish to Redis
                        self.redis.publish("run_data:%s" % self.tag, run_data_json)
                        # Push onto redis list in case no one is currently listening
                        self.redis.lpush("run_data:%s" % self.tag, run_data_json)
                        """
                        ## This loop is for testing##
                        for i in range(2):
                            if i == 1:
                                dir = dir.replace('/epu/', '/epu2/')
                            run_data['directory'] = dir
                            self.logger.debug("run_data:%s %s", self.tag, run_data)
                            # Put into exchangeable format
                            run_data_json = json.dumps(run_data)
                            # Publish to Redis
                            self.redis.publish("run_data:%s" % self.tag, run_data_json)
                            # Push onto redis list in case no one is currently listening
                            self.redis.lpush("run_data:%s" % self.tag, run_data_json)
                time.sleep(0.2)
                # Have Registrar update status
                self.ow_registrar.update({"site_id":self.site.ID})
        except KeyboardInterrupt:
            self.stop()
Beispiel #25
0
class Monitor(threading.Thread):
    """Monitor for new data collection run to be submitted to a redis instance"""

    # For stopping/starting
    running = True

    # Connection to the Redis database
    redis = None

    # Storage for where to look for information
    tags = []
    run_lists = []

    # Overwatch
    ow_registrar = None

    def __init__(self,
                 site,
                 notify=None,
                 overwatch_id=None):
        """
        Initialize the RedisMonitor

        Keyword arguments:
        site -- site description
        notify -- function called when a new run is captured
        overwatch_id -- id for optional overwatch wrapper
        """

        self.logger = logging.getLogger("RAPDLogger")

        # Initialize the thread
        threading.Thread.__init__(self)

        # Passed-in variables
        self.site = site
        self.notify = notify
        self.overwatch_id = overwatch_id

        # Figure out the site
        self.get_tags()

        # Start the thread
        # self.daemon = True
        self.start()

    def get_tags(self):
        """Transform site.ID into tag[s] for image monitor"""

        # A string is input - one tag
        if isinstance(self.site.ID, str):
            self.tags = [self.site.ID.upper()]

        # Tuple or list
        elif isinstance(self.site.ID, tuple) or isinstance(self.site.ID, list):
            for site_id in self.site.ID:
                self.tags.append(site_id.upper())

        # Figure out where we are going to look
        for site_tag in self.tags:
            self.run_lists.append(("runs_data:"+site_tag, site_tag))

        self.logger.debug("run_lists: %s", str(self.run_lists))

    def stop(self):
        """Stop the process of polling the redis instance"""

        self.logger.debug("Stopping")

        self.running = False
        self.redis_database.stop()

    def connect_to_redis(self):
        """Connect to the redis instance"""
        redis_database = importlib.import_module('database.redis_adapter')

        #self.redis_database = redis_database.Database(settings=self.site.RUN_MONITOR_SETTINGS)
        #self.redis = self.redis_database.connect_to_redis()
        self.redis = redis_database.Database(settings=self.site.RUN_MONITOR_SETTINGS)

    def run(self):
        self.logger.debug("Running")

        # Connect to Redis
        self.connect_to_redis()

        # Create Overwatch Registrar instance
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="control",
                                          ow_id=self.overwatch_id)
            # Register
            self.ow_registrar.register()

        # Determine interval for overwatch update
        # ow_round_interval = max(int((5 * len(self.run_lists)) / POLLING_REST), int(5/POLLING_REST))
        ow_round_interval = 10

        self.logger.debug("Finished registering %d", ow_round_interval)

        while self.running:

            # ~5 seconds between overwatch updates
            for __ in range(ow_round_interval):

                for run_list, site_tag in self.run_lists:
                    raw_run_data = self.redis.rpop(run_list)
                    # Have new run data
                    if raw_run_data not in (None, ""):
                        # Parse into python object
                        #print raw_run_data
                        run_data = json.loads(raw_run_data)

                        # Notify core thread that an image has been collected
                        self.notify({"message_type":"NEWRUN",
                                     "run_data":run_data,
                                     "site_tag":site_tag})

                        self.logger.debug("New run data %s", raw_run_data)

                    # Slow it down a little
                    time.sleep(POLLING_REST)
                time.sleep(POLLING_REST)

            # Have Registrar update status
            if self.overwatch_id:
                self.ow_registrar.update()
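
# Illustrative usage sketch (not part of the original example): wiring the run
# Monitor above to a notify callback. The `site` object and the callback name
# are assumptions; in RAPD the callback is normally a method on the control process.
def handle_new_run(message):
    """Hypothetical callback receiving the NEWRUN payload built in run()"""
    print("New run on %s: %s" % (message["site_tag"], message["run_data"]))

# run_monitor = Monitor(site=site, notify=handle_new_run, overwatch_id=None)
# ... later ...
# run_monitor.stop()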
Beispiel #26
0
    def run(self):
        """
        The while loop for watching the files
        """
        self.logger.info("Gatherer.run")

        # Set up overwatcher
        self.ow_registrar = Registrar(site=self.site,
                                      ow_type="gatherer",
                                      ow_id=self.overwatch_id)
        self.ow_registrar.register({"site_id":self.site.ID})

        # A RUN & IMAGES EXAMPLE
        # Some logging
        self.logger.debug("  Will publish new images on filecreate:C")
        self.logger.debug("  Will publish new images on image_collected:C")
        self.logger.debug("  Will push new images onto images_collected:C")
        self.logger.debug("  Will publish new images on image_collected:%s" % self.tag)
        self.logger.debug("  Will push new images onto images_collected:%s" % self.tag)

        # Set up the WatchManager
        watch_manager = pyinotify.WatchManager()

        # Set up the notifier for files being made
        notifier = pyinotify.ThreadedNotifier(watch_manager, EventHandler(redis_rapd=self.redis_rapd,
                                                                          redis_remote=self.redis_remote,
                                                                          logger=self.logger))
        notifier.start()

        # Try exiting the pyinotify gracefully
        def exit_gracefully():
            """
            Exit pyinotify properly when program exits
            """
            self.logger.debug("Attempting to gracefully shut down")
            watch_manager.rm_watch(watch_manager.watches.keys())
            notifier.stop()
        atexit.register(exit_gracefully)

        # Start by adding the current dir in the beamline redis db
        DATA_DIR = "ADX_SUBDIR_SV" # "datadir_%s" % self.tag

        # Listen for new directory
        current_dir = ""
        time.sleep(0.5)
        counter = 0
        try:
            while True:
                print counter
                newdir = self.redis_beamline.get(DATA_DIR)
                if (newdir != current_dir):
                    current_dir = newdir
                    self.logger.debug("New directory to watch %s" % newdir)
                    DirectoryHandler(current_dir=newdir, 
                                     watch_manager=watch_manager,
                                     logger=self.logger)
                time.sleep(1)
                counter += 1
                # Update overwatcher every 5 seconds
                if counter % 5 == 0:
                    self.ow_registrar.update({"site_id":self.site.ID})
                    counter = 0

        # Exited by keyboard
        except KeyboardInterrupt:
            self.stop()
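
# The EventHandler handed to ThreadedNotifier above is defined elsewhere in the
# codebase; this is only a sketch of the assumed pattern: react to file-creation
# events and forward new image paths over the two Redis connections. The ":C"
# channel/list names simply mirror the debug messages logged in run().
import pyinotify

class EventHandlerSketch(pyinotify.ProcessEvent):
    """Hypothetical pyinotify handler publishing newly created image files"""

    def my_init(self, redis_rapd=None, redis_remote=None, logger=None):
        # my_init is pyinotify's hook for keyword arguments passed to the handler
        self.redis_rapd = redis_rapd
        self.redis_remote = redis_remote
        self.logger = logger

    def process_IN_CREATE(self, event):
        # Publish the new image and push it onto a list for late consumers
        fullname = event.pathname
        if self.logger:
            self.logger.debug("New file %s" % fullname)
        self.redis_rapd.publish("image_collected:C", fullname)
        self.redis_rapd.lpush("images_collected:C", fullname)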
Beispiel #27
0
    def run(self):
        """The core process of the Launcher instance"""

        # Set up overwatcher
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="launch_manager",
                                          ow_id=self.overwatch_id)
            self.ow_registrar.register()

        # Get the initial possible jobs lists
        full_job_list = [
            x.get('job_list')
            for x in self.site.LAUNCHER_SETTINGS["LAUNCHER_SPECIFICATIONS"]
        ]

        try:
            # This is the server portion of the code
            while self.running:
                # Get updated job list by checking which launchers are running
                # Reassign jobs if launcher(s) status changes
                if round(self.timer % TIMER, 1) == 1.0:
                    try:
                        # Have Registrar update status
                        if self.overwatch_id:
                            self.ow_registrar.update()

                        # Check which launchers are running
                        temp = [
                            l for l in full_job_list
                            if self.redis.get("OW:" + l)
                        ]

                        # Determine which launcher(s) went offline
                        offline = [
                            line for line in self.job_list
                            if line not in temp
                        ]
                        if len(offline) > 0:
                            # Pop waiting jobs off their job_lists and push back in RAPD_JOBS for reassignment.
                            for _l in offline:
                                while self.redis.llen(_l) != 0:
                                    self.redis.rpoplpush(_l, 'RAPD_JOBS')

                        # Determine which launcher(s) came online (Also runs at startup!)
                        online = [
                            line for line in temp
                            if line not in self.job_list
                        ]
                        if len(online) > 0:
                            # Pop jobs off RAPD_JOBS_WAITING and push back onto RAPD_JOBS for reassignment.
                            while self.redis.llen('RAPD_JOBS_WAITING') != 0:
                                self.redis.rpoplpush('RAPD_JOBS_WAITING',
                                                     'RAPD_JOBS')

                        # Update the self.job_list
                        self.job_list = temp

                    except redis.exceptions.ConnectionError:
                        if self.logger:
                            self.logger.exception(
                                "Remote Redis is not up. Waiting for Sentinel to switch to new host"
                            )
                        time.sleep(1)

                # Look for a new command
                # This will throw a redis.exceptions.ConnectionError if redis is unreachable
                #command = self.redis.brpop(["RAPD_JOBS",], 5)
                try:
                    while self.redis.llen("RAPD_JOBS") != 0:
                        command = self.redis.rpop("RAPD_JOBS")
                        # Handle the message
                        if command:
                            self.push_command(json.loads(command))
                            # Only run 1 command
                            # self.running = False
                            # break
                    # sleep a little when jobs aren't coming in.
                    time.sleep(0.2)
                    self.timer += 0.2
                except redis.exceptions.ConnectionError:
                    if self.logger:
                        self.logger.exception(
                            "Remote Redis is not up. Waiting for Sentinel to switch to new host"
                        )
                    time.sleep(1)

        except KeyboardInterrupt:
            self.stop()
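
# The job-reassignment idea used above, shown in isolation with a plain redis-py
# client and made-up list names (both are assumptions). Jobs queued for a
# launcher that went offline are drained back onto the shared RAPD_JOBS list so
# a live launcher can pick them up; rpoplpush keeps each move atomic.
import redis

def reassign_jobs(redis_client, offline_job_lists, shared_list="RAPD_JOBS"):
    """Move every pending job from the offline lists back onto the shared list"""
    for job_list in offline_job_lists:
        while redis_client.llen(job_list) != 0:
            redis_client.rpoplpush(job_list, shared_list)

# Example with hypothetical connection settings and list names:
# client = redis.Redis(host="localhost", port=6379)
# reassign_jobs(client, ["launcher_shell_jobs", "launcher_cluster_jobs"])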
Beispiel #28
0
class Launcher(object):
    """
    Connects to Redis instance, listens for jobs, and spawns new threads using defined
    launcher_adapter
    """

    adapter = None
    ip_address = None
    launcher = None
    tag = None
    pool = None

    def __init__(self, site, tag="", logger=None, overwatch_id=False):
        """
        Initialize the Launcher instance

        Keyword arguments:
        site -- site object with relevant information to run
        tag -- optional string describing launcher. Defined in site.LAUNCHER_REGISTER
        logger -- logger instance (default = None)
        overwatch_id -- id for optional overwatcher instance
        """
        # Get the logger Instance
        self.logger = logger

        # Save passed-in variables
        self.site = site
        self.tag = tag
        self.overwatch_id = overwatch_id

        # Retrieve settings for this Launcher
        self.get_settings()

        # Check if additional params in self.launcher
        self.check_settings()

        # Load the adapter
        self.load_adapter()

        # Connect to Redis for communications
        self.connect_to_redis()

        # For loop in self.run
        self.running = True

        self.run()

    def run(self):
        """The core process of the Launcher instance"""

        # Set up overwatcher
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="launcher",
                                          ow_id=self.overwatch_id)
            self.ow_registrar.register({
                "site_id":
                json.dumps(self.launcher.get('site_tag')),
                "job_list":
                self.job_list
            })

        try:
            timer = 0
            # This is the server portion of the code
            while self.running:
                # Have Registrar update status every second
                if round(timer % 1, 1) in (0.0, 1.0):
                    if self.overwatch_id:
                        #self.ow_registrar.update({"site_id":self.site.ID,
                        self.ow_registrar.update({
                            "site_id":
                            json.dumps(self.launcher.get('site_tag')),
                            "job_list":
                            self.job_list
                        })
                        #self.ow_registrar.update({"job_list":self.job_list})

                # Look for a new command
                # This will throw a redis.exceptions.ConnectionError if redis is unreachable
                #command = self.redis.brpop(["RAPD_JOBS",], 5)
                try:
                    while self.redis.llen(self.job_list) != 0:
                        command = self.redis.rpop(self.job_list)
                        # Handle the message
                        if command:
                            self.handle_command(json.loads(command))

                            # Only run 1 command
                            # self.running = False
                            # break
                    # sleep a little when jobs aren't coming in.
                    time.sleep(0.2)
                    timer += 0.2
                except redis.exceptions.ConnectionError:
                    if self.logger:
                        self.logger.exception(
                            "Remote Redis is not up. Waiting for Sentinel to switch to new host"
                        )
                    time.sleep(1)

        except KeyboardInterrupt:
            self.stop()

    def stop(self):
        """Stop everything smoothly."""
        self.running = False
        # Close the file lock handle
        close_lock_file()
        # Try to close the pool; known Python bugs can raise errors here
        if self.pool:
            self.pool.close()
            self.pool.join()
        # Tell overwatch it is closing
        if self.overwatch_id:
            self.ow_registrar.stop()

    def connect_to_redis(self):
        """Connect to the redis instance"""
        redis_database = importlib.import_module('database.redis_adapter')

        self.redis = redis_database.Database(
            settings=self.site.CONTROL_DATABASE_SETTINGS, logger=self.logger)

    def handle_command(self, command):
        """
        Handle an incoming command

        Keyword arguments:
        command -- command from redis
        """
        print "handle_command"
        pprint(command)

        # Split up the command
        message = command
        if self.logger:
            self.logger.debug("Command received channel:%s  message: %s",
                              self.job_list, message)

        # Use the adapter to launch
        # If running through a shell, limit the number of concurrent processes
        if self.pool:
            self.pool.apply_async(self.adapter,
                                  (self.site, message, self.launcher))
        else:
            self.adapter(self.site, message, self.launcher)

    def get_settings(self):
        """
        Get the settings for this Launcher based on ip address and tag
        """

        # Get IP Address
        self.ip_address = utils.site.get_ip_address()
        #print self.ip_address
        if self.logger:
            self.logger.debug("Found ip address to be %s", self.ip_address)

        # Save typing
        launchers = self.site.LAUNCHER_SETTINGS["LAUNCHER_SPECIFICATIONS"]

        # Look for the launcher matching this ip_address and the input tag
        possible_tags = []
        for launcher in launchers:
            #print launcher
            if launcher.get('ip_address') == self.ip_address and launcher.get(
                    'tag') == self.tag:
                self.launcher = launcher
                break
            elif launcher.get('ip_address') == self.ip_address:
                possible_tags.append(launcher.get('tag'))

        # No launcher adapter
        if self.launcher is None:

            # No launchers for this IP address
            if len(possible_tags) == 0:
                print "  There are no launcher adapters registered for this ip address"
            # IP Address in launchers, but not the input tag
            else:
                print text.error + ("There is a launcher adapter registered for this "
                                    "IP address (%s), but not for the input tag (%s)"
                                    % (self.ip_address, self.tag))
                print "  Available tags for this IP address:"
                for tag in possible_tags:
                    print "    %s" % tag
                print text.stop

            # Exit in error state
            sys.exit(9)
        else:
            # Get the job_list to watch for this launcher
            self.job_list = self.launcher.get('job_list')

    def check_settings(self):
        """Check if additional params in self.launcher need setup."""
        # Check if a multiprocessing.Pool needs to be setup for launcher adapter.
        if self.tag == 'shell':
            if self.launcher.get('pool_size', False):
                size = self.launcher.get('pool_size')
            else:
                size = total_nproc() - 1
            # Make sure it's an integer
            self.pool = mp_pool(int(size))

    def load_adapter(self):
        """Find and load the adapter"""

        # Import the database adapter as database module

        self.adapter = load_module(
            seek_module=self.launcher["adapter"],
            directories=self.site.LAUNCHER_SETTINGS[
                "RAPD_LAUNCHER_ADAPTER_DIRECTORIES"]).LauncherAdapter

        if self.logger:
            self.logger.debug(self.adapter)
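
# A guess at the shape of one entry in site.LAUNCHER_SETTINGS
# ["LAUNCHER_SPECIFICATIONS"], based only on the keys read above and in the
# Launcher_Manager examples (ip_address, tag, job_list, adapter, pool_size,
# job_types, site_tag, launch_dir). All concrete values are placeholders.
LAUNCHER_SPECIFICATION_EXAMPLE = {
    "ip_address": "10.0.0.5",           # host this launcher runs on
    "tag": "shell",                     # matches the tag passed to Launcher()
    "job_list": "RAPD_SHELL_JOBS",      # Redis list this launcher pops jobs from
    "adapter": "shell_simple",          # module name resolved by load_adapter()
    "pool_size": 8,                     # optional; used when tag == 'shell'
    "job_types": ("ALL",),              # commands this launcher accepts
    "site_tag": ("NECAT_C",),           # beamlines it serves
    "launch_dir": "/gpfs/rapd/launch",  # handed to jobs as their launch_dir
}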
Beispiel #29
0
class Launcher(object):
    """
    Connects to Redis instance, listens for jobs, and spawns new threads using defined
    launcher_adapter
    """

    adapter = None
    #adapter_file = None
    #database = None
    # address = None
    ip_address = None
    #job_types = None
    launcher = None
    #port = None
    tag = None

    def __init__(self, site, tag="", logger=None, overwatch_id=False):
        """
        Initialize the Launcher instance

        Keyword arguments:
        site -- site object with relevant information to run
        tag -- optional string describing launcher. Defined in site.LAUNCHER_REGISTER
        logger -- logger instance (default = None)
        overwatch_id -- id for optional overwatcher instance
        """
        # Get the logger Instance
        self.logger = logger

        # Save passed-in variables
        self.site = site
        self.tag = tag
        self.overwatch_id = overwatch_id

        # Retrieve settings for this Launcher
        self.get_settings()

        # Load the adapter
        self.load_adapter()

        # Connect to Redis for communications
        self.connect_to_redis()

        # For loop in self.run
        self.running = True

        self.run()

    def run(self):
        """The core process of the Launcher instance"""

        # Set up overwatcher
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="launcher",
                                          ow_id=self.overwatch_id)
            self.ow_registrar.register({"site_id":self.site.ID,
                                        "job_list":self.job_list})

        try:
            # This is the server portion of the code
            while self.running:
                # Have Registrar update status
                if self.overwatch_id:
                    self.ow_registrar.update({"site_id":self.site.ID,
                                              "job_list":self.job_list})

                # Look for a new command
                # This will throw a redis.exceptions.ConnectionError if redis is unreachable
                #command = self.redis.brpop(["RAPD_JOBS",], 5)
                try:
                    while self.redis.llen(self.job_list) != 0:
                        command = self.redis.rpop(self.job_list)
                        # Handle the message
                        if command:
                            self.handle_command(json.loads(command))

                            # Only run 1 command
                            # self.running = False
                            # break
                    # sleep a little when jobs aren't coming in.
                    time.sleep(0.2)
                except redis.exceptions.ConnectionError:
                    if self.logger:
                        self.logger.exception("Remote Redis is not up. Waiting for Sentinel to switch to new host")
                    time.sleep(1)

        except KeyboardInterrupt:
            self.stop()

    def stop(self):
        """Stop everything smoothly."""
        self.running = False
        if self.overwatch_id:
            self.ow_registrar.stop()
        self.redis.stop()

    def connect_to_redis(self):
        """Connect to the redis instance"""
        redis_database = importlib.import_module('database.redis_adapter')

        #self.redis_database = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS)
        #self.redis = self.redis_database.connect_to_redis()
        #self.redis = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS)
        self.redis = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS, 
                                             logger=self.logger)

    def handle_command(self, command):
        """
        Handle an incoming command

        Keyword arguments:
        command -- command from redis
        """
        print "handle_command"
        pprint(command)

        # Split up the command
        message = command
        if self.logger:
            self.logger.debug("Command received channel:%s  message: %s", self.job_list, message)

        # Use the adapter to launch
        self.adapter(self.site, message, self.launcher)
        #Thread(target=self.adapter, args=(self.site, message, self.launcher)).start()

    def get_settings(self):
        """
        Get the settings for this Launcher based on ip address and tag
        """

        # Save typing
        #launchers = self.site.LAUNCHER_SETTINGS["LAUNCHER_REGISTER"]

        # Get IP Address
        self.ip_address = utils.site.get_ip_address()
        #print self.ip_address
        if self.logger:
            self.logger.debug("Found ip address to be %s", self.ip_address)
        """
        # Look for the launcher matching this ip_address and the input tag
        possible_tags = []
        for launcher in launchers:
            if launcher[0] == self.ip_address and launcher[1] == self.tag:
                self.launcher = launcher
                break
            elif launcher[0] == self.ip_address:
                possible_tags.append(launcher[1])
        """

        # Save typing
        launchers = self.site.LAUNCHER_SETTINGS["LAUNCHER_SPECIFICATIONS"]

        # Look for the launcher matching this ip_address and the input tag
        possible_tags = []
        for launcher in launchers:
            #print launcher
            if launcher.get('ip_address') == self.ip_address and launcher.get('tag') == self.tag:
                self.launcher = launcher
                break
            elif launcher.get('ip_address') == self.ip_address:
                possible_tags.append(launcher.get('tag'))

        # No launcher adapter
        if self.launcher is None:

            # No launchers for this IP address
            if len(possible_tags) == 0:
                print "  There are no launcher adapters registered for this ip address"
            # IP Address in launchers, but not the input tag
            else:
                print text.error + ("There is a launcher adapter registered for this "
                                    "IP address (%s), but not for the input tag (%s)"
                                    % (self.ip_address, self.tag))
                print "  Available tags for this IP address:"
                for tag in possible_tags:
                    print "    %s" % tag
                print text.stop

            # Exit in error state
            sys.exit(9)
        else:
            # Get the job_list to watch for this launcher
            self.job_list = self.launcher.get('job_list')

    def load_adapter(self):
        """Find and load the adapter"""

        # Import the database adapter as database module
        self.adapter = load_module(
            seek_module=self.launcher["adapter"],
            directories=self.site.LAUNCHER_SETTINGS["RAPD_LAUNCHER_ADAPTER_DIRECTORIES"]).LauncherAdapter
        if self.logger:
            self.logger.debug(self.adapter)
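
# handle_command() above calls the loaded adapter as adapter(site, message,
# launcher). This skeleton only illustrates that calling convention; real
# LauncherAdapter classes live in the directories listed under
# RAPD_LAUNCHER_ADAPTER_DIRECTORIES and do the actual launching work.
class LauncherAdapterSketch(object):
    """Hypothetical adapter skeleton that does its work on construction"""

    def __init__(self, site, message, settings):
        self.site = site
        self.message = message
        self.settings = settings
        self.run()

    def run(self):
        # A real adapter would launch the job (shell, cluster, etc.) here
        print("Would launch job for command %s" % self.message.get("command"))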
Beispiel #30
0
class Launcher_Manager(Thread):
    """
    Listens to the 'RAPD_JOBS' list and sends jobs to the proper
    launcher.
    """
    def __init__(self, site, logger=False, overwatch_id=False):
        """
        Initialize the Launcher_Manager instance

        Keyword arguments:
        site -- site object with relevant information to run
        logger -- logger instance (default = False)
        overwatch_id -- id for optional overwatcher instance
        """
        # If logger is passed in from main use that...
        if logger:
            self.logger = logger
        else:
            # Otherwise, get the rapd logger
            self.logger = logging.getLogger("RAPDLogger")

        # Initialize the thread
        Thread.__init__(self)

        # Save passed-in variables
        self.site = site
        self.overwatch_id = overwatch_id

        self.running = True
        self.timer = 0
        self.job_list = []

        self.connect_to_redis()

        self.start()

    def run(self):
        """The core process of the Launcher instance"""

        # Set up overwatcher
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="launch_manager",
                                          ow_id=self.overwatch_id)
            self.ow_registrar.register()

        # Get the initial possible jobs lists
        full_job_list = [
            x.get('job_list')
            for x in self.site.LAUNCHER_SETTINGS["LAUNCHER_SPECIFICATIONS"]
        ]

        try:
            # This is the server portion of the code
            while self.running:
                # Get updated job list by checking which launchers are running
                # Reassign jobs if launcher(s) status changes
                if round(self.timer % TIMER, 1) == 1.0:
                    try:
                        # Have Registrar update status
                        if self.overwatch_id:
                            self.ow_registrar.update()

                        # Check which launchers are running
                        temp = [
                            l for l in full_job_list
                            if self.redis.get("OW:" + l)
                        ]

                        # Determine which launcher(s) went offline
                        offline = [
                            line for line in self.job_list
                            if line not in temp
                        ]
                        if len(offline) > 0:
                            # Pop waiting jobs off their job_lists and push back in RAPD_JOBS for reassignment.
                            for _l in offline:
                                while self.redis.llen(_l) != 0:
                                    self.redis.rpoplpush(_l, 'RAPD_JOBS')

                        # Determine which launcher(s) came online (Also runs at startup!)
                        online = [
                            line for line in temp
                            if line not in self.job_list
                        ]
                        if len(online) > 0:
                            # Pop jobs off RAPD_JOBS_WAITING and push back onto RAPD_JOBS for reassignment.
                            while self.redis.llen('RAPD_JOBS_WAITING') != 0:
                                self.redis.rpoplpush('RAPD_JOBS_WAITING',
                                                     'RAPD_JOBS')

                        # Update the self.job_list
                        self.job_list = temp

                    except redis.exceptions.ConnectionError:
                        if self.logger:
                            self.logger.exception(
                                "Remote Redis is not up. Waiting for Sentinel to switch to new host"
                            )
                        time.sleep(1)

                # Look for a new command
                # This will throw a redis.exceptions.ConnectionError if redis is unreachable
                #command = self.redis.brpop(["RAPD_JOBS",], 5)
                try:
                    while self.redis.llen("RAPD_JOBS") != 0:
                        command = self.redis.rpop("RAPD_JOBS")
                        # Handle the message
                        if command:
                            self.push_command(json.loads(command))
                            # Only run 1 command
                            # self.running = False
                            # break
                    # sleep a little when jobs aren't coming in.
                    time.sleep(0.2)
                    self.timer += 0.2
                except redis.exceptions.ConnectionError:
                    if self.logger:
                        self.logger.exception(
                            "Remote Redis is not up. Waiting for Sentinel to switch to new host"
                        )
                    time.sleep(1)

        except KeyboardInterrupt:
            self.stop()

    def stop(self):
        if self.logger:
            self.logger.debug('shutting down launcher manager')
        self.running = False
        if self.overwatch_id:
            self.ow_registrar.stop()

    def set_launcher(self, command=False, site_tag=False):
        """Find the correct running launcher to launch a specific job COMMAND"""
        # List of commands to look for in each launcher's 'job_types'
        # If the highest-priority launcher accepts 'ALL', it is chosen.
        search = ['ALL']
        if command:
            search.append(command)

        # Search through launchers
        for x in self.site.LAUNCHER_SETTINGS["LAUNCHER_SPECIFICATIONS"]:
            # Is launcher running?
            if x.get('job_list') in self.job_list:
                # check if its job type matches the command
                for j in search:
                    if j in x.get('job_types'):
                        # Check if launcher is accepting jobs for this beamline
                        if site_tag:
                            if site_tag in x.get('site_tag'):
                                return (x.get('job_list'), x.get('launch_dir'))
                        else:
                            return (x.get('job_list'), x.get('launch_dir'))

        # Return False if no running launchers are appropriate
        return (False, False)

    def push_command(self, command):
        """
        Handle an incoming command

        Keyword arguments:
        command -- command from redis
        """
        print "push_command"
        #pprint(command)

        # Split up the command
        message = command
        if self.logger:
            self.logger.debug(
                "Command received channel:RAPD_JOBS  message: %s", message)

        # Get the site_tag from the image header to determine the beamline where it was collected.
        site_tag = launch_tools.get_site_tag(message)

        # get the correct running launcher and launch_dir
        launcher, launch_dir = self.set_launcher(message['command'], site_tag)

        if message['command'].startswith('INTEGRATE'):
            print 'type: %s...%s' % (message['preferences']['xdsinp'][:100],
                                     message['preferences']['xdsinp'][-100:])

        if launcher:
            # Update preferences to be in server run mode
            if not message.get("preferences"):
                message["preferences"] = {}
            message["preferences"]["run_mode"] = "server"

            # Pass along the Launch directory
            if not message.get("directories"):
                message["directories"] = {}
            message["directories"]["launch_dir"] = launch_dir

            # Push the job on the correct launcher job list
            self.redis.lpush(launcher, json.dumps(message))
            if self.logger:
                self.logger.debug("Command sent channel:%s  message: %s",
                                  launcher, message)
        else:
            self.redis.lpush('RAPD_JOBS_WAITING', json.dumps(message))
            if self.logger:
                self.logger.debug(
                    "Could not find a running launcher for this job. Putting job on RAPD_JOBS_WAITING list"
                )

    def connect_to_redis(self):
        """Connect to the redis instance"""
        redis_database = importlib.import_module('database.redis_adapter')

        #self.redis_db = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS)
        #self.redis = self.redis_db.connect_to_redis()
        #self.redis = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS)
        self.redis = redis_database.Database(
            settings=self.site.CONTROL_DATABASE_SETTINGS, logger=self.logger)
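
# Sketch of the job message push_command() routes, inferred from the keys it
# touches; the "command" value and the trailing comment are assumptions.
EXAMPLE_JOB_MESSAGE = {
    "command": "INDEX",   # matched against each launcher's 'job_types'
    "preferences": {},    # push_command() sets run_mode = "server" in here
    "directories": {},    # push_command() records launch_dir in here
    # ...plus the image/header information used by launch_tools.get_site_tag()
}

# After routing, the message is JSON-encoded and pushed with
# self.redis.lpush(<launcher job_list>, json.dumps(message)), or parked on
# 'RAPD_JOBS_WAITING' when no suitable launcher is running.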
Beispiel #31
0
class Gatherer(object):
    """
    Watches the beamline and signals images and runs over redis
    """
    # For keeping track of file change times
    run_time = 0
    image_time = 0

    # Host computer detail
    ip_address = None

    def __init__(self, site, overwatch_id=None):
        """
        Setup and start the Gatherer
        """

        # Get the logger Instance
        self.logger = logging.getLogger("RAPDLogger")

        # Passed-in variables
        self.site = site
        self.overwatch_id = overwatch_id

        self.logger.info("Gatherer.__init__")

        # Get our bearings
        self.set_host()

        # Connect to redis
        self.connect()

        # Running conditions
        self.go = True

        # Now run
        self.run()

    def run(self):
        """
        The while loop for watching the files
        """
        self.logger.info("Gatherer.run")

        # Set up overwatcher
        self.ow_registrar = Registrar(site=self.site,
                                      ow_type="gatherer",
                                      ow_id=self.overwatch_id)
        self.ow_registrar.register({"site_id":self.site.ID})

        # A RUN & IMAGES EXAMPLE
        # Some logging
        self.logger.debug("  Will publish new images on filecreate:C")
        self.logger.debug("  Will publish new images on image_collected:C")
        self.logger.debug("  Will push new images onto images_collected:C")
        self.logger.debug("  Will publish new images on image_collected:%s" % self.tag)
        self.logger.debug("  Will push new images onto images_collected:%s" % self.tag)

        # Set up the WatchManager
        watch_manager = pyinotify.WatchManager()

        # Set up the notifier for files being made
        notifier = pyinotify.ThreadedNotifier(watch_manager, EventHandler(redis_rapd=self.redis_rapd,
                                                                          redis_remote=self.redis_remote,
                                                                          logger=self.logger))
        notifier.start()

        # Try exiting the pyinotify gracefully
        def exit_gracefully():
            """
            Exit pyinotify properly when program exits
            """
            self.logger.debug("Attempting to gracefully shut down")
            watch_manager.rm_watch(watch_manager.watches.keys())
            notifier.stop()
        atexit.register(exit_gracefully)

        # Start by adding the current dir in the beamline redis db
        DATA_DIR = "ADX_DIRECTORY_SV" # "datadir_%s" % self.tag

        # Listen for new directory
        current_dir = ""
        time.sleep(0.5)
        counter = 0
        try:
            while True:
                print counter
                newdir = self.redis_beamline.get(DATA_DIR)
                if (newdir != current_dir):
                    current_dir = newdir
                    self.logger.debug("New directory to watch %s" % newdir)
                    DirectoryHandler(current_dir=newdir, 
                                     watch_manager=watch_manager,
                                     logger=self.logger)
                time.sleep(1)
                counter += 1
                # Update overwatcher every 5 seconds
                if counter % 5 == 0:
                    self.ow_registrar.update({"site_id":self.site.ID})
                    counter = 0

        # Exited by keyboard
        except KeyboardInterrupt:
            self.stop()

    def stop(self):
        """
        Stop the loop
        """
        self.logger.debug("Gatherer.stop")

        self.go = False


    def set_host(self):
        """
        Use os.uname to set files to watch
        """
        self.logger.debug("Gatherer.set_host")

        # Figure out which host we are on
        self.ip_address = socket.gethostbyaddr(socket.gethostname())[-1][0]
        self.logger.debug("IP Address: %s" % self.ip_address)

        # Now grab the file locations, beamline from settings
        if self.ip_address in self.site.GATHERERS:
            self.tag = self.site.GATHERERS[self.ip_address]
            # Make sure we enforce uppercase for tag
            self.tag = self.tag.upper()
        else:
            print "ERROR - no settings for this host"
            self.tag = "test"

    def connect(self):
        """
        Connect to redis host
        """

        self.logger.debug("Gatherer.connect")

        # Connect to RAPD Redis
        self.redis_rapd = Database(settings=self.site.CONTROL_DATABASE_SETTINGS)

        # NECAT uses Redis to communicate with the beamline
        # Connect to beamline Redis to monitor if run is launched
        self.redis_beamline = Database(settings=self.site.SITE_ADAPTER_SETTINGS[self.tag])

        # NECAT uses Redis to communicate with the remote system
        # Connect to remote system Redis to monitor if run is launched
        self.redis_remote = Database(settings=self.site.REMOTE_ADAPTER_SETTINGS)
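
# A guess at the site settings the Gatherer above consumes, based only on how
# they are used: GATHERERS maps an IP address to a beamline tag, and each
# *_SETTINGS entry is passed straight to the Redis Database wrapper. The key
# names inside the settings dicts and all values are placeholders.
GATHERERS = {
    "10.0.0.21": "NECAT_C",   # ip_address -> beamline tag (upper-cased in set_host)
}

SITE_ADAPTER_SETTINGS = {
    "NECAT_C": {"REDIS_HOST": "beamline-redis", "REDIS_PORT": 6379},
}

CONTROL_DATABASE_SETTINGS = {"REDIS_HOST": "rapd-redis", "REDIS_PORT": 6379}
REMOTE_ADAPTER_SETTINGS = {"REDIS_HOST": "remote-redis", "REDIS_PORT": 6379}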
Beispiel #32
0
class Monitor(Thread):
    """Monitor for new data collection images to be submitted to a redis instance"""

    # Used for stopping/starting the loop
    running = True

    # The connection to the Redis database
    redis = None

    # Storage for where to look for information
    tags = []

    # Overwatch
    ow_registrar = None

    def __init__(self,
                 site,
                 clean_start=False,
                 notify=None,
                 overwatch_id=None):
        """
        Initialize the monitor

        Keyword arguments:
        site -- site description
        clean_start -- if True, delete any images already on the images_collected lists
        notify -- function called when an image is captured
        overwatch_id -- id for optional overwatcher wrapper
        """

        # Get the logger
        self.logger = logging.getLogger("RAPDLogger")

        # Initialize the thread
        Thread.__init__(self)

        # Passed-in variables
        self.site = site
        self.clean_start = clean_start
        self.notify = notify
        self.overwatch_id = overwatch_id

        # Figure out tag(s)
        self.get_tags()

        # Start the thread
        # self.daemon = True
        self.start()

    def get_tags(self):
        """Transform site.ID into tag[s] for image monitor"""

        # A string is input - one tag
        if isinstance(self.site.ID, str):
            self.tags = [self.site.ID.upper()]

        # Tuple or list
        elif isinstance(self.site.ID, tuple) or isinstance(self.site.ID, list):
            for site_id in self.site.ID:
                self.tags.append(site_id.upper())

    def stop(self):
        """Stop the process of polling the redis instance"""

        self.logger.debug("Stopping")

        self.running = False
        #self.redis_database.stop()

    def connect_to_redis(self):
        """Connect to the redis instance"""
        redis_database = importlib.import_module('database.redis_adapter')
        #self.redis_database = redis_database.Database(settings=self.site.IMAGE_MONITOR_SETTINGS)
        #self.redis = self.redis_database.connect_to_redis()
        self.redis = redis_database.Database(settings=self.site.IMAGE_MONITOR_SETTINGS)

    def run(self):
        """Orchestrate the monitoring for new images in redis db"""

        self.logger.debug("Running")

        # Connect to Redis
        self.connect_to_redis()

        # Create Overwatch Registrar instance
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="control",
                                          ow_id=self.overwatch_id)
            # Register
            self.ow_registrar.register()

        # Determine interval for overwatch update
        ow_round_interval = 50 # int((5 * len(self.image_lists)) / POLLING_REST)

        # If we are starting clean
        if self.clean_start:
            for tag in self.tags:
                self.redis.delete("images_collected:%s" % tag)

        while self.running:

            # ~5 seconds between overwatch updates
            for __ in range(ow_round_interval):

                for tag in self.tags:

                    # Try to pop the oldest image off the list
                    new_image = self.redis.rpop("images_collected:%s" % tag)
                    #new_image = self.redis.rpop("images_collected_%s" % tag)

                    # Have a new_image
                    if new_image:
                        # self.logger.debug("New image %s - %s", tag, new_image)

                        # Notify core thread that an image has been collected
                        self.notify({"message_type":"NEWIMAGE",
                                     "fullname":new_image,
                                     "site_tag":tag})

                        # self.logger.debug("New image data %s", new_image)

                    # Slow it down a little
                    time.sleep(POLLING_REST)

            # Have Registrar update status
            if self.overwatch_id:
                self.ow_registrar.update()

        self.logger.debug("Exit image monitor loop")
Beispiel #33
0
class Launcher_Manager(threading.Thread):
    """
    Listens to the 'RAPD_JOBS' list and sends jobs to the proper
    launcher.
    """
    def __init__(self, site, logger=False, overwatch_id=False):
        """
        Initialize the Launcher_Manager instance

        Keyword arguments:
        site -- site object with relevant information to run
        logger -- logger instance (default = False)
        overwatch_id -- id for optional overwatcher instance
        """
        # If logger is passed in from main use that...
        if logger:
            self.logger = logger
        else:
            # Otherwise, get the rapd logger
            self.logger = logging.getLogger("RAPDLogger")

        # Initialize the thread
        threading.Thread.__init__(self)

        # Save passed-in variables
        self.site = site
        self.overwatch_id = overwatch_id

        self.running = True
        self.timer = 0
        self.job_list = []

        self.connect_to_redis()

        self.start()

    def run(self):
        """The core process of the Launcher instance"""

        # Set up overwatcher
        if self.overwatch_id:
            self.ow_registrar = Registrar(site=self.site,
                                          ow_type="launch_manager",
                                          ow_id=self.overwatch_id)
            self.ow_registrar.register()

        # Get the initial possible jobs lists
        full_job_list = [x.get('job_list') for x in self.site.LAUNCHER_SETTINGS["LAUNCHER_SPECIFICATIONS"]]

        try:
            # This is the server portion of the code
            while self.running:
                # Have Registrar update status
                #if self.overwatch_id:
                #    self.ow_registrar.update()

                # Get updated job list by checking which launchers are running
                # Reassign jobs if launcher(s) status changes
                if round(self.timer % TIMER, 1) == 1.0:
                    try:
                        # Have Registrar update status
                        if self.overwatch_id:
                            self.ow_registrar.update()

                        # Check which launchers are running
                        temp = [l for l in full_job_list if self.redis.get("OW:"+l)]

                        # Determine which launcher(s) went offline
                        offline = [line for line in self.job_list if line not in temp]
                        if len(offline) > 0:
                            # Pop waiting jobs off their job_lists and push back in RAPD_JOBS for reassignment.
                            for _l in offline:
                                while self.redis.llen(_l) != 0:
                                    self.redis.rpoplpush(_l, 'RAPD_JOBS')

                        # Determine which launcher(s) came online (Also runs at startup!)
                        online = [line for line in temp if line not in self.job_list]
                        if len(online) > 0:
                            # Pop jobs off RAPD_JOBS_WAITING and push back onto RAPD_JOBS for reassignment.
                            while self.redis.llen('RAPD_JOBS_WAITING') != 0:
                                self.redis.rpoplpush('RAPD_JOBS_WAITING', 'RAPD_JOBS')

                        # Update the self.job_list
                        self.job_list = temp

                    except redis.exceptions.ConnectionError:
                        if self.logger:
                            self.logger.exception("Remote Redis is not up. Waiting for Sentinel to switch to new host")
                        time.sleep(1)

                # Look for a new command
                # This will throw a redis.exceptions.ConnectionError if redis is unreachable
                #command = self.redis.brpop(["RAPD_JOBS",], 5)
                try:
                    while self.redis.llen("RAPD_JOBS") != 0:
                        command = self.redis.rpop("RAPD_JOBS")
                        # Handle the message
                        if command:
                            self.push_command(json.loads(command))
                            # Only run 1 command
                            # self.running = False
                            # break
                    # sleep a little when jobs aren't coming in.
                    time.sleep(0.2)
                    self.timer += 0.2
                except redis.exceptions.ConnectionError:
                    if self.logger:
                        self.logger.exception("Remote Redis is not up. Waiting for Sentinel to switch to new host")
                    time.sleep(1)

        except KeyboardInterrupt:
            self.stop()

    def stop(self):
        if self.logger:
            self.logger.debug('shutting down launcher manager')
        self.running = False
        if self.overwatch_id:
            self.ow_registrar.stop()
        self.redis.stop()

    def set_launcher(self, command=False, site_tag=False):
        """Find the correct running launcher to launch a specific job COMMAND"""
        # List of commands to look for in each launcher's 'job_types'
        # If the highest-priority launcher accepts 'ALL', it is chosen.
        search = ['ALL']
        if command:
            search.append(command)

        # Search through launchers
        for x in self.site.LAUNCHER_SETTINGS["LAUNCHER_SPECIFICATIONS"]:
            # Is launcher running?
            if x.get('job_list') in self.job_list:
                # check if its job type matches the command
                for j in search:
                    if j in x.get('job_types'):
                        # Check if launcher is accepting jobs for this beamline
                        if site_tag:
                            if site_tag in x.get('site_tag'):
                                return (x.get('job_list'), x.get('launch_dir'))
                        else:
                            return (x.get('job_list'), x.get('launch_dir'))

        # Return False if no running launchers are appropriate
        return (False, False)

    def push_command(self, command):
        """
        Handle an incoming command

        Keyword arguments:
        command -- command from redis
        """
        print "push_command"
        #pprint(command)

        # Split up the command
        message = command
        if self.logger:
            self.logger.debug("Command received channel:RAPD_JOBS  message: %s", message)

        # Get the site_tag from the image header to determine the beamline where it was collected.
        site_tag = launch_tools.get_site_tag(message)

        # get the correct running launcher and launch_dir
        launcher, launch_dir = self.set_launcher(message['command'], site_tag)

        if message['command'].startswith('INTEGRATE'):
            print 'type: %s'%message['preferences']['xdsinp']

        if launcher:
            # Update preferences to be in server run mode
            if not message.get("preferences"):
                message["preferences"] = {}
            message["preferences"]["run_mode"] = "server"

            # Pass along the Launch directory
            if not message.get("directories"):
                message["directories"] = {}
            message["directories"]["launch_dir"] = launch_dir

            # Push the job on the correct launcher job list
            self.redis.lpush(launcher, json.dumps(message))
            if self.logger:
                self.logger.debug("Command sent channel:%s  message: %s", launcher, message)
        else:
            self.redis.lpush('RAPD_JOBS_WAITING', json.dumps(message))
            if self.logger:
                self.logger.debug("Could not find a running launcher for this job. Putting job on RAPD_JOBS_WAITING list")

    def connect_to_redis(self):
        """Connect to the redis instance"""
        redis_database = importlib.import_module('database.redis_adapter')

        #self.redis_db = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS)
        #self.redis = self.redis_db.connect_to_redis()
        #self.redis = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS)
        self.redis = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS, 
                                             logger=self.logger)