Example #1
    def on_connection_error(self, connection, msg):

        oldhost = self.__get_whole_host_name()
        time_passed = datetime.datetime.now() - self.__start_connect_time
        time_passed_seconds = time_passed.total_seconds()
        logerror(
            LOGGER,
            'Could not connect to %s: "%s" (connection failure after %s seconds)',
            oldhost, msg, time_passed_seconds)

        self.__store_connection_error_info(msg, oldhost)

        # If there was a force-finish, we do not reconnect.
        if self.statemachine.is_FORCE_FINISHED():
            errormsg = 'Permanently failed to connect to RabbitMQ.'
            if self.statemachine.detail_asked_to_gently_close_by_publisher:
            errormsg += ' Tried all hosts until force-closed by the user.'
            elif self.statemachine.detail_asked_to_force_close_by_publisher:
                errormsg += ' Tried all hosts until a user close-down forced us to give up (e.g. the maximum waiting time was reached).'
            errormsg += ' Giving up. No PID requests will be sent.'
            self.__give_up_reconnecting_and_raise_exception(errormsg)

        # If there are alternative URLs, try one of them:
        if self.__node_manager.has_more_urls():
            logdebug(LOGGER,
                     'Connection failure: %s fallback URLs left to try.',
                     self.__node_manager.get_num_left_urls())
            self.__node_manager.set_next_host()
            newhost = self.__get_whole_host_name()
            loginfo(LOGGER,
                    'Connection failure: Trying to connect (now) to %s.',
                    newhost)
            reopen_seconds = 0
            self.__wait_and_trigger_reconnection(connection, reopen_seconds)

        # If there are no URLs left, reset the node manager to
        # start at the first node again...
        else:
            self.__reconnect_counter += 1
            if self.__reconnect_counter <= self.__max_reconnection_tries:
                reopen_seconds = self.__wait_seconds_before_reconnect
                logdebug(
                    LOGGER,
                    'Connection failure: Failed connecting to all hosts. Waiting %s seconds and starting over.',
                    reopen_seconds)
                self.__node_manager.reset_nodes()
                newhost = self.__node_manager.get_connection_parameters().host
                loginfo(
                    LOGGER,
                    'Connection failure: Trying to connect (in %s seconds) to %s.',
                    reopen_seconds, newhost)
                self.__wait_and_trigger_reconnection(connection,
                                                     reopen_seconds)

            # Give up after so many tries...
            else:
                errormsg = (
                    'Permanently failed to connect to RabbitMQ. Tried all hosts %s times. Giving up. No PID requests will be sent.'
                    % (self.__max_reconnection_tries + 1))
                self.__give_up_reconnecting_and_raise_exception(errormsg)
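
The reconnection policy in this callback can be summarized as: if another fallback host is left, retry immediately; once every host has failed, wait and start the host list over, for at most a fixed number of rounds; then give up. A minimal standalone sketch of that decision logic (the constants and the function name are illustrative stand-ins, not part of the library):

MAX_RECONNECTION_TRIES = 2         # stand-in for defaults.RABBIT_RECONNECTION_MAX_TRIES
WAIT_SECONDS_BEFORE_RECONNECT = 5  # stand-in for defaults.RABBIT_RECONNECTION_SECONDS

def decide_on_connection_failure(has_more_urls, reconnect_counter):
    '''Return (action, wait_seconds, reconnect_counter) after a failed connect.'''
    if has_more_urls:
        # Another fallback host is left: retry right away, without waiting.
        return ('try_next_host', 0, reconnect_counter)
    reconnect_counter += 1
    if reconnect_counter <= MAX_RECONNECTION_TRIES:
        # Every host failed in this round: wait, then start the host list over.
        return ('reset_hosts_and_retry', WAIT_SECONDS_BEFORE_RECONNECT, reconnect_counter)
    # All rounds are exhausted: give up and raise.
    return ('give_up', None, reconnect_counter)

print(decide_on_connection_failure(True, 0))   # ('try_next_host', 0, 0)
print(decide_on_connection_failure(False, 2))  # ('give_up', None, 3)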
Example #2
    def make_data_cart_pid(self, dict_of_drs_ids_and_pids):
        logdebug(LOGGER, 'Making a PID for a data cart full of datasets...')

        # Check arg
        if not type(dict_of_drs_ids_and_pids) == type(dict()):
            if type(dict_of_drs_ids_and_pids) == type([]):
                raise esgfpid.exceptions.ArgumentError(
                    'Please provide a dictionary of dataset ids and handles, not a list'
                )
            else:
                raise esgfpid.exceptions.ArgumentError(
                    'Please provide a dictionary of dataset ids and handles')

        # Make a pid (hash on the content):
        cart_handle = DataCartAssistant._get_handle_string_for_datacart(
            dict_of_drs_ids_and_pids, self.__prefix)

        # Make and send message
        message = self.__make_message(cart_handle, dict_of_drs_ids_and_pids)
        self.__send_message_to_queue(message)

        # Return pid
        logdebug(LOGGER,
                 'Making a PID for a data cart full of datasets... done.')
        loginfo(LOGGER, 'Requesting to create PID for data cart (%s).',
                cart_handle)
        return cart_handle
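
The comment "hash on the content" means the cart handle is derived deterministically from the cart's datasets, so the same cart always yields the same PID. A standalone sketch of that idea (the prefix, the hash function, and the helper name are assumptions for illustration; the library's actual scheme lives in DataCartAssistant._get_handle_string_for_datacart):

import hashlib
import json

PREFIX = '21.14100'  # assumed handle prefix, for illustration only

def illustrative_handle_for_datacart(dict_of_drs_ids_and_pids):
    # Serialize the cart content deterministically (sorted keys), hash it,
    # and append the digest to the prefix. Same content -> same handle.
    canonical = json.dumps(dict_of_drs_ids_and_pids, sort_keys=True)
    digest = hashlib.md5(canonical.encode('utf-8')).hexdigest()
    return 'hdl:%s/%s' % (PREFIX, digest)

print(illustrative_handle_for_datacart({
    'cmip6.model.experiment.dataset1': 'hdl:21.14100/aaa-bbb',
    'cmip6.model.experiment.dataset2': None,  # dataset without a known PID
}))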
Example #3
    def on_message_not_accepted(self, channel, returned_frame, props, body):
        # Messages that are returned are confirmed anyway.
        # If we sent 20 messages that are returned, all 20 are acked,
        # so we do not need to retrieve them from the unconfirmed
        # messages after resending.
        # In the end, we'll have published 40 messages and received 40 acks.

        # Logging...
        logtrace(
            LOGGER, 'Return frame: %s', returned_frame
        )  # <Basic.Return(['exchange=rabbitsender_integration_tests', 'reply_code=312', 'reply_text=NO_ROUTE', 'routing_key=cmip6.publisher.HASH.cart.datasets'])>
        logtrace(
            LOGGER, 'Return props: %s', props
        )  # <BasicProperties(['content_type=application/json', 'delivery_mode=2'])>
        logtrace(LOGGER, 'Return body: %s', body)

        # Was this the first or the second time the message came back?
        if returned_frame.reply_text == 'NO_ROUTE':
            loginfo(
                LOGGER,
                'The message was returned because it could not be assigned to any queue. No binding for routing key "%s".',
                returned_frame.routing_key)
            if returned_frame.routing_key.startswith(
                    esgfpid.utils.RABBIT_EMERGENCY_ROUTING_KEY):
                self.__log_about_double_return(returned_frame, body)
            else:
                self.__resend_message(returned_frame, props, body)
        else:
            logerror(
                LOGGER,
                'The message was returned. Routing key: %s. Unknown reason: %s',
                returned_frame.routing_key, returned_frame.reply_text)
            self.__resend_message(returned_frame, props, body)
Example #4
def get_routing_key_and_string_message_from_message_if_possible(msg):

    # Try to convert message to json:
    json_ok = False
    msg_json = None
    msg_string = None

    if msg is None:
        raise ValueError('The message that was passed is None.')

    # Get JSON from message, if possible!
    if isinstance(msg, basestring):

        try:
            # Valid string message --> JSON
            msg_string = msg
            msg_json = json.loads(msg)
            json_ok = True
            logdebug(LOGGER, 'Message was transformed to json.')
        except ValueError as e:

            # Invalid string message
            loginfo(LOGGER, 'Message seems to be invalid json: %s', msg)
            msg_string = str(msg)
            json_ok = False
    else:
        try:
            # Message is json already.
            msg_string = json.dumps(msg)
            msg_json = msg
            json_ok = True
            logtrace(LOGGER, 'Message was already json.')

        except TypeError as e:
            if 'not JSON serializable' in e.message:

                # Message was neither JSON nor a string.
                msg_string = str(msg)
                json_ok = False
                msg = (
                    'Message was neither JSON nor string and not understandable: %s'
                    % msg_string)
                loginfo(LOGGER, msg)
                raise ValueError(msg)

    # If we succeeded, try to get routing key:
    routing_key = None
    if json_ok:
        try:
            routing_key = msg_json['ROUTING_KEY']
            logtrace(LOGGER, 'Routing key extracted from message.')
        except (KeyError, TypeError) as e:
            logdebug(LOGGER, 'No routing key in message.')
            routing_key = esgfpid.defaults.RABBIT_DEFAULT_ROUTING_KEY
            pass  # There is no routing key in the message
    else:
        routing_key = esgfpid.defaults.RABBIT_DEFAULT_ROUTING_KEY

    return routing_key, msg_string
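
A condensed, Python-3 sketch of the same normalization logic, for reference: accept either a JSON string or a dict, return the routing key and a serialized message, and fall back to a default key when none can be extracted (DEFAULT_ROUTING_KEY here is a stand-in for esgfpid.defaults.RABBIT_DEFAULT_ROUTING_KEY):

import json

DEFAULT_ROUTING_KEY = 'fallback.routing.key'  # stand-in value, for illustration

def routing_key_and_string(msg):
    # Accept a JSON string or a dict; return (routing_key, message_as_string).
    if msg is None:
        raise ValueError('The message that was passed is None.')
    if isinstance(msg, str):
        msg_string = msg
        try:
            msg_json = json.loads(msg)
        except ValueError:
            return DEFAULT_ROUTING_KEY, msg_string   # invalid JSON string
    else:
        msg_string = json.dumps(msg)   # raises TypeError if not serializable
        msg_json = msg
    if isinstance(msg_json, dict) and 'ROUTING_KEY' in msg_json:
        return msg_json['ROUTING_KEY'], msg_string
    return DEFAULT_ROUTING_KEY, msg_string           # no routing key in message

print(routing_key_and_string({'ROUTING_KEY': 'cmip6.publisher.HASH.cart.datasets'}))
print(routing_key_and_string('this is not json'))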
Example #5
 def __log_previously_stored_files_found(self):
     concat_files = ', '.join(self.__list_of_previous_files)
     type_files = type(self.__list_of_previous_files)
     logdebug(LOGGER, 'Previously published fileset: %s (%s)', concat_files,
              type_files)
     loginfo(
         LOGGER,
         'Data integrity check will be run once the files have been specified.')
Example #6
 def __close_because_all_done(self, iteration):
     logdebug(
         LOGGER,
         'Gentle finish (iteration %i): All messages sent and confirmed in %ith try (waited and rechecked %i times).',
         self.__close_decision_iterations, iteration, iteration - 1)
     loginfo(LOGGER, 'All messages sent and confirmed. Closing.')
     self.__normal_finish()
     self.__tell_publisher_to_stop_waiting_for_gentle_finish()
Example #7
 def __inform_about_state_at_shutdown(self):
     unsent = self.thread.get_num_unpublished()
     unconfirmed = self.thread.get_num_unconfirmed()
     if unsent + unconfirmed > 0:
         logwarn(LOGGER, 
             'At close down: %i pending messages (%i unpublished messages, %i unconfirmed messages).',
             (unsent+unconfirmed), unsent, unconfirmed)
     else:
         loginfo(LOGGER, 'After close down: All messages were published and confirmed.')
Example #8
    def __init__(self, nodemanager):

        loginfo(LOGGER, 'Init of SynchronousRabbitConnector.')
        '''
        NodeManager provides info about all
        hosts.
        '''
        self.__nodemanager = nodemanager
        '''
        Props for basic_publish. They do not
        depend on the host, so we store them once for all hosts.
        '''
        self.__props = self.__nodemanager.get_properties_for_message_publications()
        '''
        To count how many times we have tried to reconnect the set of
        RabbitMQ hosts.
        '''
        self.__reconnect_counter = 0
        '''
        To see how many times we should try reconnecting to the set 
        of RabbitMQ hosts. Note that if there are 3 hosts, and we try 2
        times, this means 6 connection tries in total.
        '''
        self.__max_reconnection_tries = defaults.RABBIT_RECONNECTION_MAX_TRIES
        '''
        How many seconds to wait before reconnecting after having tried
        all hosts. (There is no waiting time when trying to connect to a
        different host after one fails.)
        '''
        self.__wait_seconds_before_reconnect = defaults.RABBIT_RECONNECTION_SECONDS
        '''
        To see how much time it takes to connect. Once a connection is
        established or failed, we print the time delta to logs.
        '''
        self.__start_connect_time = None
        '''
        If the messages should not be published to the exchange that
        was passed from the publisher in config, but to a fallback 
        solution, this will be set:
        '''
        self.__fallback_exchange = None
        ''' Set of all tried hosts, for logging. '''
        self.__all_hosts_that_were_tried = set()

        # Defaults:
        self.__mandatory_flag = esgfpid.defaults.RABBIT_MANDATORY_DELIVERY
        self.__max_tries = esgfpid.defaults.RABBIT_SYN_MESSAGE_MAX_TRIES
        self.__timeout_milliseconds = esgfpid.defaults.RABBIT_SYN_MESSAGE_TIMEOUT_MILLISEC
        self.__candidate_fallback_exchange_name = defaults.RABBIT_FALLBACK_EXCHANGE_NAME

        # Other settings:
        self.__channel = None
        self.__connection = None
        self.__communication_established = False
        self.__connection_last_process_event_call = 0
        self.__error_messages_during_init = []
Example #9
 def __wait_and_trigger_reconnection(self, connection, wait_seconds):
     if self.statemachine.is_FORCE_FINISHED():
         errormsg = 'Permanently failed to connect to RabbitMQ. Tried all hosts until received a force-finish. Giving up. No PID requests will be sent.'
         self.__give_up_reconnecting_and_raise_exception(errormsg)
     else:
         self.statemachine.set_to_waiting_to_be_available()
         loginfo(LOGGER, 'Trying to reconnect to RabbitMQ in %s seconds.',
                 wait_seconds)
         connection.add_timeout(wait_seconds, self.reconnect)
         logtrace(LOGGER,
                  'Reconnect event added to connection %s (not to %s)',
                  connection, self.thread._connection)
Example #10
 def __please_open_connection(self):
     params = self.__node_manager.get_connection_parameters()
     self.__start_connect_time = datetime.datetime.now()
     logdebug(LOGGER, 'Connecting to RabbitMQ at %s... (%s)', params.host,
              get_now_utc_as_formatted_string())
     loginfo(LOGGER, 'Opening connection to RabbitMQ...')
     self.thread._connection = pika.SelectConnection(
         parameters=params,
         on_open_callback=self.on_connection_open,
         on_open_error_callback=self.on_connection_error,
         on_close_callback=self.on_connection_closed,
         stop_ioloop_on_close=False  # why? see below. 
     )
Example #11
    def remove_errata_ids(self, **args):
        logdebug(LOGGER, 'Removing errata ids...')
        mandatory_args = ['drs_id', 'version_number', 'errata_ids']
        esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
        esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)

        dataset_handle = self.__get_dataset_handle(args)
        errata_ids = self.__get_errata_ids_as_list(args)
        message = self.__make_remove_message(errata_ids, dataset_handle, args['drs_id'], args['version_number'])
        self.__send_message_to_queue(message)

        loginfo(LOGGER, 'Requesting to remove errata ids "%s" from dataset "%s".', ', '.join(errata_ids), dataset_handle)
        logdebug(LOGGER, 'Removing errata ids... done')
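
The first two calls guard the keyword arguments; below is a minimal stand-in for those checks plus an example of the keyword names this method expects (the helper and the concrete values are illustrative, not the library's implementation):

def check_presence_of_mandatory_args(args, mandatory_args):
    # Simplified stand-in for esgfpid.utils.check_presence_of_mandatory_args.
    missing = [name for name in mandatory_args if name not in args]
    if missing:
        raise ValueError('Missing mandatory arguments: %s' % ', '.join(missing))

kwargs = {
    'drs_id': 'cmip6.model.experiment.dataset1',   # example value
    'version_number': '20200101',                  # example value
    'errata_ids': ['11111111-2222-3333-4444-555555555555'],
}
check_presence_of_mandatory_args(kwargs, ['drs_id', 'version_number', 'errata_ids'])
print('All mandatory arguments present.')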
Example #12
 def __please_open_connection(self):
     params = self.__node_manager.get_connection_parameters()
     self.__start_connect_time = datetime.datetime.now()
     logdebug(LOGGER, 'Connecting to RabbitMQ at %s... (%s)', params.host,
              get_now_utc_as_formatted_string())
     loginfo(LOGGER, 'Opening connection to RabbitMQ...')
     self.thread._connection = pika.SelectConnection(
         parameters=params,
         on_open_callback=self.on_connection_open,
         on_open_error_callback=self.on_connection_error,
         on_close_callback=self.on_connection_closed
         # Removed parameter, see https://github.com/pika/pika/issues/961
     )
Example #13
 def __join_and_rescue(self):
     success = self.__join()
     if success:
         self.__rescue_leftovers()
     else:
         for i in xrange(10):
             time.sleep(1)  # blocking
         loginfo(LOGGER, 'Joining the thread failed once... Retrying.')
         self.__thread.add_event_force_finish()
         success = self.__join()
         if success:
             self.__rescue_leftovers()
         else:
             logerror(LOGGER, 'Joining failed again. No idea why.')
Example #14
    def __react_on_ack(self, deliv_tag, multiple):
        if self.__first_confirm_receival:
            self.__first_confirm_receival = False
            loginfo(LOGGER,
                    'Received first message confirmation from RabbitMQ.')

        if multiple:
            logtrace(
                LOGGER,
                'Received "ACK" for multiple messages from messaging service.')
            self.__react_on_multiple_delivery_ack(deliv_tag)
        else:
            logtrace(
                LOGGER,
                'Received "ACK" for single message from messaging service.')
            self.__react_on_single_delivery_ack(deliv_tag)
Example #15
    def __postparations_after_successful_feeding(self, msg):

        # Pass the successfully published message and its delivery_number
        # to the confirmer module, to wait for its confirmation.
        # Increase the delivery number for the next message.
        self.thread.put_to_unconfirmed_delivery_tags(self.__delivery_number)
        self.thread.put_to_unconfirmed_messages_dict(self.__delivery_number, msg)
        self.__delivery_number += 1

        # Logging
        self.__logcounter_success += 1
        log_every_x_times(LOGGER, self.__logcounter_success, self.__LOGFREQUENCY, 'Actual publish to channel done (trigger no. %i, publish no. %i).', self.__logcounter_trigger, self.__logcounter_success)
        logtrace(LOGGER, 'Publishing message %i to RabbitMQ... done.', self.__delivery_number-1)
        if (self.__delivery_number-1 == 1):
            loginfo(LOGGER, 'First message published to RabbitMQ.')
        logdebug(LOGGER, 'Message published (no. %i)', self.__delivery_number-1)
Example #16
    def on_connection_open(self, unused_connection):
        logdebug(LOGGER, 'Opening connection... done.')
        loginfo(LOGGER, 'Connection to RabbitMQ at %s opened... (%s)',
                self.__node_manager.get_connection_parameters().host,
                get_now_utc_as_formatted_string())

        # Tell the main thread we're open for events now:
        # When the connection is open, the thread is ready to accept events.
        # Note: It was already ready when the connection object was created,
        # not just now that it's actually open. There was already a call to
        # "...stop_waiting..." in start_waiting_for_events(), which quite
        # certainly was carried out before this callback. So this call to
        # "...stop_waiting..." is likelily redundant!
        self.thread.tell_publisher_to_stop_waiting_for_thread_to_accept_events(
        )
        self.__please_open_rabbit_channel()
Example #17
 def make_permanently_closed_by_user(self):
     # This changes the state of the state machine!
     # This needs to be called from the shutter module
     # in case there is a force_finish while the connection
     # is already closed (as the callback on_connection_closed
     # is not called then).
     self.statemachine.set_to_permanently_unavailable()
     logtrace(LOGGER, 'Stop waiting for events due to user interrupt!')
     logtrace(LOGGER,
              'Permanent close: Stopping ioloop of connection %s...',
              self.thread._connection)
     self.thread._connection.ioloop.stop()
     loginfo(LOGGER, 'Stopped listening for RabbitMQ events (%s).',
             get_now_utc_as_formatted_string())
     logdebug(
         LOGGER,
         'Connection to messaging service closed by user. Will not reopen.')
Example #18
    def dataset_publication_finished(self, ignore_exception=False):
        '''
        This is the "commit". It triggers the creation/update of handles.
        
        * Check if the set of files corresponds to the previously published set (if applicable, and if solr url given, and if solr replied)
        * The dataset publication message is created and sent to the queue.
        * All file publication messages are sent to the queue.

        '''
        self.__check_if_dataset_publication_allowed_right_now()
        self.__check_data_consistency(ignore_exception)
        self.__coupler.start_rabbit_business() # Synchronous: Opens connection. Asynchronous: Ignored.
        self.__create_and_send_dataset_publication_message_to_queue()
        self.__send_existing_file_messages_to_queue()
        self.__coupler.done_with_rabbit_business() # Synchronous: Closes connection. Asynchronous: Ignored.
        self.__set_machine_state_to_finished()
        loginfo(LOGGER, 'Requesting to publish PID for dataset "%s" (version %s) and its files at "%s" (handle %s).', self.__drs_id, self.__version_number, self.__data_node, self.__dataset_handle)
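
A hedged usage note: once all files have been added to a publication assistant (not shown in this example), the commit is a single call; passing ignore_exception=True, as allowed by the signature above, lets publication proceed even if the data consistency check fails:

# 'publication_assistant' is assumed to be an already prepared assistant with
# all file information added; this line only illustrates the commit call.
publication_assistant.dataset_publication_finished(ignore_exception=True)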
Example #19
 def __check_for_already_arrived_messages_and_publish_them(self):
     logdebug(LOGGER,
              'Checking if messages have arrived in the meantime...')
     num = self.thread.get_num_unpublished()
     if num > 0:
         loginfo(
             LOGGER,
             'Ready to publish messages to RabbitMQ. %s messages are already waiting to be published.',
             num)
         for i in xrange(int(num * 1.1)):
             self.thread.add_event_publish_message()
     else:
         loginfo(LOGGER, 'Ready to publish messages to RabbitMQ.')
         logdebug(
             LOGGER,
             'Ready to publish messages to RabbitMQ. No messages waiting yet.'
         )
Example #20
 def __check_data_consistency(self, ignore_exception):
     checker = esgfpid.assistant.consistency.Checker(
         coupler=self.__coupler,
         drs_id=self.__drs_id,
         version_number=self.__version_number,
         data_node=self.__data_node
     )
     check_possible = checker.can_run_check()
     if check_possible:
         check_passed = checker.data_consistency_check(self.__list_of_file_handles)
         if check_passed:
             loginfo(LOGGER, 'Data consistency check passed for dataset %s.', self.__dataset_handle)
         else:
             msg = 'Dataset consistency check failed'
             logwarn(LOGGER, msg)
             if not ignore_exception:
                 raise esgfpid.exceptions.InconsistentFilesetException(msg)
     else:
         logdebug(LOGGER, 'No consistency check was carried out.')
Example #21
 def __wait_and_trigger_reconnection(self, connection, wait_seconds):
     if self.statemachine.is_FORCE_FINISHED():
         # TODO This is the same code as above. Make a give_up function from it?
         #self.statemachine.set_to_permanently_unavailable()
         #self.statemachine.detail_could_not_connect = True
         #max_tries = defaults.RABBIT_RECONNECTION_MAX_TRIES
         errormsg = (
             'Permanently failed to connect to RabbitMQ. Tried all hosts %s until received a force-finish. Giving up. No PID requests will be sent.'
             % list(self.__all_hosts_that_were_tried))
         logerror(LOGGER, errormsg)
         raise PIDServerException(errormsg)
     else:
         self.statemachine.set_to_waiting_to_be_available()
         loginfo(LOGGER, 'Trying to reconnect to RabbitMQ in %s seconds.',
                 wait_seconds)
         connection.add_timeout(wait_seconds, self.reconnect)
         logtrace(LOGGER,
                  'Reconnect event added to connection %s (not to %s)',
                  connection, self.thread._connection)
Example #22
    def __finish_gently(self):
        # Called directly from outside the thread!
        #self.statemachine.asked_to_closed_by_publisher = True # TODO

        # Make sure no more messages are accepted from publisher # TODO
        # while publishes/confirms are still accepted:
        #if self.statemachine.is_available_for_client_publishes():
        #    self.statemachine.set_to_wanting_to_stop()

        # Inform user
        if self.__are_any_messages_pending():
            wait_seconds = defaults.RABBIT_ASYN_FINISH_WAIT_SECONDS
            max_waits = defaults.RABBIT_ASYN_FINISH_MAX_TRIES
            loginfo(LOGGER, 'Preparing to close PID module. Some messages are pending. Maximum waiting time: %i seconds. (%s)', wait_seconds*max_waits, get_now_utc_as_formatted_string())
        else:
            loginfo(LOGGER, 'Closing PID module. No pending messages. (%s)', get_now_utc_as_formatted_string())

        # Go through decision tree (close or wait for pending messages)
        self.__close_decision_iterations = 1
        self.__is_in_process_of_gently_closing = True
        self.recursive_decision_about_closing()
Example #23
    def on_connection_closed(self, connection, exception):

        # From the docs: The exception will either be an instance of
        # exceptions.ConnectionClosed if a fully-open connection was closed
        # by user or broker or exception of another type (...)
        if isinstance(exception, pika.exceptions.ConnectionClosed):
            reply_code = exception.reply_code
            reply_text = exception.reply_text
        else:
            # TODO Not sure when this might happen, could not reproduce.
            reply_code = -1
            reply_text = str(exception)

        loginfo(LOGGER, 'Connection to RabbitMQ was closed. Reason: %s.',
                reply_text)
        self.thread._channel = None
        if self.__was_user_shutdown(reply_code, reply_text):
            loginfo(LOGGER, 'Connection to %s closed.',
                    self.__node_manager.get_connection_parameters().host)
            self.make_permanently_closed_by_user()
        elif self.__was_permanent_error(reply_code, reply_text):
            loginfo(LOGGER, 'Connection to %s closed.',
                    self.__node_manager.get_connection_parameters().host)
            self.__make_permanently_closed_by_error(connection, reply_text)
        else:
            # This reconnects to next host:
            self.on_connection_error(connection, reply_text)
Example #24
    def __have_we_waited_enough_now(self, iteration):
        logdebug(LOGGER, 'Gentle finish (iteration %i): Check if the rabbit thread has waited long enough...', self.__close_decision_iterations)

        wait_seconds = defaults.RABBIT_ASYN_FINISH_WAIT_SECONDS
        max_waits = defaults.RABBIT_ASYN_FINISH_MAX_TRIES

        tried = iteration
        waited = iteration-1

        # Logging:
        logdebug(LOGGER, 'Gentle finish (iteration %i): At this point we have tried %i times and waited %i/%i times (%i seconds)', self.__close_decision_iterations, tried, waited, max_waits, waited*wait_seconds)
        log_every_x_seconds = 2
        if ((waited*wait_seconds)%log_every_x_seconds==0 or waited>=max_waits):
            msg = self.__get_string_about_pending_messages()
            loginfo(LOGGER, 'Still pending: %s messages... (waited %.1f/%.1f seconds)', msg, waited*wait_seconds, max_waits*wait_seconds)
        
        # Return:
        if waited >= max_waits:
            logdebug(LOGGER, 'Gentle finish (iteration %i): The rabbit thread has waited long enough for pending messages at close down.', self.__close_decision_iterations)
            return True
        logdebug(LOGGER, 'Gentle finish (iteration %i): We should wait a little more for pending messages.', self.__close_decision_iterations)
        return False
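
A small worked example of the bookkeeping above, with assumed defaults (the values are placeholders, not necessarily the library's): in iteration 3 the thread has tried 3 times and waited 2 times, i.e. 4 seconds, and it keeps waiting until waited reaches max_waits:

wait_seconds, max_waits = 2, 10   # assumed stand-ins for the RABBIT_ASYN_FINISH_* defaults
iteration = 3
tried, waited = iteration, iteration - 1
print(tried, waited, waited * wait_seconds)  # 3 2 4
print(waited >= max_waits)                   # False -> wait a little more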
Example #25
    def __finish_gently(self):
        # Called directly from outside the thread!

        # No more messages can arrive from publisher (because
        # the main thread blocks), but publishes/confirms are still
        # accepted.

        # Inform user
        if self.__are_any_messages_pending():
            wait_seconds = defaults.RABBIT_ASYN_FINISH_WAIT_SECONDS
            max_waits = defaults.RABBIT_ASYN_FINISH_MAX_TRIES
            loginfo(
                LOGGER,
                'Preparing to close PID module. Some messages are pending. Maximum waiting time: %i seconds. (%s)',
                wait_seconds * max_waits, get_now_utc_as_formatted_string())
        else:
            loginfo(LOGGER, 'Closing PID module. No pending messages. (%s)',
                    get_now_utc_as_formatted_string())

        # Go through decision tree (close or wait for pending messages)
        self.__close_decision_iterations = 1
        self.__is_in_process_of_gently_closing = True
        self.recursive_decision_about_closing()
Example #26
    def make_permanently_closed_by_error(self, connection, reply_text):
        # This changes the state of the state machine!
        # This needs to be called if there is a permanent
        # error and we don't want the library to reconnect,
        # and we also don't want to pretend it was closed
        # by the user.
        # This is really rarely needed.
        self.statemachine.set_to_permanently_unavailable()
        logtrace(LOGGER, 'Stop waiting for events due to permanent error!')

        # In case the main thread was waiting for any synchronization event.
        self.thread.unblock_events()

        # Close ioloop, which blocks the thread.
        logdebug(LOGGER,
                 'Permanent close: Stopping ioloop of connection %s...',
                 self.thread._connection)
        self.thread._connection.ioloop.stop()
        loginfo(LOGGER, 'Stopped listening for RabbitMQ events (%s).',
                get_now_utc_as_formatted_string())
        logdebug(
            LOGGER,
            'Connection to messaging service closed because of error. Will not reopen. Reason: %s',
            reply_text)
Example #27
    def unpublish_all_dataset_versions(self):

        # If solr is switched off, consumer must find versions:
        if self._coupler.is_solr_switched_off():
            self.__unpublish_allversions_consumer_must_find_versions()

        # Get handles or version numbers from solr:
        else:
            all_handles_or_versionnumbers = self.__get_all_handles_or_versionnumbers()
            all_handles = all_handles_or_versionnumbers['dataset_handles']
            all_version_numbers = all_handles_or_versionnumbers['version_numbers']

            # If we can have all versions' handles, it's easy.
            if all_handles is not None:
                self.__unpublish_all_dataset_versions_by_handle(all_handles)

            # If not, we have the version numbers (and can make the handles from them):
            elif all_version_numbers is not None:
                self.__unpublish_all_dataset_versions_by_version(all_version_numbers)

            # If neither, let the consumer find them
            else:
                self.__unpublish_allversions_consumer_must_find_versions()
                loginfo(LOGGER, 'Requesting to unpublish all versions of dataset %s from %s', self._drs_id, self._data_node)
Example #28
 def on_connection_closed(self, connection, reply_code, reply_text):
     loginfo(LOGGER, 'Connection to RabbitMQ was closed. Reason: %s.',
             reply_text)
     self.thread._channel = None
     if self.__was_user_shutdown(reply_code, reply_text):
         loginfo(LOGGER, 'Connection to %s closed.',
                 self.__node_manager.get_connection_parameters().host)
         self.make_permanently_closed_by_user()
     elif self.__was_permanent_error(reply_code, reply_text):
         loginfo(LOGGER, 'Connection to %s closed.',
                 self.__node_manager.get_connection_parameters().host)
         self.__make_permanently_closed_by_error(connection, reply_text)
     else:
         # This reconnects to the next host:
         self.on_connection_error(connection, reply_text)
Example #29
 def on_connection_closed(self, connection, reply_code, reply_text):
     loginfo(LOGGER, 'Connection to RabbitMQ was closed. Reason: %s.',
             reply_text)
     self.thread._channel = None
     if self.__was_user_shutdown(reply_code, reply_text):
         loginfo(LOGGER, 'Connection to %s closed.',
                 self.__node_manager.get_connection_parameters().host)
         self.make_permanently_closed_by_user()
     elif self.__was_permanent_error(reply_code, reply_text):
         loginfo(LOGGER, 'Connection to %s closed.',
                 self.__node_manager.get_connection_parameters().host)
         self.make_permanently_closed_by_error(connection, reply_text)
     else:
         #reopen_seconds = defaults.RABBIT_RECONNECTION_SECONDS
         #self.__wait_and_trigger_reconnection(connection, reopen_seconds)
         self.on_connection_error(connection, reply_text)
Example #30
    def unpublish_one_dataset_version(self, **args):
        optional_args = ['dataset_handle', 'version_number']
        esgfpid.utils.add_missing_optional_args_with_value_none(args, optional_args)

        handle = args['dataset_handle']
        version_number = args['version_number']

        if handle and version_number:
            self.__both_given(handle, version_number)
            loginfo(LOGGER, 'Requesting to unpublish version %s of dataset %s from %s (handle: %s).', version_number, self._drs_id, self._data_node, handle)
        elif handle:
            self.__only_handle_given(handle)
            loginfo(LOGGER, 'Requesting to unpublish a version of dataset %s from %s (handle: %s).', self._drs_id, self._data_node, handle)
        elif version_number:
            self.__only_version_given(version_number)
            loginfo(LOGGER, 'Requesting to unpublish version %s of dataset %s from %s.', version_number, self._drs_id, self._data_node)
        else:
            msg = 'Neither a handle nor a version number was specified for unpublication!'
            raise esgfpid.exceptions.ArgumentError(msg)
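
A hedged usage sketch for this method: the keyword names come from optional_args above, and either one of the two identifiers is sufficient (the assistant object and the concrete values are assumptions for illustration):

# 'unpublication_assistant' is assumed to be an already configured assistant.
unpublication_assistant.unpublish_one_dataset_version(version_number='20200101')
unpublication_assistant.unpublish_one_dataset_version(dataset_handle='hdl:21.14100/foo')
# Passing neither keyword raises esgfpid.exceptions.ArgumentError.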