Example #1
    def _retry_unique_violation(self):
        """ Context manager: catch Unique constraint error and retry the
        job later.

        When we execute several job workers concurrently, it sometimes
        happens that 2 jobs create the same record at the same time (binding
        record created by :meth:`_export_dependency`), resulting in:

            IntegrityError: duplicate key value violates unique
            constraint "my_backend_product_product_odoo_uniq"
            DETAIL:  Key (backend_id, odoo_id)=(1, 4851) already exists.

        In that case, we'll simply retry the import later.

        .. warning:: The unique constraint must be created on the
                     binding record to prevent 2 bindings from being
                     created for the same External record.

        """
        try:
            yield
        except psycopg2.IntegrityError as err:
            if err.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
                raise RetryableJobError(
                    "A database error caused the failure of the job:\n"
                    "%s\n\n"
                    "Likely due to 2 concurrent jobs wanting to create "
                    "the same record. The job will be retried later." % err)
            else:
                raise
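The method above yields, so in its original class it is presumably decorated with ``contextlib.contextmanager`` (not visible in this excerpt). A minimal usage sketch follows; ``_export_dependency`` is the caller mentioned in the docstring, but its body and the ``relation``/``binding_model`` parameters are illustrative assumptions (only ``backend_id`` and ``odoo_id`` come from the constraint shown above).

    # Hypothetical caller (a sketch, not the connector's actual code):
    # wrap the creation of the binding so that a concurrent duplicate
    # insert becomes a retried job instead of a failed one.
    def _export_dependency(self, relation, binding_model):
        with self._retry_unique_violation():
            self.env[binding_model].create({
                'backend_id': self.backend_record.id,  # unique together...
                'odoo_id': relation.id,                # ...with odoo_id
            })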
Example #2
    def _retry_unique_violation(self):
        """ Context manager: catch Unique constraint error and retry the
        job later.

        When we execute several job workers concurrently, it sometimes
        happens that 2 jobs create the same record at the same time
        (especially product templates as they are shared by a lot of
        sales orders), resulting in:

            IntegrityError: duplicate key value violates unique
            constraint "jira_project_project_external_id_uniq"
            DETAIL:  Key (backend_id, external_id)=(1, 4851) already exists.

        In that case, we'll simply retry the import later.

        """
        try:
            yield
        except IntegrityError as err:
            if err.pgcode == errorcodes.UNIQUE_VIOLATION:
                raise RetryableJobError(
                    'A database error caused the failure of the job:\n'
                    '%s\n\n'
                    'Likely due to 2 concurrent jobs wanting to create '
                    'the same record. The job will be retried later.' % err)
            else:
                raise
Example #3
    def _lock(self):
        """ Lock the binding record.

        Lock the binding record so we are sure that only one export
        job is running for this record if concurrent jobs have to export the
        same record.

        When concurrent jobs try to export the same record, the first one
        will lock and proceed, the others will fail to lock and will be
        retried later.

        This behavior works also when the export becomes multilevel
        with :meth:`_export_dependencies`. Each level will set its own lock
        on the binding record it has to export.

        """
        sql = "SELECT id FROM %s WHERE ID = %%s FOR UPDATE NOWAIT" % self.model._table
        try:
            self.env.cr.execute(sql, (self.binding.id, ), log_exceptions=False)
        except psycopg2.OperationalError:
            _logger.info(
                "A concurrent job is already exporting the same "
                "record (%s with id %s). Job delayed later.",
                self.model._name,
                self.binding.id,
            )
            raise RetryableJobError(
                "A concurrent job is already exporting the same record "
                "(%s with id %s). The job will be retried later." %
                (self.model._name, self.binding.id))
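A sketch of where such a lock typically sits in the export flow; ``_export_dependencies`` comes from the docstring above, while ``_run`` and the rest are illustrative assumptions.

    def _run(self):
        # Take the row lock first: if another transaction already holds
        # FOR UPDATE on this binding, NOWAIT makes psycopg2 raise
        # OperationalError and _lock() turns it into a RetryableJobError.
        self._lock()
        # Each dependency export takes its own lock on its own binding.
        self._export_dependencies()
        # ... map and export the record itself ...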
Example #4
    def _retry_unique_violation(self):
        """Catch Unique constraint error and retry the job later.

        When we execute several job workers concurrently, sometimes 2 jobs
        create the same record at the same time (binding
        record created by :meth:`_export_dependency`), resulting in::

            IntegrityError: duplicate key value violates unique
            constraint "helpscout_product_product_odoo_uniq"
            DETAIL:  Key (backend_id, odoo_id)=(1, 4851) already exists.

        In that case, we'll simply retry the import later.

        Warning:
            The unique constraint must be created on the binding record to
            prevent 2 bindings for the same HelpScout record.
        """
        try:
            yield
        except psycopg2.IntegrityError as err:
            if err.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
                raise RetryableJobError(_(
                    'A database error caused the failure of the job:\n'
                    '%s\n\n'
                    'Likely due to 2 concurrent jobs wanting to create '
                    'the same record. The job will be retried later.'
                ) % (
                    err,
                ))
            else:
                raise
Example #5
    def lock(self, records, seconds=None, ignore_retry=True):
        """Lock the records.

        Lock the record so we are sure that only one job is running for this
        record(s) if concurrent jobs have to run a job for the same record(s).
        When concurrent jobs try to work on the same record(s), the first one
        will lock and proceed, the others will fail to acquire it and will be
        retried later
        (:exc:`~odoo.addons.queue_job.exception.RetryableJobError` is raised).

        The lock uses ``FOR UPDATE NOWAIT``, so any concurrent transaction
        trying FOR UPDATE/UPDATE will be rejected until the current transaction
        is committed or rolled back.

        A classical use case for this is to prevent concurrent exports.

        The following parameters are forwarded to the exception
        :exc:`~odoo.addons.queue_job.exception.RetryableJobError`

        :param seconds: In case of retry because the lock cannot be acquired,
                        in how many seconds it must be retried. If not set,
                        the queue_job configuration is used.
        :param ignore_retry: If True, the retry counter of the job will not be
                             increased.
        """
        sql = ("SELECT id FROM %s WHERE ID IN %%s FOR UPDATE NOWAIT" %
               self.model._table)
        try:
            self.env.cr.execute(sql, (tuple(records.ids), ),
                                log_exceptions=False)
        except psycopg2.OperationalError:
            _logger.info(
                "A concurrent job is already working on the same "
                "record (%s with one id in %s). Job delayed later.",
                self.model._name,
                tuple(records.ids),
            )
            raise RetryableJobError(
                "A concurrent job is already working on the same record "
                "(%s with one id in %s). The job will be retried later." % (
                    self.model._name,
                    tuple(records.ids),
                ),
                seconds=seconds,
                ignore_retry=ignore_retry)
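A usage sketch for the helper above; the ``export_record`` wrapper and ``self.binding`` are assumptions, only the ``seconds`` and ``ignore_retry`` keywords come from the signature shown.

    # Hypothetical job method built on the lock() helper above.
    def export_record(self):
        # If another job holds the lock, retry in 2 seconds without
        # increasing the job's retry counter.
        self.lock(self.binding, seconds=2, ignore_retry=True)
        return self._run()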
Example #6
    def _call(self, method, arguments):
        try:
            custom_url = self.magento.use_custom_api_path
            _logger.debug("Start calling Magento api %s", method)
            with magentolib.API(self.magento.location,
                                self.magento.username,
                                self.magento.password,
                                full_url=custom_url) as api:
                # When Magento is installed on PHP 5.4+, the API
                # may return garbled data if the arguments contain
                # trailing None.
                if isinstance(arguments, list):
                    while arguments and arguments[-1] is None:
                        arguments.pop()
                start = datetime.now()
                try:
                    result = api.call(method, arguments)
                except Exception:
                    _logger.error("api.call(%s, %s) failed", method, arguments)
                    raise
                else:
                    _logger.debug("api.call(%s, %s) returned %s in %s seconds",
                                  method, arguments, result,
                                  (datetime.now() - start).seconds)
                # Uncomment to record requests/responses in ``recorder``
                # record(method, arguments, result)
                return result
        except (socket.gaierror, socket.error, socket.timeout) as err:
            raise NetworkRetryableError(
                'A network error caused the failure of the job: '
                '%s' % err)
        except xmlrpclib.ProtocolError as err:
            if err.errcode in [502,   # Bad gateway
                               503,   # Service unavailable
                               504]:  # Gateway timeout
                raise RetryableJobError(
                    'A protocol error caused the failure of the job:\n'
                    'URL: %s\n'
                    'HTTP/HTTPS headers: %s\n'
                    'Error code: %d\n'
                    'Error message: %s\n' %
                    (err.url, err.headers, err.errcode, err.errmsg))
            else:
                raise
Example #7
    def lock(self, records):
        """Lock the records.

        Lock the record so we are sure that only one job is running for this
        record(s) if concurrent jobs have to create a message for the same
        record(s).
        When concurrent jobs try to work on the same record(s), the first one
        will lock and proceed, the others will fail to lock and will be retried
        later.
        """
        sql = ("SELECT id FROM %s WHERE ID IN %%s FOR UPDATE NOWAIT" %
               self.model._table)
        try:
            self.env.cr.execute(sql, (tuple(records.ids), ),
                                log_exceptions=False)
        except psycopg2.OperationalError:
            _logger.info(
                'A concurrent job is already working on the same '
                'record (%s with one id in %s). Job delayed later.',
                self.model._name, tuple(records.ids))
            raise RetryableJobError(
                'A concurrent job is already working on the same record '
                '(%s with one id in %s). The job will be retried later.' %
                (self.model._name, tuple(records.ids)))
Example #8
    def _call(self, method, endpoint, data=None):
        try:
            api = API(
                url=self.woo.location,
                consumer_key=self.woo.consumer_key,
                consumer_secret=self.woo.consumer_secret,
                version=self.woo.version,
                wp_api=True,
                timeout=None,
            )
            if api:
                if method == 'GET':
                    r = api.get(endpoint)
                elif method == 'POST':
                    r = api.post(endpoint, data)
                elif method == 'PUT':
                    r = api.put(endpoint, data)

                if r.status_code in [200, 201]:
                    res = r.json()
                    _logger.info('%s: %s', endpoint, res)
                    return res
                else:
                    code = r.json().get('code')
                    message = r.json().get('message')
                    _logger.info('%s: %s, %s' % (endpoint, code, message))
                    err_res = {'id': None}
                    if 'customers' in endpoint:
                        if code == 'registration-error-email-exists' and method == 'POST':
                            return self._call(method='GET',
                                              endpoint='customers?search=%s' %
                                              data.get('email'))[0]
                        elif code == 'registration-error-invalid-email':
                            return err_res
                        elif code == 'rest_missing_callback_param':
                            return err_res
                        elif code == 'woocommerce_rest_invalid_id':
                            return err_res
                    elif 'products/categories' in endpoint:
                        if code == 'term_exists' and method == 'POST':
                            items = []
                            for item in self._call(
                                    method='GET',
                                    endpoint='products/categories?search=%s' %
                                    data.get('name')):
                                if item.get('name') == data.get(
                                        'name') and data.get(
                                            'parent', 0) == item.get('parent'):
                                    items.append(item)

                            return items[0]
                        elif code == 'woocommerce_rest_term_invalid' and message == 'Resource does not exist.':
                            return err_res
                    elif 'products' in endpoint:
                        if code == 'woocommerce_rest_product_invalid_id':
                            return err_res
                    elif 'orders' in endpoint:
                        if code == 'woocommerce_rest_shop_order_invalid_id':
                            return err_res

        except (socket.gaierror, socket.error, socket.timeout) as err:
            raise NetworkRetryableError(
                'A network error caused the failure of the job: '
                '%s' % err)
        except xmlrpclib.ProtocolError as err:
            if err.errcode in [
                    502,  # Bad gateway
                    503,  # Service unavailable
                    504
            ]:  # Gateway timeout
                raise RetryableJobError(
                    'A protocol error caused the failure of the job:\n'
                    'URL: %s\n'
                    'HTTP/HTTPS headers: %s\n'
                    'Error code: %d\n'
                    'Error message: %s\n' %
                    (err.url, err.headers, err.errcode, err.errmsg))
            else:
                raise
Example #9
def retryable_error_task(session):
    raise RetryableJobError('Must be retried later')
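``RetryableJobError`` also accepts the ``seconds`` and ``ignore_retry`` arguments used in the other examples; a sketch of the same task asking for a retry in 5 seconds without touching the retry counter:

def retryable_error_task(session):
    # Retry in 5 seconds and leave the job's retry counter untouched.
    raise RetryableJobError('Must be retried later',
                            seconds=5, ignore_retry=True)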
Example #10
    def run(self, external_id, force=False, record=None, **kwargs):
        """ Run the synchronization

        A record can be given, reducing the number of calls when
        a call already returns data (example: a user returns its addresses).

        :param external_id: identifier of the record on Jira
        """
        self.external_id = external_id
        lock_name = 'import({}, {}, {}, {})'.format(
            self.backend_record._name,
            self.backend_record.id,
            self.model._name,
            self.external_id,
        )
        # Keep a lock on this import until the transaction is committed
        self.advisory_lock_or_retry(lock_name,
                                    retry_seconds=RETRY_ON_ADVISORY_LOCK)
        if record is not None:
            self.external_record = record
        else:
            try:
                self.external_record = self._get_external_data()
            except IDMissingInBackend:
                return _('Record no longer exists in Jira')
        binding = self._get_binding()
        if not binding:
            with self.do_in_new_connector_env() as new_connector_env:
                # Even when we use an advisory lock, we may have
                # concurrent issues.
                # Explanation:
                # We import Partner A and B, both of them import a
                # partner category X.
                #
                # The squares represent the duration of the advisory
                # lock, the transactions starts and ends on the
                # beginnings and endings of the 'Import Partner'
                # blocks.
                # T1 and T2 are the transactions.
                #
                # ---Time--->
                # > T1 /------------------------\
                # > T1 | Import Partner A       |
                # > T1 \------------------------/
                # > T1        /-----------------\
                # > T1        | Imp. Category X |
                # > T1        \-----------------/
                #                     > T2 /------------------------\
                #                     > T2 | Import Partner B       |
                #                     > T2 \------------------------/
                #                     > T2        /-----------------\
                #                     > T2        | Imp. Category X |
                #                     > T2        \-----------------/
                #
                # As you can see, the locks for Category X do not
                # overlap, and the transaction T2 starts before the
                # commit of T1. So no lock prevents T2 from importing
                # the category X, and T2 does not see that T1 already
                # imported it.
                #
                # The workaround is to open a new DB transaction at the
                # beginning of each import (e.g. at the beginning of
                # "Imp. Category X") and to check if the record has been
                # imported meanwhile. If it has been imported, we raise
                # a RetryableJobError so T2 is rolled back and retried
                # later (and the new transaction T3 will be aware of the
                # category X from its inception).
                binder = new_connector_env.get_connector_unit(Binder)
                if binder.to_internal(self.external_id):
                    raise RetryableJobError(
                        'Concurrent error. The job will be retried later',
                        seconds=RETRY_WHEN_CONCURRENT_DETECTED,
                        ignore_retry=True)

        reason = self.must_skip()
        if reason:
            return reason

        if not force and self._is_uptodate(binding):
            return _('Already up-to-date.')

        self._before_import()

        # import the missing linked resources
        self._import_dependencies()

        self._import(binding, **kwargs)
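The ``advisory_lock_or_retry`` call at the top of ``run`` relies on a PostgreSQL advisory lock. A standalone sketch of that pattern follows; the hashing scheme and function name are illustrative assumptions, not the connector's actual implementation.

import hashlib
import struct

from odoo.addons.queue_job.exception import RetryableJobError


def advisory_lock_or_retry(env, lock_name, retry_seconds=1):
    # Derive a bigint key from the lock name and try to take a
    # transaction-level advisory lock; PostgreSQL releases it
    # automatically at commit or rollback.
    key = struct.unpack(
        'q', hashlib.sha1(lock_name.encode('utf-8')).digest()[:8])[0]
    env.cr.execute('SELECT pg_try_advisory_xact_lock(%s)', (key,))
    if not env.cr.fetchone()[0]:
        raise RetryableJobError(
            'Could not acquire advisory lock %s, the job will be '
            'retried later' % lock_name,
            seconds=retry_seconds,
            ignore_retry=True)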
Example #11
    def check_active(self, backend):
        if not backend.active:
            raise RetryableJobError(
                'Backend %s is inactive, please consider changing this. '
                'The job will be retried later.' %
                (backend.name,))