Example #1
@contextmanager
def atomic_lock(lock_dir, lock_name, lifetime=60):
    """An atomic, NFS-safe implementation of a file lock.

    Uses `flufl.lock` under the hood. Can be used as a context manager::

        with atomic_lock(settings.TMP_PATH, 'extraction-1234'):
            extract_xpi(...)

    :return: `True` if the lock was attained and we now own it,
             `False` if the lock is already held.
    """
    lock_name = lock_name + '.lock'
    count = _lock_count.get(lock_name, 0)

    log.debug('Acquiring lock %s, count is %d.' % (lock_name, count))

    lock_name = os.path.join(lock_dir, lock_name)
    lock = flufl.lock.Lock(lock_name, lifetime=timedelta(seconds=lifetime))

    try:
        # Set `timeout=0` to avoid blocking; if the lock is contended, the
        # TimeOutError raised instead is handled below.
        lock.lock(timeout=timedelta(seconds=0))
    except flufl.lock.AlreadyLockedError:
        # This process already holds the lock
        yield False
    except flufl.lock.TimeOutError:
        # Some other process holds the lock.
        # Let's break the lock if it has expired. Unfortunately
        # there's a bug in flufl.lock so let's do this manually.
        # Bug: https://gitlab.com/warsaw/flufl.lock/merge_requests/1
        release_time = lock._releasetime
        max_release_time = release_time + flufl.lock._lockfile.CLOCK_SLOP

        if (release_time != -1 and datetime.now() > max_release_time):
            # Break the lock and try to acquire it again
            lock._break()
            lock.lock(timeout=timedelta(seconds=0))
            yield lock.is_locked
        else:
            # Already locked
            yield False
    else:
        # This is usually `True`, but in case a weird `lifetime` value was
        # set we return whether we really attained the lock.
        yield lock.is_locked

    if lock.is_locked:
        log.debug('Releasing lock %s.' % lock.details[2])
        lock.unlock()
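A minimal usage sketch of the helper above (assuming it is exposed as `utils.atomic_lock` as in the tests below, and decorated with `contextlib.contextmanager` so the generator works as a context manager):

with utils.atomic_lock('/tmp/', 'extraction-1234') as lock_attained:
    if lock_attained:
        # We own the lock for up to `lifetime` seconds; do the protected work.
        extract_xpi(...)
    else:
        # Another process holds the lock; skip or retry later.
        pass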
Example #3
def test_atomic_lock_with():
    lock = flufl.lock.Lock('/tmp/test-atomic-lock1.lock')

    assert not lock.is_locked

    lock.lock()

    assert lock.is_locked

    with utils.atomic_lock('/tmp/', 'test-atomic-lock1') as lock_attained:
        assert not lock_attained

    lock.unlock()

    with utils.atomic_lock('/tmp/', 'test-atomic-lock1') as lock_attained:
        assert lock_attained
    def _get_request_content(self, path):
        """
        Return a dictionary with the content of the given YAML file path.
        Locks the request directory while loading.
        Raise an exception on failure.
        """

        lock_path = os.path.join(self.dir, ".lock")
        lock = flufl.lock.Lock(lock_path, lifetime=_requests_lock_lifetime)

        # This may raise a flufl.lock.TimeOutError, which the caller will handle.
        lock.lock(timeout=_requests_lock_timeout)

        # Try returning the file content. Unlock in any case.
        try:
            with open(path) as stream:
                return yaml.safe_load(stream)
        finally:
            lock.unlock()
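The method above assumes two module-level constants that are not part of the snippet; a hedged sketch of those constants and of a caller handling the lock timeout (`handler` and `request_path` are hypothetical names):

from datetime import timedelta

import flufl.lock

# Illustrative values only; the real module defines its own.
_requests_lock_lifetime = timedelta(minutes=1)
_requests_lock_timeout = timedelta(seconds=5)

try:
    content = handler._get_request_content(request_path)
except flufl.lock.TimeOutError:
    # The request directory is locked by another process; try again later.
    content = None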
Example #6
def main():
    """
    Put a request file according to the push that invoked this script.
    """
    env = os.environ
    if ("GL_REPO" not in env) or ("GL_USER" not in env):
        return 1

    repo = env["GL_REPO"]
    user = env["GL_USER"]

    if not repo or not user:
        return 2

    # Admin repository doesn't trigger requests.
    if repo == "gitolite-admin":
        return 0

    # When receiving an update from developer "joe" to the repository
    # devs/joe/repo, the request name is going to be in this format:
    # 2000-01-01.10.00.00_joe_devs.joe.repo.yaml
    time_string = datetime.strftime(datetime.now(), "%Y-%m-%d.%H.%M.%S")
    repo_name_path = repo.replace("/", ".")
    request_name = time_string + "_" + user + "_" + repo_name_path + ".yaml"
    request_path = os.path.join(_requests_dir, request_name)

    info = {
        "user": user,
        "repo": repo
    }

    try:
        yaml_info = yaml.safe_dump(info)
    except Exception:
        return 3

    # Both the lock file and the request need to be readable and writable
    # by the group, but git/gitolite impose a umask. We change it explicitly.
    # Reference:
    # https://stackoverflow.com/questions/11574271/git-change-default-umask-when-update-file
    os.umask(0o002)
    lock_path = os.path.join(_requests_dir, ".lock")
    lock = flufl.lock.Lock(lock_path, lifetime=_lock_lifetime)

    # Try to acquire the lock with the defined timeout.
    # We don't use "with lock" because then we can't define a custom timeout.
    try:
        lock.lock(timeout=_lock_timeout)
    except Exception:
        return 4

    # Try to write the file. Clean up the lock in any case.
    try:
        with open(request_path, "w") as stream:
            stream.write(yaml_info)
    except Exception:
        return 5
    finally:
        try:
            lock.unlock()
        except Exception:
            # This can happen if the lock expired.
            # We don't care about this for now, because it shouldn't
            # take so much time to write the YAML.
            pass

    return 0
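main() relies on module-level constants and an entry point that the snippet does not show; a minimal sketch with purely illustrative values:

import os
import sys
from datetime import datetime, timedelta

import flufl.lock
import yaml

# Assumed configuration; the real hook defines its own paths and timings.
_requests_dir = "/home/git/requests"
_lock_lifetime = timedelta(minutes=1)
_lock_timeout = timedelta(seconds=10)

if __name__ == "__main__":
    sys.exit(main())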
    def process_mailbox(self, mailbox):

        self.set_log_context('mailbox', mailbox)
        mailbox_path = self.get_mailbox_path(mailbox)    

        # Try to obtain a lock on the mailbox.
        try:
            lock = flufl.lock.Lock(lockfile=os.path.join(mailbox_path, '.bsmtp_lock'), lifetime=datetime.timedelta(minutes=15))
            lock.retry_errnos = [116]  # errno 116: ESTALE (stale NFS file handle)
            lock.lock(timeout=datetime.timedelta(seconds=5))
        except flufl.lock.LockError as e:
            self.log('cannot obtain lock on mailbox path {0} ({1})'.format(mailbox_path, e))
            lock = None

        if lock is not None and lock.is_locked:
            try:
               
                # We try not to run too often: if the last run was not successful,
                # wait at least a minute at first, doubling the time between
                # consecutive runs until we run once an hour.
                last = 0
                now = int(time.time())
                success = now
                throttle_file = os.path.join(mailbox_path, '.bsmtp_run')
                try:
                    with open(throttle_file) as tf:
                        r = tf.readline().split()
                        last = int(r[0])
                        success = int(r[1])
                        now = int(time.time())
                        if (last != success) and (now - last) < 3600:  # last run unsuccessful and less than an hour ago
                            if (
                                ((now - last) < 60) or  # unsuccessful run less than a minute ago
                                ((now - last) < (last - success))  # elapsed time is still shorter than the previous backoff interval
                            ):
                                # Skip this run.
                                return
                except Exception:
                    # Missing or malformed throttle file; just run.
                    pass
        
                mailbox_files = self.get_mailbox_files(mailbox)
                mail_count = len(mailbox_files)
                deliver_count = 0
                tempfail_count = 0
                tempfail_limit = self.config.get('bsmtp_deliver', {}).get('tempfail_limit', 10)
                
                if mail_count > 0:
                
                    # There is email to be delivered. Get delivery details.
            
                    (domain, mailhost, port, tls_type, tls_digest_type, tls_digest) = self.query_config_table('bsmtp_delivery', mailbox)[0]
                    report = mailboxutil.MailboxReport(mailbox, 'rejection')
                    
                    # Make a Rejected directory if it does not yet exist
                    rejection_dir = os.path.join(self.get_mailbox_path(mailbox, self.config.get('bsmtp_mailbox', {}).get('rejection', {}).get('folder', 'Rejected')), 'new')
                    if not os.path.exists(rejection_dir):
                        os.makedirs(rejection_dir)

                    # Try to connect to the mailserver, deliver all the
                    # mail, then exit.
                    
                    try:
                        with delivery.Delivery(mailbox=mailbox) as d:
                            for (subdir, filename) in mailbox_files:
                                for k in ('id', 'from', 'to', 'size', 'message_id', 'queue_id'):
                                    self.del_log_context(k)
                                source_subpath = os.path.join(subdir, filename)
                                source_path = os.path.join(mailbox_path, source_subpath)
                                self.set_log_context('message_file', source_subpath)
                                with open(source_path) as mail_file:
                                    try:
                                        self.log('starting delivery', priority=syslog.LOG_DEBUG)
                                        d.deliver(mail_file)
                                        os.unlink(mail_file.name)
                                        deliver_count = deliver_count + 1
                                        tempfail_count = 0
                                    except delivery.DeliveryError as e:
                                        if e.fatal:
                                            self.log('rejected ({0})'.format(e))
                                            report.add_email(
                                                sender=e.sender,
                                                recipient=e.recipient,
                                                message_path=source_subpath,
                                                extra='{0} {1}'.format(e.code, e.message)
                                            )
                                            os.rename(
                                                source_path,
                                                os.path.join(rejection_dir, filename)
                                            )
                                            # Just lower the tempfail
                                            # counter for a permanent error,
                                            # but don't reset it to 0.
                                            if tempfail_count > 0:
                                                tempfail_count = tempfail_count - 1
                                        else:
                                            tempfail_count = tempfail_count + 1
                                            self.log('tempfail {0} ({1})'.format(tempfail_count, e))
                                if tempfail_count >= tempfail_limit:
                                    self.log('giving up after {0} subsequent tempfail responses'.format(tempfail_count))
                                    break
                                lock.refresh()
                                self.del_log_context('message_file')
                    except socket.error as e:
                        self.log('socket error ({0})'.format(e))
                        pass
                    except smtplib.SMTPConnectError as e:
                        self.log('SMTP connection error ({0})'.format(e))
                        pass

                    # Did we deliver any email?
                    if deliver_count > 0:

                        # Remove the maildirsize file if we're configured to do
                        # so. This triggers quota recalculating by the mailserver.
                        if self.config.get('bsmtp_mailbox', {}).get('remove_maildirsize', False):
                            try:
                                os.unlink(os.path.join(mailbox_path, 'maildirsize'))
                            except OSError:
                                pass
             
                        # Update time of last successful delivery
                        success = int(time.time())

                    # Send the report if there is something in it.
                    report.send()
                else:  # empty mailbox, always successful
                    success = int(time.time())

                # Record the time of this run.
                try:
                    with open(throttle_file, 'w') as tf:
                        tf.write("{0} {1}\n".format(int(time.time()), success))
                except Exception:
                    # Best effort: failing to record the run time is not fatal.
                    pass
            
            except Exception as e:
                self.log('caught exception {0}'.format(e), priority=syslog.LOG_ERR)
                raise
                    
            finally:
                lock.unlock()
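For reference, a sketch of the configuration keys process_mailbox consults, inferred from the self.config.get(...) calls above (the defaults shown are the ones used in the code; the values are otherwise illustrative):

config = {
    'bsmtp_deliver': {
        'tempfail_limit': 10,  # stop delivering after this many consecutive temporary failures
    },
    'bsmtp_mailbox': {
        'remove_maildirsize': False,  # remove maildirsize after delivery to force a quota recalculation
        'rejection': {
            'folder': 'Rejected',  # maildir folder that receives permanently rejected messages
        },
    },
}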
def release_lock(lock):
    "Release a lock currently held by Instant."
    if lock.is_locked:
        hostname, pid, lockname = lock.details
        instant_debug("Releasing lock %s." % lockname)
        lock.unlock()