Example #1
    def test_willLogAtLevel(self):
        """
        willLogAtLevel()
        """
        log = Logger()

        for level in logLevels:
            if cmpLogLevels(level, log.level()) < 0:
                self.assertFalse(log.willLogAtLevel(level))
            else:
                self.assertTrue(log.willLogAtLevel(level))
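A typical use of willLogAtLevel() outside of tests is to skip building expensive log messages. A minimal sketch, assuming the string level names used in these snippets; expensiveDump() is a hypothetical costly helper:

from twext.python.log import Logger

log = Logger()

if log.willLogAtLevel("debug"):
    # Only pay for the dump when debug output will actually be emitted.
    log.debug("state: {dump}", dump=expensiveDump())  # expensiveDump is hypothetical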
Example #2
    def test_logMethodTruthiness_Logger(self):
        """
        Logger's log level functions/methods have true/false
        value based on whether they will log.
        """
        log = Logger()

        for level in logLevels:
            enabled = getattr(log, level + "_enabled")
            if enabled:
                self.assertTrue(log.willLogAtLevel(level))
            else:
                self.assertFalse(log.willLogAtLevel(level))
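The per-level *_enabled attributes checked by this test give the same answer as willLogAtLevel(), so either works as a guard; a short sketch:

from twext.python.log import Logger

log = Logger()
if log.info_enabled:  # per the test above, same as log.willLogAtLevel("info")
    log.info("starting up")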
Example #3
    def emit(self, level, format=None, **kwargs):
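        # Debugging aid: flip this to True to dump the emitted event to stdout.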
        if False:
            print "*"*60
            print "level =", level
            print "format =", format
            for key, value in kwargs.items():
                print key, "=", value
            print "*"*60

        def observer(event):
            self.event = event

        twistedLogging.addObserver(observer)
        try:
            Logger.emit(self, level, format, **kwargs)
        finally:
            twistedLogging.removeObserver(observer)

        self.emitted = {
            "level":  level,
            "format": format,
            "kwargs": kwargs,
        }
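This emit() override captures the event that Logger.emit() routes through Twisted's legacy log system by installing an observer just for the duration of the call. The same capture pattern in isolation, using the twisted.python.log observer API:

from twisted.python import log as twistedLogging

captured = []
observer = captured.append
twistedLogging.addObserver(observer)
try:
    twistedLogging.msg("something happened")
finally:
    twistedLogging.removeObserver(observer)
# captured[0] is the observed event dict, e.g. captured[0]["message"]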
Example #4
class ReverseProxyResource(LeafResource):
    """
    A L{LeafResource} which always performs a reverse proxy operation.
    """
    log = Logger()

    implements(iweb.IResource)

    def __init__(self, poolID, *args, **kwargs):
        """

        @param poolID: identifier of the pool to use
        @type poolID: C{str}
        """

        self.poolID = poolID
        self._args = args
        self._kwargs = kwargs
        self.allowMultiHop = False

    def isCollection(self):
        return True

    def exists(self):
        return False

    @inlineCallbacks
    def renderHTTP(self, request):
        """
        Do the reverse proxy request and return the response.

        @param request: the incoming request that needs to be proxied.
        @type request: L{Request}

        @return: Deferred L{Response}
        """

        self.log.info("{method} {poolID}:{uri} {proto}", method=request.method, poolID=self.poolID, uri=request.uri, proto="HTTP/{}.{}".format(*request.clientproto))

        # Check for multi-hop
        if not self.allowMultiHop:
            x_server = request.headers.getHeader("x-forwarded-server")
            if x_server:
                for item in x_server:
                    if item.lower() == config.ServerHostName.lower():
                        self.log.error("ReverseProxy loop detected: x-forwarded-server:{xfs}", xfs=str(x_server))
                        raise HTTPError(StatusResponse(responsecode.BAD_GATEWAY, "Too many x-forwarded-server hops"))

        clientPool = getHTTPClientPool(self.poolID)
        proxyRequest = ClientRequest(request.method, request.uri, request.headers, request.stream)

        # Need x-forwarded-(for|host|server) headers. First strip any existing ones out, then add ours
        proxyRequest.headers.removeHeader("x-forwarded-host")
        proxyRequest.headers.removeHeader("x-forwarded-for")
        proxyRequest.headers.removeHeader("x-forwarded-server")
        proxyRequest.headers.addRawHeader("x-forwarded-host", request.host)
        proxyRequest.headers.addRawHeader("x-forwarded-for", request.remoteAddr.host)
        proxyRequest.headers.addRawHeader("x-forwarded-server", config.ServerHostName)

        try:
            response = yield clientPool.submitRequest(proxyRequest)
        except Exception as e:
            self.log.error("ReverseProxy failed: {exc}", exc=str(e))
            raise HTTPError(StatusResponse(responsecode.BAD_GATEWAY, "Cannot connect via poolID={poolID}".format(poolID=self.poolID)))

        returnValue(response)
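Two details above are the core of any reverse proxy: inbound x-forwarded-* headers are stripped before trusted values are appended (so clients cannot spoof them), and the loop check rejects requests that have already passed through this host. The loop check as a standalone sketch:

def isProxyLoop(xForwardedServer, serverHostName):
    # xForwardedServer is the parsed header value: one host name per proxy
    # hop. A loop exists if our own name already appears in the list.
    if not xForwardedServer:
        return False
    return any(hop.lower() == serverHostName.lower() for hop in xForwardedServer)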
Example #5
"""
CalDAV implicit processing.

This module handles the processing of scheduling messages being delivered to a calendar user's inbox.
It determines who is scheduling (organizer or attendee) and applies the scheduling message changes
to the recipient's calendar data as well as depositing the scheduling message in the inbox. For users
who have an auto-accept option on, it will also handle the automatic response. Also, refreshes of other
attendees (when one attendee replies) are triggered from here.
"""

__all__ = [
    "ImplicitProcessor",
    "ImplicitProcessorException",
]

log = Logger()

class ImplicitProcessorException(Exception):

    def __init__(self, msg):
        self.msg = msg



class ImplicitProcessor(object):

    def __init__(self):
        pass


    @inlineCallbacks
Example #6
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.python.failure import Failure
from twext.web2 import responsecode
from txdav.xml import element as davxml
from twext.web2.dav.http import MultiStatusResponse, PropertyStatusResponseQueue
from twext.web2.dav.util import davXMLFromStream
from twext.web2.dav.util import parentForURL
from twext.web2.http import HTTPError, StatusResponse

from twext.python.log import Logger
from twext.web2.dav.http import ErrorResponse

from twistedcaldav import caldavxml

log = Logger()

@inlineCallbacks
def http_MKCALENDAR(self, request):
    """
    Respond to a MKCALENDAR request.
    (CalDAV-access-09, section 5.3.1)
    """

    #
    # Check authentication and access controls
    #
    parent = (yield request.locateResource(parentForURL(request.uri)))
    yield parent.authorize(request, (davxml.Bind(),))

    if self.exists():
Example #7
from twext.python.log import Logger

from twisted.internet import reactor, protocol
from twisted.internet.defer import inlineCallbacks, Deferred, returnValue
from twisted.web import http_headers
from twisted.web.client import Agent
from twisted.web.http import MOVED_PERMANENTLY, TEMPORARY_REDIRECT, FOUND

from urlparse import urlparse
from urlparse import urlunparse

__all__ = [
    "getURL",
]

log = Logger()

class AccumulatingProtocol(protocol.Protocol):
    """
    L{AccumulatingProtocol} is an L{IProtocol} implementation which collects
    the data delivered to it and can fire a Deferred when it is connected or
    disconnected.

    @ivar made: A flag indicating whether C{connectionMade} has been called.
    @ivar data: A string giving all the data passed to C{dataReceived}.
    @ivar closed: A flag indicating whether C{connectionLost} has been called.
    @ivar closedReason: The value of the I{reason} parameter passed to
        C{connectionLost}.
    @ivar closedDeferred: If set to a L{Deferred}, this will be fired when
        C{connectionLost} is called.
    """
Example #8
from pycalendar.datetime import DateTime

import datetime
import hashlib
import traceback

__all__ = [
    "ScheduleOrganizerWork",
    "ScheduleReplyWork",
    "ScheduleReplyCancelWork",
    "ScheduleRefreshWork",
    "ScheduleAutoReplyWork",
]

log = Logger()



class ScheduleWorkMixin(WorkItem):
    """
    Base class for common schedule work item behavior. Sub-classes have their own class-specific data
    stored in per-class tables. This class manages a SCHEDULE_WORK table that contains the work id, job id
    and iCalendar UID. That table is used for locking all scheduling items with the same UID, as well as
    allowing smart re-scheduling/ordering etc. of items with the same UID.
    """

    # Track when all work is complete (needed for unit tests)
    _allDoneCallback = None
    _queued = 0
Example #9
class DataStoreTransaction(object):
    """
    In-memory implementation of a data store transaction.
    """
    log = Logger()

    def __init__(self, dataStore, name):
        """
        Initialize a transaction; do not call this directly, instead call
        L{CalendarStore.newTransaction}.

        @param dataStore: The store that created this transaction.

        @type dataStore: L{CalendarStore}
        """
        self._dataStore = dataStore
        self._termination = None
        self._operations = []
        self._postCommitOperations = []
        self._postAbortOperations = []
        self._tracker = _CommitTracker(name)

    def store(self):
        return self._dataStore

    def addOperation(self, operation, name):
        self._operations.append(operation)
        self._tracker.info.append(name)

    def _terminate(self, mode):
        """
        Check to see if this transaction has already been terminated somehow,
        either via committing or aborting, and if not, note that it has been
        terminated.

        @param mode: The manner of the termination of this transaction.

        @type mode: C{str}

        @raise AlreadyFinishedError: This transaction has already been
            terminated.
        """
        if self._termination is not None:
            raise AlreadyFinishedError("already %s" % (self._termination,))
        self._termination = mode
        self._tracker.done = True

    def abort(self):
        self._terminate("aborted")

        for operation in self._postAbortOperations:
            operation()

    def commit(self):
        self._terminate("committed")

        self.committed = True
        undos = []

        for operation in self._operations:
            try:
                undo = operation()
                if undo is not None:
                    undos.append(undo)
            except:
                self.log.debug("Undoing DataStoreTransaction")
                for undo in undos:
                    try:
                        undo()
                    except:
                        self.log.error("Cannot undo DataStoreTransaction")
                raise

        for operation in self._postCommitOperations:
            operation()

    def postCommit(self, operation):
        self._postCommitOperations.append(operation)

    def postAbort(self, operation):
        self._postAbortOperations.append(operation)
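commit() runs the queued operations in order; if one raises, the undo callables returned by the operations that already ran are invoked before the exception propagates. A hypothetical usage sketch (dataStore and the record list are stand-ins):

records = []
done = []
txn = DataStoreTransaction(dataStore, "example")  # dataStore is hypothetical

def createRecord():
    records.append("new")           # the forward operation
    return lambda: records.pop()    # its undo, run only if a later operation fails

txn.addOperation(createRecord, "create record")
txn.postCommit(lambda: done.append(True))  # runs only after a clean commit
txn.commit()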
Example #10
]

import string

from twisted.internet.defer import deferredGenerator, waitForDeferred

from twext.python.log import Logger
from txweb2 import responsecode
from txweb2.http import HTTPError, StatusResponse
from txweb2.dav.http import ErrorResponse
from txweb2.dav.util import davXMLFromStream
from txdav.xml import element as davxml
from txdav.xml.element import lookupElement
from txdav.xml.base import encodeXMLName

log = Logger()


max_number_of_matches = 500


class NumberOfMatchesWithinLimits(Exception):

    def __init__(self, limit):

        super(NumberOfMatchesWithinLimits, self).__init__()
        self.limit = limit

    def maxLimit(self):
        return self.limit
Example #11
__all__ = ["DAVFile"]

from twisted.python.filepath import InsecurePath
from twisted.internet.defer import succeed, deferredGenerator, waitForDeferred

from twext.python.log import Logger
from txweb2 import http_headers
from txweb2 import responsecode
from txweb2.dav.resource import DAVResource, davPrivilegeSet
from txweb2.dav.resource import TwistedGETContentMD5
from txweb2.dav.util import bindMethods
from txweb2.http import HTTPError, StatusResponse
from txweb2.static import File

log = Logger()


try:
    from txweb2.dav.xattrprops import xattrPropertyStore as DeadPropertyStore
except ImportError:
    log.info("No dead property store available; using nonePropertyStore.")
    log.info("Setting of dead properties will not be allowed.")
    from txweb2.dav.noneprops import NonePropertyStore as DeadPropertyStore

class DAVFile (DAVResource, File):
    """
    WebDAV-accessible File resource.

    Extends txweb2.static.File to handle WebDAV methods.
    """
Example #12
class MailSender(object):
    """
    Generates outbound IMIP messages and sends them.
    """
    log = Logger()

    def __init__(self, address, suppressionDays, smtpSender, language):
        self.address = address
        self.suppressionDays = suppressionDays
        self.smtpSender = smtpSender
        self.language = language

    @inlineCallbacks
    def outbound(self, txn, originator, recipient, calendar, onlyAfter=None):
        """
        Generates and sends an outbound IMIP message.

        @param txn: the transaction to use for looking up/creating tokens
        @type txn: L{CommonStoreTransaction}
        """

        if onlyAfter is None:
            duration = Duration(days=self.suppressionDays)
            onlyAfter = DateTime.getNowUTC() - duration

        icaluid = calendar.resourceUID()
        method = calendar.propertyValue("METHOD")

        # Clean up the attendee list which is purely used within the human
        # readable email message (not modifying the calendar body)
        attendees = []
        for attendeeProp in calendar.getAllAttendeeProperties():
            cutype = attendeeProp.parameterValue("CUTYPE", "INDIVIDUAL")
            if cutype == "INDIVIDUAL":
                cn = attendeeProp.parameterValue("CN", None)
                if cn is not None:
                    cn = cn.decode("utf-8")
                cuaddr = normalizeCUAddr(attendeeProp.value())
                if cuaddr.startswith("mailto:"):
                    mailto = cuaddr[7:]
                    if not cn:
                        cn = mailto
                else:
                    emailAddress = attendeeProp.parameterValue("EMAIL", None)
                    if emailAddress:
                        mailto = emailAddress
                    else:
                        mailto = None

                if cn or mailto:
                    attendees.append((cn, mailto))

        toAddr = recipient
        if not recipient.lower().startswith("mailto:"):
            raise ValueError("ATTENDEE address '%s' must be mailto: for iMIP "
                             "operation." % (recipient, ))
        recipient = recipient[7:]

        if method != "REPLY":
            # Invites and cancellations:

            # Reuse or generate a token based on originator, toAddr, and
            # event uid
            record = (yield txn.imipGetToken(originator, toAddr.lower(),
                                             icaluid))
            if record is None:

                # Because in the past the originator was sometimes in mailto:
                # form, lookup an existing token by mailto: as well
                organizerProperty = calendar.getOrganizerProperty()
                organizerEmailAddress = organizerProperty.parameterValue(
                    "EMAIL", None)
                if organizerEmailAddress is not None:
                    record = (yield txn.imipGetToken(
                        "mailto:%s" % (organizerEmailAddress.lower(), ),
                        toAddr.lower(), icaluid))

            if record is None:
                record = (yield txn.imipCreateToken(originator, toAddr.lower(),
                                                    icaluid))
                self.log.debug(
                    "Mail gateway created token {token}for {orig} "
                    "(originator), {recip} (recipient) and {uid} (icaluid)",
                    token=record.token,
                    orig=originator,
                    recip=toAddr,
                    uid=icaluid,
                )
                inviteState = "new"

            else:
                self.log.debug(
                    "Mail gateway reusing token {token} for {orig} "
                    "(originator), {recip} (recipient) and {uid} (icaluid)",
                    token=record.token,
                    orig=originator,
                    recip=toAddr,
                    uid=icaluid,
                )
                inviteState = "update"
            token = record.token

            fullServerAddress = self.address
            _ignore_name, serverAddress = email.utils.parseaddr(
                fullServerAddress)
            pre, post = serverAddress.split('@')
            addressWithToken = "%s+%s@%s" % (pre, token, post)

            organizerProperty = calendar.getOrganizerProperty()
            organizerEmailAddress = organizerProperty.parameterValue(
                "EMAIL", None)
            organizerValue = organizerProperty.value()
            organizerProperty.setValue("mailto:%s" % (addressWithToken, ))

            # If the organizer is also an attendee, update that attendee value
            # to match
            organizerAttendeeProperty = calendar.getAttendeeProperty(
                [organizerValue])
            if organizerAttendeeProperty is not None:
                organizerAttendeeProperty.setValue("mailto:%s" %
                                                   (addressWithToken, ))

            # The email's From will include the originator's real name and
            # email address if available.  Otherwise it will be the server's
            # email address (without "+" addressing)
            if organizerEmailAddress:
                orgEmail = fromAddr = organizerEmailAddress
            else:
                fromAddr = serverAddress
                orgEmail = None
            cn = calendar.getOrganizerProperty().parameterValue('CN', None)
            if cn is None:
                cn = u'Calendar Server'
                orgCN = orgEmail
            else:
                orgCN = cn = cn.decode("utf-8")

            # a unicode cn (rather than an encoded str value) means the
            # from address will get properly encoded per RFC 2047 within the
            # MIMEMultipart in generateEmail
            formattedFrom = "%s <%s>" % (cn, fromAddr)

            # Reply-to address will be the server+token address

        else:  # REPLY
            inviteState = "reply"

            # Look up the attendee property corresponding to the originator
            # of this reply
            originatorAttendeeProperty = calendar.getAttendeeProperty(
                [originator])
            formattedFrom = fromAddr = originator = ""
            if originatorAttendeeProperty:
                originatorAttendeeEmailAddress = (
                    originatorAttendeeProperty.parameterValue("EMAIL", None))
                if originatorAttendeeEmailAddress:
                    formattedFrom = fromAddr = originator = (
                        originatorAttendeeEmailAddress)

            organizerMailto = str(calendar.getOrganizer())
            if not organizerMailto.lower().startswith("mailto:"):
                raise ValueError("ORGANIZER address '%s' must be mailto: "
                                 "for REPLY." % (organizerMailto, ))
            orgEmail = organizerMailto[7:]

            orgCN = calendar.getOrganizerProperty().parameterValue('CN', None)
            if orgCN:
                orgCN = orgCN.decode("utf-8")
            addressWithToken = formattedFrom

        # At this point we've created the token in the db, which we always
        # want to do; but if this message is for an event completely in
        # the past we don't want to actually send an email.
        if not calendar.hasInstancesAfter(onlyAfter):
            self.log.debug("Skipping IMIP message for old event")
            returnValue(True)

        # Now prevent any "internal" CUAs from being exposed by converting
        # to mailto: if we have one
        for attendeeProp in calendar.getAllAttendeeProperties():
            cutype = attendeeProp.parameterValue('CUTYPE', None)
            if cutype == "INDIVIDUAL":
                cuaddr = normalizeCUAddr(attendeeProp.value())
                if not cuaddr.startswith("mailto:"):
                    emailAddress = attendeeProp.parameterValue("EMAIL", None)
                    if emailAddress:
                        attendeeProp.setValue("mailto:%s" % (emailAddress, ))

        msgId, message = self.generateEmail(inviteState,
                                            calendar,
                                            orgEmail,
                                            orgCN,
                                            attendees,
                                            formattedFrom,
                                            addressWithToken,
                                            recipient,
                                            language=self.language)

        try:
            success = (yield
                       self.smtpSender.sendMessage(fromAddr, toAddr, msgId,
                                                   message))
            returnValue(success)
        except Exception as e:
            self.log.error("Failed to send IMIP message ({ex})", ex=str(e))
            returnValue(False)
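The reply-to address is built with "+" addressing: the token is spliced into the local part of the server's address so an inbound reply can be matched back to its token. The splice in isolation, using the same split as above:

def addressWithToken(serverAddress, token):
    # "caldav@example.com" + "t0k3n" -> "caldav+t0k3n@example.com"
    pre, post = serverAddress.split("@")
    return "%s+%s@%s" % (pre, token, post)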
Example #14
class NotificationObject(FancyEqMixin, object):
    """
    This used to store XML data and an XML element for the type. But we are now switching it
    to use JSON internally. The app layer will convert that to XML and fill in the "blanks" as
    needed for the app.
    """
    log = Logger()

    implements(INotificationObject)

    compareAttributes = (
        "_resourceID",
        "_home",
    )

    _objectSchema = schema.NOTIFICATION

    def __init__(self, home, uid):
        self._home = home
        self._resourceID = None
        self._uid = uid
        self._md5 = None
        self._size = None
        self._created = None
        self._modified = None
        self._notificationType = None
        self._notificationData = None

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self._resourceID)

    @classproperty
    def _allColumnsByHomeIDQuery(cls):
        """
        DAL query to load all columns by home ID.
        """
        obj = cls._objectSchema
        return Select(
            [
                obj.RESOURCE_ID, obj.NOTIFICATION_UID, obj.MD5,
                Len(obj.NOTIFICATION_DATA), obj.NOTIFICATION_TYPE, obj.CREATED,
                obj.MODIFIED
            ],
            From=obj,
            Where=(obj.NOTIFICATION_HOME_RESOURCE_ID == Parameter("homeID")))

    @classmethod
    @inlineCallbacks
    def loadAllObjects(cls, parent):
        """
        Load all child objects and return a list of them. This must create the
        child classes and initialize them using "batched" SQL operations to keep
        the number of queries constant with respect to the number of children.
        This is an optimization for Depth:1 operations on the collection.
        """

        results = []

        # Load from the main table first
        dataRows = (yield
                    cls._allColumnsByHomeIDQuery.on(parent._txn,
                                                    homeID=parent._resourceID))

        if dataRows:
            # Get property stores for all these child resources (if any found)
            propertyStores = (yield PropertyStore.forMultipleResources(
                parent.uid(),
                None,
                None,
                parent._txn,
                schema.NOTIFICATION.RESOURCE_ID,
                schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID,
                parent._resourceID,
            ))

        # Create the actual objects merging in properties
        for row in dataRows:
            child = cls(parent, None)
            (
                child._resourceID,
                child._uid,
                child._md5,
                child._size,
                child._notificationType,
                child._created,
                child._modified,
            ) = tuple(row)
            child._created = parseSQLTimestamp(child._created)
            child._modified = parseSQLTimestamp(child._modified)
            try:
                child._notificationType = json.loads(child._notificationType)
            except ValueError:
                pass
            if isinstance(child._notificationType, unicode):
                child._notificationType = child._notificationType.encode(
                    "utf-8")
            child._loadPropertyStore(
                props=propertyStores.get(child._resourceID, None))
            results.append(child)

        returnValue(results)

    @classproperty
    def _oneNotificationQuery(cls):
        no = cls._objectSchema
        return Select(
            [
                no.RESOURCE_ID, no.MD5,
                Len(no.NOTIFICATION_DATA), no.NOTIFICATION_TYPE, no.CREATED,
                no.MODIFIED
            ],
            From=no,
            Where=(no.NOTIFICATION_UID == Parameter("uid")).And(
                no.NOTIFICATION_HOME_RESOURCE_ID == Parameter("homeID")))

    @inlineCallbacks
    def initFromStore(self):
        """
        Initialise this object from the store, based on its UID and home
        resource ID. We read in and cache all the extra metadata from the DB to
        avoid having to do DB queries for those individually later.

        @return: L{self} if object exists in the DB, else C{None}
        """
        rows = (yield
                self._oneNotificationQuery.on(self._txn,
                                              uid=self._uid,
                                              homeID=self._home._resourceID))
        if rows:
            (
                self._resourceID,
                self._md5,
                self._size,
                self._notificationType,
                self._created,
                self._modified,
            ) = tuple(rows[0])
            self._created = parseSQLTimestamp(self._created)
            self._modified = parseSQLTimestamp(self._modified)
            try:
                self._notificationType = json.loads(self._notificationType)
            except ValueError:
                pass
            if isinstance(self._notificationType, unicode):
                self._notificationType = self._notificationType.encode("utf-8")
            self._loadPropertyStore()
            returnValue(self)
        else:
            returnValue(None)

    def _loadPropertyStore(self, props=None, created=False):
        if props is None:
            props = NonePropertyStore(self._home.uid())
        self._propertyStore = props

    def properties(self):
        return self._propertyStore

    def id(self):
        """
        Retrieve the store identifier for this object.

        @return: store identifier.
        @rtype: C{int}
        """
        return self._resourceID

    @property
    def _txn(self):
        return self._home._txn

    def notificationCollection(self):
        return self._home

    def uid(self):
        return self._uid

    def name(self):
        return self.uid() + ".xml"

    @classproperty
    def _newNotificationQuery(cls):
        no = cls._objectSchema
        return Insert(
            {
                no.NOTIFICATION_HOME_RESOURCE_ID: Parameter("homeID"),
                no.NOTIFICATION_UID: Parameter("uid"),
                no.NOTIFICATION_TYPE: Parameter("notificationType"),
                no.NOTIFICATION_DATA: Parameter("notificationData"),
                no.MD5: Parameter("md5"),
            },
            Return=[no.RESOURCE_ID, no.CREATED, no.MODIFIED])

    @classproperty
    def _updateNotificationQuery(cls):
        no = cls._objectSchema
        return Update(
            {
                no.NOTIFICATION_TYPE: Parameter("notificationType"),
                no.NOTIFICATION_DATA: Parameter("notificationData"),
                no.MD5: Parameter("md5"),
            },
            Where=(
                no.NOTIFICATION_HOME_RESOURCE_ID == Parameter("homeID")).And(
                    no.NOTIFICATION_UID == Parameter("uid")),
            Return=no.MODIFIED)

    @inlineCallbacks
    def setData(self,
                uid,
                notificationtype,
                notificationdata,
                inserting=False):
        """
        Set the object resource data and update the cached metadata.
        """

        notificationtext = json.dumps(notificationdata)
        self._notificationType = notificationtype
        self._md5 = hashlib.md5(notificationtext).hexdigest()
        self._size = len(notificationtext)
        if inserting:
            rows = yield self._newNotificationQuery.on(
                self._txn,
                homeID=self._home._resourceID,
                uid=uid,
                notificationType=json.dumps(self._notificationType),
                notificationData=notificationtext,
                md5=self._md5)
            self._resourceID, self._created, self._modified = (
                rows[0][0],
                parseSQLTimestamp(rows[0][1]),
                parseSQLTimestamp(rows[0][2]),
            )
            self._loadPropertyStore()
        else:
            rows = yield self._updateNotificationQuery.on(
                self._txn,
                homeID=self._home._resourceID,
                uid=uid,
                notificationType=json.dumps(self._notificationType),
                notificationData=notificationtext,
                md5=self._md5)
            self._modified = parseSQLTimestamp(rows[0][0])
        self._notificationData = notificationdata

    _notificationDataFromID = Select(
        [_objectSchema.NOTIFICATION_DATA],
        From=_objectSchema,
        Where=_objectSchema.RESOURCE_ID == Parameter("resourceID"))

    @inlineCallbacks
    def notificationData(self):
        if self._notificationData is None:
            self._notificationData = (yield self._notificationDataFromID.on(
                self._txn, resourceID=self._resourceID))[0][0]
            try:
                self._notificationData = json.loads(self._notificationData)
            except ValueError:
                pass
            if isinstance(self._notificationData, unicode):
                self._notificationData = self._notificationData.encode("utf-8")
        returnValue(self._notificationData)

    def contentType(self):
        """
        The content type of NotificationObjects is text/xml.
        """
        return MimeType.fromString("text/xml")

    def md5(self):
        return self._md5

    def size(self):
        return self._size

    def notificationType(self):
        return self._notificationType

    def created(self):
        return datetimeMktime(self._created)

    def modified(self):
        return datetimeMktime(self._modified)
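Several places above tolerate pre-JSON rows by attempting json.loads() and falling back to the raw value, then normalizing unicode to UTF-8 bytes. The pattern in isolation (Python 2, where unicode is a distinct type from str):

import json

def decodeStoredValue(raw):
    # Old rows hold plain text; newer rows hold JSON. Try JSON first.
    try:
        value = json.loads(raw)
    except ValueError:
        value = raw
    if isinstance(value, unicode):
        value = value.encode("utf-8")
    return value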
Example #15
class NotificationCollection(FancyEqMixin, _SharedSyncLogic):
    log = Logger()

    implements(INotificationCollection)

    compareAttributes = (
        "_ownerUID",
        "_resourceID",
    )

    _revisionsSchema = schema.NOTIFICATION_OBJECT_REVISIONS
    _homeSchema = schema.NOTIFICATION_HOME

    _externalClass = None

    @classmethod
    def makeClass(cls, transaction, homeData):
        """
        Build the actual home class taking into account the possibility that we might need to
        switch in the external version of the class.

        @param transaction: transaction
        @type transaction: L{CommonStoreTransaction}
        @param homeData: home table column data
        @type homeData: C{list}
        """

        status = homeData[cls.homeColumns().index(cls._homeSchema.STATUS)]
        if status == _HOME_STATUS_EXTERNAL:
            home = cls._externalClass(transaction, homeData)
        else:
            home = cls(transaction, homeData)
        return home.initFromStore()

    @classmethod
    def homeColumns(cls):
        """
        Return a list of column names to retrieve when doing an ownerUID->home lookup.
        """

        # Common behavior is to have created and modified

        return (
            cls._homeSchema.RESOURCE_ID,
            cls._homeSchema.OWNER_UID,
            cls._homeSchema.STATUS,
        )

    @classmethod
    def homeAttributes(cls):
        """
        Return a list of attribute names to map L{homeColumns} to.
        """

        # Common behavior is to have created and modified

        return (
            "_resourceID",
            "_ownerUID",
            "_status",
        )

    def __init__(self, txn, homeData):

        self._txn = txn

        for attr, value in zip(self.homeAttributes(), homeData):
            setattr(self, attr, value)

        self._dataVersion = None
        self._notifications = {}
        self._notificationNames = None
        self._syncTokenRevision = None

        # Make sure we have push notifications setup to push on this collection
        # as well as the home it is in
        self._notifiers = dict(
            (factory_name, factory.newNotifier(self))
            for factory_name, factory in txn._notifierFactories.items()
        )

    @inlineCallbacks
    def initFromStore(self):
        """
        Initialize this object from the store.
        """

        yield self._loadPropertyStore()
        returnValue(self)

    @property
    def _home(self):
        """
        L{NotificationCollection} serves as its own C{_home} for the purposes of
        working with L{_SharedSyncLogic}.
        """
        return self

    @classmethod
    def notificationsWithUID(cls, txn, uid, status=None, create=False):
        return cls.notificationsWith(txn,
                                     None,
                                     uid,
                                     status=status,
                                     create=create)

    @classmethod
    def notificationsWithResourceID(cls, txn, rid):
        return cls.notificationsWith(txn, rid, None)

    @classmethod
    @inlineCallbacks
    def notificationsWith(cls, txn, rid, uid, status=None, create=False):
        """
        @param uid: I'm going to assume uid is utf-8 encoded bytes
        """
        if rid is not None:
            query = cls._homeSchema.RESOURCE_ID == rid
        elif uid is not None:
            query = cls._homeSchema.OWNER_UID == uid
            if status is not None:
                query = query.And(cls._homeSchema.STATUS == status)
            else:
                statusSet = (
                    _HOME_STATUS_NORMAL,
                    _HOME_STATUS_EXTERNAL,
                )
                if txn._allowDisabled:
                    statusSet += (_HOME_STATUS_DISABLED, )
                query = query.And(cls._homeSchema.STATUS.In(statusSet))
        else:
            raise AssertionError("One of rid or uid must be set")

        results = yield Select(
            cls.homeColumns(),
            From=cls._homeSchema,
            Where=query,
        ).on(txn)

        if len(results) > 1:
            # Pick the best one in order: normal, disabled and external
            byStatus = dict([
                (result[cls.homeColumns().index(cls._homeSchema.STATUS)],
                 result) for result in results
            ])
            result = byStatus.get(_HOME_STATUS_NORMAL)
            if result is None:
                result = byStatus.get(_HOME_STATUS_DISABLED)
            if result is None:
                result = byStatus.get(_HOME_STATUS_EXTERNAL)
        elif results:
            result = results[0]
        else:
            result = None

        if result:
            # Return object that already exists in the store
            homeObject = yield cls.makeClass(txn, result)
            returnValue(homeObject)
        else:
            # Can only create when uid is specified
            if not create or uid is None:
                returnValue(None)

            # Determine if the user is local or external
            record = yield txn.directoryService().recordWithUID(
                uid.decode("utf-8"))
            if record is None:
                raise DirectoryRecordNotFoundError(
                    "Cannot create home for UID since no directory record exists: {}"
                    .format(uid))

            if status is None:
                createStatus = (
                    _HOME_STATUS_NORMAL if record.thisServer()
                    else _HOME_STATUS_EXTERNAL
                )
            elif status == _HOME_STATUS_MIGRATING:
                if record.thisServer():
                    raise RecordNotAllowedError(
                        "Cannot migrate data for a user already hosted on this server"
                    )
                createStatus = status
            elif status in (
                    _HOME_STATUS_NORMAL,
                    _HOME_STATUS_EXTERNAL,
            ):
                createStatus = status
            else:
                raise RecordNotAllowedError(
                    "Cannot create home with status {}: {}".format(
                        status, uid))

            # Use savepoint so we can do a partial rollback if there is a race
            # condition where this row has already been inserted
            savepoint = SavepointAction("notificationsWithUID")
            yield savepoint.acquire(txn)

            try:
                resourceid = (yield Insert(
                    {
                        cls._homeSchema.OWNER_UID: uid,
                        cls._homeSchema.STATUS: createStatus,
                    },
                    Return=cls._homeSchema.RESOURCE_ID).on(txn))[0][0]
            except Exception:
                # FIXME: Really want to trap the pg.DatabaseError but in a non-
                # DB specific manner
                yield savepoint.rollback(txn)

                # Retry the query - row may exist now, if not re-raise
                results = yield Select(
                    cls.homeColumns(),
                    From=cls._homeSchema,
                    Where=query,
                ).on(txn)
                if results:
                    homeObject = yield cls.makeClass(txn, results[0])
                    returnValue(homeObject)
                else:
                    raise
            else:
                yield savepoint.release(txn)

                # Note that we must not cache the owner_uid->resource_id
                # mapping in the query cacher when creating as we don't want that to appear
                # until AFTER the commit
                results = yield Select(
                    cls.homeColumns(),
                    From=cls._homeSchema,
                    Where=cls._homeSchema.RESOURCE_ID == resourceid,
                ).on(txn)
                homeObject = yield cls.makeClass(txn, results[0])
                if homeObject.normal():
                    yield homeObject._initSyncToken()
                    yield homeObject.notifyChanged()
                returnValue(homeObject)

    @inlineCallbacks
    def _loadPropertyStore(self):
        self._propertyStore = yield PropertyStore.load(
            self._ownerUID,
            self._ownerUID,
            None,
            self._txn,
            self._resourceID,
            notifyCallback=self.notifyChanged)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self._resourceID)

    def id(self):
        """
        Retrieve the store identifier for this collection.

        @return: store identifier.
        @rtype: C{int}
        """
        return self._resourceID

    @classproperty
    def _dataVersionQuery(cls):
        nh = cls._homeSchema
        return Select([nh.DATAVERSION],
                      From=nh,
                      Where=nh.RESOURCE_ID == Parameter("resourceID"))

    @inlineCallbacks
    def dataVersion(self):
        if self._dataVersion is None:
            self._dataVersion = (yield self._dataVersionQuery.on(
                self._txn, resourceID=self._resourceID))[0][0]
        returnValue(self._dataVersion)

    def name(self):
        return "notification"

    def uid(self):
        return self._ownerUID

    def status(self):
        return self._status

    @inlineCallbacks
    def setStatus(self, newStatus):
        """
        Mark this home as being purged.
        """
        # Only if different
        if self._status != newStatus:
            yield Update(
                {
                    self._homeSchema.STATUS: newStatus
                },
                Where=(self._homeSchema.RESOURCE_ID == self._resourceID),
            ).on(self._txn)
            self._status = newStatus

    def normal(self):
        """
        Is this a normal (internal) home.

        @return: a L{bool}.
        """
        return self._status == _HOME_STATUS_NORMAL

    def external(self):
        """
        Is this an external home.

        @return: a L{bool}.
        """
        return self._status == _HOME_STATUS_EXTERNAL

    def owned(self):
        return True

    def ownerHome(self):
        return self._home

    def viewerHome(self):
        return self._home

    def notificationObjectRecords(self):
        return NotificationObjectRecord.querysimple(
            self._txn, notificationHomeResourceID=self.id())

    @inlineCallbacks
    def notificationObjects(self):
        results = (yield NotificationObject.loadAllObjects(self))
        for result in results:
            self._notifications[result.uid()] = result
        self._notificationNames = sorted([result.name() for result in results])
        returnValue(results)

    _notificationUIDsForHomeQuery = Select(
        [schema.NOTIFICATION.NOTIFICATION_UID],
        From=schema.NOTIFICATION,
        Where=schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID == Parameter(
            "resourceID"))

    @inlineCallbacks
    def listNotificationObjects(self):
        """
        List the names of all notification objects in this collection. Note that the name
        is actually the UID value with ".xml" appended, as per L{NotificationObject.name}.
        """
        if self._notificationNames is None:
            rows = yield self._notificationUIDsForHomeQuery.on(
                self._txn, resourceID=self._resourceID)
            self._notificationNames = sorted([row[0] + ".xml" for row in rows])
        returnValue(self._notificationNames)

    # used by _SharedSyncLogic.resourceNamesSinceRevision()
    def listObjectResources(self):
        return self.listNotificationObjects()

    def _nameToUID(self, name):
        """
        Based on the file-backed implementation, the 'name' is just uid +
        ".xml".
        """
        return name.rsplit(".", 1)[0]

    def notificationObjectWithName(self, name):
        return self.notificationObjectWithUID(self._nameToUID(name))

    @memoizedKey("uid", "_notifications")
    @inlineCallbacks
    def notificationObjectWithUID(self, uid):
        """
        Create an empty notification object first then have it initialize itself
        from the store.
        """
        no = NotificationObject(self, uid)
        no = (yield no.initFromStore())
        returnValue(no)

    @inlineCallbacks
    def writeNotificationObject(self, uid, notificationtype, notificationdata):

        inserting = False
        notificationObject = yield self.notificationObjectWithUID(uid)
        if notificationObject is None:
            notificationObject = NotificationObject(self, uid)
            inserting = True
        yield notificationObject.setData(uid,
                                         notificationtype,
                                         notificationdata,
                                         inserting=inserting)
        if inserting:
            yield self._insertRevision(notificationObject.name())
            if self._notificationNames is not None:
                self._notificationNames.append(notificationObject.name())
        else:
            yield self._updateRevision(notificationObject.name())
        yield self.notifyChanged()
        returnValue(notificationObject)

    def removeNotificationObjectWithName(self, name):
        if self._notificationNames is not None:
            self._notificationNames.remove(name)
        return self.removeNotificationObjectWithUID(self._nameToUID(name))

    _removeByUIDQuery = Delete(
        From=schema.NOTIFICATION,
        Where=(schema.NOTIFICATION.NOTIFICATION_UID == Parameter("uid")).And(
            schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID == Parameter(
                "resourceID")))

    @inlineCallbacks
    def removeNotificationObjectWithUID(self, uid):
        yield self._removeByUIDQuery.on(self._txn,
                                        uid=uid,
                                        resourceID=self._resourceID)
        self._notifications.pop(uid, None)
        yield self._deleteRevision("%s.xml" % (uid, ))
        yield self.notifyChanged()

    _initSyncTokenQuery = Insert(
        {
            _revisionsSchema.HOME_RESOURCE_ID: Parameter("resourceID"),
            _revisionsSchema.RESOURCE_NAME: None,
            _revisionsSchema.REVISION: schema.REVISION_SEQ,
            _revisionsSchema.DELETED: False
        },
        Return=_revisionsSchema.REVISION)

    @inlineCallbacks
    def _initSyncToken(self):
        self._syncTokenRevision = (yield self._initSyncTokenQuery.on(
            self._txn, resourceID=self._resourceID))[0][0]

    _syncTokenQuery = Select(
        [Max(_revisionsSchema.REVISION)],
        From=_revisionsSchema,
        Where=_revisionsSchema.HOME_RESOURCE_ID == Parameter("resourceID"))

    @inlineCallbacks
    def syncToken(self):
        if self._syncTokenRevision is None:
            self._syncTokenRevision = yield self.syncTokenRevision()
        returnValue("%s_%s" % (self._resourceID, self._syncTokenRevision))

    @inlineCallbacks
    def syncTokenRevision(self):
        revision = (yield
                    self._syncTokenQuery.on(self._txn,
                                            resourceID=self._resourceID))[0][0]
        if revision is None:
            revision = int(
                (yield self._txn.calendarserverValue("MIN-VALID-REVISION")))
        returnValue(revision)

    def properties(self):
        return self._propertyStore

    def addNotifier(self, factory_name, notifier):
        if self._notifiers is None:
            self._notifiers = {}
        self._notifiers[factory_name] = notifier

    def getNotifier(self, factory_name):
        return self._notifiers.get(factory_name)

    def notifierID(self):
        return (
            self._txn._homeClass[self._txn._primaryHomeType]._notifierPrefix,
            "%s/notification" % (self.ownerHome().uid(), ),
        )

    def parentNotifierID(self):
        return (
            self._txn._homeClass[self._txn._primaryHomeType]._notifierPrefix,
            "%s" % (self.ownerHome().uid(), ),
        )

    @inlineCallbacks
    def notifyChanged(self, category=ChangeCategory.default):
        """
        Send notifications, change sync token and bump last modified because
        the resource has changed.  We ensure we only do this once per object
        per transaction.
        """
        if self._txn.isNotifiedAlready(self):
            returnValue(None)
        self._txn.notificationAddedForObject(self)

        # Send notifications
        if self._notifiers:
            # cache notifiers run in post commit
            notifier = self._notifiers.get("cache", None)
            if notifier:
                self._txn.postCommit(notifier.notify)
            # push notifiers add their work items immediately
            notifier = self._notifiers.get("push", None)
            if notifier:
                yield notifier.notify(self._txn, priority=category.value)

        returnValue(None)

    @classproperty
    def _completelyNewRevisionQuery(cls):
        rev = cls._revisionsSchema
        return Insert(
            {
                rev.HOME_RESOURCE_ID: Parameter("homeID"),
                # rev.RESOURCE_ID: Parameter("resourceID"),
                rev.RESOURCE_NAME: Parameter("name"),
                rev.REVISION: schema.REVISION_SEQ,
                rev.DELETED: False,
            },
            Return=rev.REVISION)

    def _maybeNotify(self):
        """
        Emit a push notification after C{_changeRevision}.
        """
        return self.notifyChanged()

    @inlineCallbacks
    def remove(self):
        """
        Remove DB rows corresponding to this notification home.
        """
        # Delete NOTIFICATION rows
        no = schema.NOTIFICATION
        kwds = {"ResourceID": self._resourceID}
        yield Delete(
            From=no,
            Where=(
                no.NOTIFICATION_HOME_RESOURCE_ID == Parameter("ResourceID")),
        ).on(self._txn, **kwds)

        # Delete NOTIFICATION_HOME (will cascade to NOTIFICATION_OBJECT_REVISIONS)
        nh = schema.NOTIFICATION_HOME
        yield Delete(
            From=nh,
            Where=(nh.RESOURCE_ID == Parameter("ResourceID")),
        ).on(self._txn, **kwds)

    purge = remove
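notificationsWith() wraps home creation in a savepoint so that losing a race on the unique OWNER_UID can be retried without poisoning the enclosing transaction. The shape of that pattern, sketched with hypothetical insertHomeRow/selectHomeRow helpers standing in for the DAL Insert/Select calls:

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def createHomeRow(txn, uid):
    savepoint = SavepointAction("notificationsWithUID")
    yield savepoint.acquire(txn)
    try:
        resourceID = yield insertHomeRow(txn, uid)   # hypothetical helper
    except Exception:
        # Probably lost the insert race: roll back to the savepoint only,
        # then look for the row the winner inserted.
        yield savepoint.rollback(txn)
        existing = yield selectHomeRow(txn, uid)     # hypothetical helper
        if existing is None:
            raise
        returnValue(existing)
    else:
        yield savepoint.release(txn)
        returnValue(resourceID)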
Example #16
class AMPPushMaster(object):
    """
    AMPPushMaster allows clients to use AMP to subscribe to,
    and receive, change notifications.
    """
    log = Logger()

    def __init__(
        self, controlSocket, parentService, port, enableStaggering,
        staggerSeconds, reactor=None
    ):
        if reactor is None:
            from twisted.internet import reactor
        from twisted.application.strports import service as strPortsService

        if port:
            # Service which listens for client subscriptions and sends
            # notifications to them
            strPortsService(
                str(port), AMPPushNotifierFactory(self),
                reactor=reactor).setServiceParent(parentService)

        if controlSocket is not None:
            # Set up the listener which gets notifications from the slaves
            controlSocket.addFactory(
                PUSH_ROUTE, AMPPushMasterListenerFactory(self)
            )

        self.subscribers = []

        if enableStaggering:
            self.scheduler = PushScheduler(
                reactor, self.sendNotification,
                staggerSeconds=staggerSeconds)
        else:
            self.scheduler = None

    def addSubscriber(self, p):
        self.log.debug("Added subscriber")
        self.subscribers.append(p)

    def removeSubscriber(self, p):
        self.log.debug("Removed subscriber")
        self.subscribers.remove(p)

    def enqueue(
        self, transaction, pushKey, dataChangedTimestamp=None,
        priority=PushPriority.high
    ):
        """
        Sends an AMP push notification to any clients subscribing to this pushKey.

        @param pushKey: The identifier of the resource that was updated, including
            a prefix indicating whether this is CalDAV or CardDAV related.

            "/CalDAV/abc/def/"

        @type pushKey: C{str}
        @param dataChangedTimestamp: Timestamp (epoch seconds) for the data change
            which triggered this notification (only used for unit tests)
        @type dataChangedTimestamp: C{int}
        """

        # Unit tests can pass this value in; otherwise it defaults to now
        if dataChangedTimestamp is None:
            dataChangedTimestamp = int(time.time())

        tokens = []
        for subscriber in self.subscribers:
            token = subscriber.subscribedToID(pushKey)
            if token is not None:
                tokens.append(token)
        if tokens:
            return self.scheduleNotifications(
                tokens, pushKey,
                dataChangedTimestamp, priority)

    @inlineCallbacks
    def sendNotification(self, token, id, dataChangedTimestamp, priority):
        for subscriber in self.subscribers:
            if subscriber.subscribedToID(id):
                yield subscriber.notify(
                    token, id, dataChangedTimestamp,
                    priority)

    @inlineCallbacks
    def scheduleNotifications(self, tokens, id, dataChangedTimestamp, priority):
        if self.scheduler is not None:
            self.scheduler.schedule(tokens, id, dataChangedTimestamp, priority)
        else:
            for token in tokens:
                yield self.sendNotification(
                    token, id, dataChangedTimestamp,
                    priority)
Example #17

from twext.python.log import Logger

from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.names import dns
from twisted.names.authority import BindAuthority
from twisted.names.client import getResolver
from twisted.names.error import DomainError, AuthoritativeDomainError

from twistedcaldav.config import config

import socket

log = Logger()

DebugResolver = None


def getIPsFromHost(host):
    """
    Map a hostname to an IPv4 or IPv6 address.

    @param host: the hostname
    @type host: C{str}

    @return: a C{set} of IPs
    """
    ips = set()
    # Use AF_UNSPEC rather than iterating (socket.AF_INET, socket.AF_INET6)
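The snippet is cut off here. A sketch of how an AF_UNSPEC lookup typically completes such a function (this continuation is an assumption, not the project's verbatim code):

def getIPsFromHostSketch(host):
    ips = set()
    try:
        results = socket.getaddrinfo(host, None, socket.AF_UNSPEC,
                                     socket.SOCK_STREAM)
    except socket.gaierror:
        return ips
    for _family, _socktype, _proto, _canonname, sockaddr in results:
        ips.add(sockaddr[0])  # the textual address, for IPv4 and IPv6 alike
    return ips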
Example #18
class DirectoryRecord(BaseDirectoryRecord, CalendarDirectoryRecordMixin):
    """
    Mac OS X Server Wiki directory record.
    """

    log = Logger()

    def __init__(self, service, fields):
        BaseDirectoryRecord.__init__(self, service, fields)
        CalendarDirectoryRecordMixin.__init__(self)

    @property
    def name(self):
        return self.shortNames[0]

    @inlineCallbacks
    def accessForRecord(self, record):
        """
        Look up the access level for a record in this wiki.

        @param record: The record to check access for.  A value of None means
            unauthenticated
        """
        if record is None:
            uid = u"unauthenticated"
        else:
            uid = record.uid

        try:
            # FIXME: accessForUserToWiki() API is lame.
            # There are no other callers except the old directory API, so
            # nuke it from the originating module and move that logic here
            # once the old API is removed.
            # When we do that note: isn't there a getPage() in twisted.web?

            self.log.debug("Wiki access check: {wiki}, {user}",
                           wiki=self.shortNames[0],
                           user=uid)
            access = yield accessForUserToWiki(
                uid.encode("utf-8"), self.shortNames[0].encode("utf-8"),
                self.service.endpointDescriptor)
            self.log.debug(
                "Wiki access result: {wiki}, {user}, {access}",
                wiki=self.shortNames[0],
                user=uid,
                access=access,
            )

        except MultiFailure as e:
            self.log.error(
                "Unable to look up access for record {record} "
                "in wiki {log_source}: {error}",
                record=record,
                error=e)
            returnValue(WikiAccessLevel.none)

        except WebError as e:
            status = int(e.status)

            if status == responsecode.FORBIDDEN:  # Unknown user
                self.log.debug("No such record (according to wiki): {record}",
                               record=record,
                               error=e)
                returnValue(WikiAccessLevel.none)

            if status == responsecode.NOT_FOUND:  # Unknown wiki
                self.log.error("No such wiki: {log_source.name}",
                               record=record,
                               error=e)
                returnValue(WikiAccessLevel.none)

            self.log.error("Unable to look up wiki access: {error}",
                           record=record,
                           error=e)
            returnValue(WikiAccessLevel.none)

        except TimeoutError:
            self.log.error("Wiki request timed out")
            returnValue(WikiAccessLevel.none)

        try:
            returnValue({
                "no-access": WikiAccessLevel.none,
                "read": WikiAccessLevel.read,
                "write": WikiAccessLevel.write,
                "admin": WikiAccessLevel.write,
            }[access])

        except KeyError:
            self.log.error("Unknown wiki access level: {level}", level=access)
            returnValue(WikiAccessLevel.none)
Exemplo n.º 19
0
from twistedcaldav.cache import _CachedResponseResource
from twistedcaldav.cache import MemcacheResponseCache, MemcacheChangeNotifier
from twistedcaldav.cache import DisabledCache
from twistedcaldav.config import config
from twistedcaldav.extensions import DAVFile, CachingPropertyStore
from twistedcaldav.extensions import DirectoryPrincipalPropertySearchMixIn
from twistedcaldav.extensions import ReadOnlyResourceMixIn
from twistedcaldav.resource import CalDAVComplianceMixIn
from twistedcaldav.resource import CalendarHomeResource, AddressBookHomeResource
from twistedcaldav.directory.principal import DirectoryPrincipalResource
from twistedcaldav.storebridge import CalendarCollectionResource,\
    AddressBookCollectionResource, StoreNotificationCollectionResource
from calendarserver.platform.darwin.wiki import usernameForAuthToken

log = Logger()


class RootResource (ReadOnlyResourceMixIn, DirectoryPrincipalPropertySearchMixIn, CalDAVComplianceMixIn, DAVFile):
    """
    A special root resource that contains support for checking SACLs
    as well as adding responseFilters.
    """

    useSacls = False

    # Mapping of top-level resource paths to SACLs.  If a request path
    # starts with any of these, then the list of SACLs are checked.  If the
    # request path does not start with any of these, then no SACLs are checked.
    saclMap = {
        "addressbooks" : ("addressbook",),
Exemplo n.º 20
0
WebDAV-aware static resources.
"""

__all__ = ["http_PROPPATCH"]

from twisted.python.failure import Failure
from twisted.internet.defer import deferredGenerator, waitForDeferred

from twext.python.log import Logger
from txweb2 import responsecode
from txweb2.http import HTTPError, StatusResponse
from txdav.xml import element as davxml
from txweb2.dav.http import MultiStatusResponse, PropertyStatusResponseQueue
from txweb2.dav.util import davXMLFromStream

log = Logger()


def http_PROPPATCH(self, request):
    """
    Respond to a PROPPATCH request. (RFC 2518, section 8.2)
    """
    if not self.exists():
        log.error("File not found: %s" % (self, ))
        raise HTTPError(responsecode.NOT_FOUND)

    x = waitForDeferred(self.authorize(request, (davxml.WriteProperties(), )))
    yield x
    x.getResult()

    #
Exemplo n.º 21
0
import os

from twisted.python.reflect import namedClass
from twext.python.log import Logger


from calendarserver.provision.root import RootResource
from twistedcaldav import memcachepool
from twistedcaldav.config import config, ConfigurationError
from twistedcaldav.directory import augment, calendaruserproxy
from twistedcaldav.directory.aggregate import AggregateDirectoryService
from twistedcaldav.directory.directory import DirectoryService, DirectoryRecord
from twistedcaldav.notify import installNotificationClient
from twistedcaldav.stdconfig import DEFAULT_CONFIG_FILE

from txdav.common.datastore.file import CommonDataStore

log = Logger()

def loadConfig(configFileName):
    if configFileName is None:
        configFileName = DEFAULT_CONFIG_FILE

    if not os.path.isfile(configFileName):
        raise ConfigurationError("No config file: %s" % (configFileName,))

    config.load(configFileName)

    return config
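
# Usage sketch (illustrative; the explicit path below is hypothetical):
#
#     config = loadConfig(None)  # falls back to DEFAULT_CONFIG_FILE
#     config = loadConfig("/etc/caldavd/caldavd.plist")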

def getDirectory():

    class MyDirectoryService (AggregateDirectoryService):
Exemplo n.º 22
0
class AbstractCalendarIndex(AbstractSQLDatabase):
    """
    Calendar collection index abstract base class that defines the APIs for the index.
    This will be subclassed for the two types of index behaviour we need: one for
    regular calendar collections, one for schedule calendar collections.
    """
    log = Logger()

    def __init__(self, resource):
        """
        @param resource: the L{CalDAVResource} resource to
            index. C{resource} must be a calendar collection (ie.
            C{resource.isPseudoCalendarCollection()} returns C{True}.)
        """
        self.resource = resource
        db_filename = self.resource.fp.child(db_basename).path
        super(AbstractCalendarIndex, self).__init__(db_filename, False)

    def create(self):
        """
        Create the index and initialize it.
        """
        self._db()

    def reserveUID(self, uid):
        """
        Reserve a UID for this index's resource.
        @param uid: the UID to reserve
        @raise ReservationError: if C{uid} is already reserved
        """
        raise NotImplementedError

    def unreserveUID(self, uid):
        """
        Unreserve a UID for this index's resource.
        @param uid: the UID to reserve
        @raise ReservationError: if C{uid} is not reserved
        """
        raise NotImplementedError

    def isReservedUID(self, uid):
        """
        Check to see whether a UID is reserved.
        @param uid: the UID to check
        @return: True if C{uid} is reserved, False otherwise.
        """
        raise NotImplementedError

    def isAllowedUID(self, uid, *names):
        """
        Checks to see whether an operation which adds the specified UID to
        the index is allowed.  Specifically, the operation may not violate
        the constraint that UIDs must be unique, and the UID must not be
        reserved.
        @param uid: the UID to check
        @param names: the names of resources being replaced or deleted by the
            operation; UIDs associated with these resources are not checked.
        @return: True if the UID is not in the index and is not reserved,
            False otherwise.
        """
        raise NotImplementedError

    def resourceNamesForUID(self, uid):
        """
        Looks up the names of the resources with the given UID.
        @param uid: the UID of the resources to look up.
        @return: a list of resource names
        """
        names = self._db_values_for_sql(
            "select NAME from RESOURCE where UID = :1", uid)

        #
        # Check that each name exists as a child of self.resource.  If not, the
        # resource record is stale.
        #
        resources = []
        for name in names:
            name_utf8 = name.encode("utf-8")
            if name is not None and self.resource.getChild(name_utf8) is None:
                # Clean up
                log.error(
                    "Stale resource record found for child %s with UID %s in %s"
                    % (name, uid, self.resource))
                self._delete_from_db(name, uid, False)
                self._db_commit()
            else:
                resources.append(name_utf8)

        return resources

    def resourceNameForUID(self, uid):
        """
        Looks up the name of the resource with the given UID.
        @param uid: the UID of the resource to look up.
        @return: If the resource is found, its name; C{None} otherwise.
        """
        result = None

        for name in self.resourceNamesForUID(uid):
            assert result is None, "More than one resource with UID %s in calendar collection %r" % (
                uid, self)
            result = name

        return result

    def resourceUIDForName(self, name):
        """
        Looks up the UID of the resource with the given name.
        @param name: the name of the resource to look up.
        @return: If the resource is found, the UID of the resource; C{None}
            otherwise.
        """
        uid = self._db_value_for_sql(
            "select UID from RESOURCE where NAME = :1", name)

        return uid

    def componentTypeCounts(self):
        """
        Count each type of component.
        """
        return self._db_execute(
            "select TYPE, COUNT(TYPE) from RESOURCE group by TYPE")

    def addResource(self, name, calendar, fast=False, reCreate=False):
        """
        Adding or updating an existing resource.
        To check for an update we attempt to get an existing UID
        for the resource name. If present, then the index entries for
        that UID are removed. After that the new index entries are added.
        @param name: the name of the resource to add.
        @param calendar: a L{Calendar} object representing the resource
            contents.
        @param fast: if C{True} do not do commit, otherwise do commit.
        """
        oldUID = self.resourceUIDForName(name)
        if oldUID is not None:
            self._delete_from_db(name, oldUID, False)
        self._add_to_db(name, calendar, reCreate=reCreate)
        if not fast:
            self._db_commit()

    def deleteResource(self, name):
        """
        Remove this resource from the index.
        @param name: the name of the resource to delete.
        """
        uid = self.resourceUIDForName(name)
        if uid is not None:
            self._delete_from_db(name, uid)
            self._db_commit()

    def resourceExists(self, name):
        """
        Determines whether the specified resource name exists in the index.
        @param name: the name of the resource to test
        @return: True if the resource exists, False if not
        """
        uid = self._db_value_for_sql(
            "select UID from RESOURCE where NAME = :1", name)
        return uid is not None

    def resourcesExist(self, names):
        """
        Determines which of the specified resource names exist in the index.
        @param names: a C{list} containing the names of the resources to test
        @return: a C{list} of all names that exist
        """
        statement = "select NAME from RESOURCE where NAME in ("
        for ctr in (item[0] for item in enumerate(names)):
            if ctr != 0:
                statement += ", "
            statement += ":%s" % (ctr, )
        statement += ")"
        results = self._db_values_for_sql(statement, *names)
        return results
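
    # Illustration (not part of the original class): with
    # names=["a.ics", "b.ics"] the statement built above is
    #
    #     select NAME from RESOURCE where NAME in (:0, :1)
    #
    # and the names are bound positionally by _db_values_for_sql().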

    def testAndUpdateIndex(self, minDate):
        # Find out if the index is expanded far enough
        names = self.notExpandedBeyond(minDate)
        # Actually expand recurrence max
        for name in names:
            self.log.info("Search falls outside range of index for %s %s" %
                          (name, minDate))
            self.reExpandResource(name, minDate)

    def whatchanged(self, revision):

        results = [
            (name.encode("utf-8"), deleted)
            for name, deleted in self._db_execute(
                "select NAME, DELETED from REVISIONS where REVISION > :1",
                revision)
        ]
        results.sort(key=lambda x: x[1])

        changed = []
        deleted = []
        invalid = []
        for name, wasdeleted in results:
            if name:
                if wasdeleted == 'Y':
                    if revision:
                        deleted.append(name)
                else:
                    changed.append(name)
            else:
                raise SyncTokenValidException

        return (changed, deleted, invalid)
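
    # Illustration (not part of the original class): given REVISIONS rows
    # [("a.ics", "N"), ("b.ics", "Y")] newer than the passed-in revision,
    # whatchanged(1) returns (["a.ics"], ["b.ics"], []), i.e.
    # (changed, deleted, invalid).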

    def lastRevision(self):
        return self._db_value_for_sql("select REVISION from REVISION_SEQUENCE")

    def bumpRevision(self, fast=False):
        self._db_execute(
            """
            update REVISION_SEQUENCE set REVISION = REVISION + 1
            """, )
        self._db_commit()
        return self._db_value_for_sql(
            """
            select REVISION from REVISION_SEQUENCE
            """, )

    def indexedSearch(self, filter, useruid="", fbtype=False):
        """
        Finds resources matching the given qualifiers.
        @param filter: the L{Filter} for the calendar-query to execute.
        @return: an iterable of tuples for each resource matching the
            given C{qualifiers}. The tuples are C{(name, uid, type)}, where
            C{name} is the resource name, C{uid} is the resource UID, and
            C{type} is the resource iCalendar component type.
        """

        # Make sure we have a proper Filter element and get the partial SQL
        # statement to use.
        if isinstance(filter, Filter):
            if fbtype:
                # Lookup the useruid - try the empty (default) one if needed
                dbuseruid = self._db_value_for_sql(
                    "select PERUSERID from PERUSER where USERUID == :1",
                    useruid,
                )
            else:
                dbuseruid = ""

            qualifiers = sqlcalendarquery(filter, None, dbuseruid, fbtype)
            if qualifiers is not None:
                # Determine how far we need to extend the current expansion of
                # events. If we have an open-ended time-range we will expand one
                # year past the start. That should catch bounded recurrences - unbounded
                # will have been indexed with an "infinite" value always included.
                maxDate, isStartDate = filter.getmaxtimerange()
                if maxDate:
                    maxDate = maxDate.duplicate()
                    maxDate.setDateOnly(True)
                    if isStartDate:
                        maxDate += Duration(days=365)
                    self.testAndUpdateIndex(maxDate)
            else:
                # We cannot handle this filter in an indexed search
                raise IndexedSearchException()

        else:
            qualifiers = None

        # Perform the search
        if qualifiers is None:
            rowiter = self._db_execute("select NAME, UID, TYPE from RESOURCE")
        else:
            if fbtype:
                # For a free-busy time-range query we return all instances
                rowiter = self._db_execute(
                    "select DISTINCT RESOURCE.NAME, RESOURCE.UID, RESOURCE.TYPE, RESOURCE.ORGANIZER, TIMESPAN.FLOAT, TIMESPAN.START, TIMESPAN.END, TIMESPAN.FBTYPE, TIMESPAN.TRANSPARENT, TRANSPARENCY.TRANSPARENT"
                    + qualifiers[0], *qualifiers[1])
            else:
                rowiter = self._db_execute(
                    "select DISTINCT RESOURCE.NAME, RESOURCE.UID, RESOURCE.TYPE"
                    + qualifiers[0], *qualifiers[1])

        # Check result for missing resources
        results = []
        for row in rowiter:
            name = row[0]
            if self.resource.getChild(name.encode("utf-8")):
                if fbtype:
                    row = list(row)
                    if row[9]:
                        row[8] = row[9]
                    del row[9]
                results.append(row)
            else:
                log.error(
                    "Calendar resource %s is missing from %s. Removing from index."
                    % (name, self.resource))
                self.deleteResource(name)

        return results

    def bruteForceSearch(self):
        """
        List the whole index and test each entry for existence, updating the index.
        @return: all resources in the index
        """
        # List all resources
        rowiter = self._db_execute("select NAME, UID, TYPE from RESOURCE")

        # Check result for missing resources:

        results = []
        for row in rowiter:
            name = row[0]
            if self.resource.getChild(name.encode("utf-8")):
                results.append(row)
            else:
                log.error(
                    "Calendar resource %s is missing from %s. Removing from index."
                    % (name, self.resource))
                self.deleteResource(name)

        return results

    def _db_version(self):
        """
        @return: the schema version assigned to this index.
        """
        return schema_version

    def _add_to_db(self,
                   name,
                   calendar,
                   cursor=None,
                   expand_until=None,
                   reCreate=False):
        """
        Records the given calendar resource in the index with the given name.
        Resource names and UIDs must both be unique; only one resource name may
        be associated with any given UID and vice versa.
        NB This method does not commit the changes to the db - the caller
        MUST take care of that
        @param name: the name of the resource to add.
        @param calendar: a L{Calendar} object representing the resource
            contents.
        """
        raise NotImplementedError

    def _delete_from_db(self, name, uid, dorevision=True):
        """
        Deletes the specified entry from all dbs.
        @param name: the name of the resource to delete.
        @param uid: the uid of the resource to delete.
        """
        raise NotImplementedError
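

# A minimal subclass sketch (illustrative; the real subclasses implement UID
# reservation against the database, and ReservationError is the module-level
# exception referenced in the docstrings above):
class SimpleCalendarIndex(AbstractCalendarIndex):
    """
    An in-memory reservation strategy, for illustration only.
    """

    def __init__(self, resource):
        super(SimpleCalendarIndex, self).__init__(resource)
        self._reserved = set()

    def reserveUID(self, uid):
        if uid in self._reserved:
            raise ReservationError("UID already reserved: %s" % (uid,))
        self._reserved.add(uid)

    def unreserveUID(self, uid):
        if uid not in self._reserved:
            raise ReservationError("UID is not reserved: %s" % (uid,))
        self._reserved.remove(uid)

    def isReservedUID(self, uid):
        return uid in self._reserved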
Exemplo n.º 23
0
from calendarserver.accesslog import DirectoryLogWrapperResource
from calendarserver.provision.root import RootResource
from calendarserver.tools.util import checkDirectory
from calendarserver.webadmin.resource import WebAdminResource
from calendarserver.webcal.resource import WebCalendarResource

from txdav.common.datastore.sql import CommonDataStore as CommonSQLDataStore
from txdav.common.datastore.file import CommonDataStore as CommonFileDataStore
from txdav.common.datastore.sql import current_sql_schema
from twext.python.filepath import CachingFilePath
from urllib import quote
from twisted.python.usage import UsageError


log = Logger()


def pgServiceFromConfig(config, subServiceFactory, uid=None, gid=None):
    """
    Construct a L{PostgresService} from a given configuration and subservice.

    @param config: the configuration to derive postgres configuration
        parameters from.

    @param subServiceFactory: A factory for the service to start once the
        L{PostgresService} has been initialized.

    @param uid: The user-ID to run the PostgreSQL server as.

    @param gid: The group-ID to run the PostgreSQL server as.
Exemplo n.º 24
0
from os import close
from errno import EAGAIN, ENOBUFS
from socket import (socketpair, fromfd, error as SocketError, AF_UNIX,
                    SOCK_STREAM, SOCK_DGRAM)

from zope.interface import Interface

from twisted.python.sendmsg import send1msg, recv1msg, getsockfam
from twisted.internet.abstract import FileDescriptor
from twisted.internet.protocol import Protocol, Factory

from twext.python.log import Logger
from twext.python.sendfd import sendfd, recvfd

log = Logger()


class InheritingProtocol(Protocol, object):
    """
    When a connection comes in on this protocol, stop reading and writing, and
    dispatch the socket to another process via its factory.
    """
    def connectionMade(self):
        """
        A connection was received; transmit the file descriptor to another
        process via L{InheritingProtocolFactory} and remove my transport from
        the reactor.
        """
        self.transport.stopReading()
        self.transport.stopWriting()
        # Completion of the truncated snippet (a minimal reconstruction):
        # hand the accepted connection's file descriptor off via the factory.
        self.factory.sendSocket(self.transport.socket)
Exemplo n.º 25
0
from Crypto.Hash import SHA, SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5

import base64
import hashlib
import os
import textwrap
import time
import uuid

"""
DKIM HTTP message generation and validation.
"""

from twext.python.log import Logger  # used below; missing from the truncated imports

log = Logger()

# DKIM/iSchedule Constants
RSA1 = "rsa-sha1"
RSA256 = "rsa-sha256"
Q_DNS = "dns/txt"
Q_HTTP = "http/well-known"
Q_PRIVATE = "private-exchange"

KEY_SERVICE_TYPE = "ischedule"

# Headers
DKIM_SIGNATURE = "DKIM-Signature"
ISCHEDULE_VERSION = "iSchedule-Version"
ISCHEDULE_VERSION_VALUE = "1.0"
ISCHEDULE_MESSAGE_ID = "iSchedule-Message-ID"
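
# Illustration (not part of the original module): per RFC 6376, a generated
# DKIM-Signature header using the constants above might begin
#
#     DKIM-Signature: v=1; a=rsa-sha256; q=http/well-known; ...
#
# i.e. RSA256 selects the signing algorithm and Q_HTTP the public-key
# lookup method.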
Exemplo n.º 26
0
class BaseResponseCache(object):
    """
    A base class which provides some common operations
    """
    log = Logger()

    def _principalURI(self, principal):
        return principal.principalURL() if principal is not None else "unauthenticated"

    def _uriNotFound(self, f, uri):
        f.trap(AttributeError)
        raise URINotFoundException(uri)

    def _getRecordForURI(self, uri, request):
        """
        Return the directory record for the specified principal uri.
        """
        def _getRecord(resrc):
            if hasattr(resrc, 'record'):
                return resrc.record

        try:
            return request.locateResource(uri).addCallback(
                _getRecord).addErrback(self._uriNotFound, uri)
        except AssertionError:
            raise URINotFoundException(uri)

    @inlineCallbacks
    def _canonicalizeURIForRequest(self, uri, request):
        """
        Always use canonicalized forms of the URIs for caching (i.e. __uids__ paths).

        Do this without calling locateResource which may cause a query on the store.
        """

        uribits = uri.split("/")
        if len(uribits) > 1 and uribits[1] in ("principals", "calendars",
                                               "addressbooks"):
            if uribits[2] == "__uids__":
                returnValue(uri)
            else:
                recordType = uribits[2]
                recordName = uribits[3]
                directory = request.site.resource.getDirectory()
                record = yield directory.recordWithShortName(
                    directory.oldNameToRecordType(recordType), recordName)
                if record is not None:
                    uribits[2] = "__uids__"
                    uribits[3] = record.uid.encode("utf-8")
                    returnValue("/".join(uribits))

        # Fall back to the locateResource approach
        try:
            resrc = yield request.locateResource(uri)
            returnValue(resrc.url())
        except AssertionError:
            raise URINotFoundException(uri)

    def _getURIs(self, request):
        """
        Get principal and resource URIs from the request.
        """
        def _getSecondURI(rURI):
            return self._canonicalizeURIForRequest(
                self._principalURI(request.authnUser),
                request).addCallback(lambda pURI: (pURI, rURI))

        d = self._canonicalizeURIForRequest(request.uri, request)
        d.addCallback(_getSecondURI)

        return d

    @inlineCallbacks
    def _requestKey(self, request):
        """
        Get a key for this request. This depends on the method, Depth: header, authn user principal,
        request uri and a hash of the request body (the body being normalized for property order).
        """
        requestBody = (yield allDataFromStream(request.stream))
        if requestBody is not None:
            # Give it back to the request so it can be read again
            request.stream = MemoryStream(requestBody)
            request.stream.doStartReading = None

            # Normalize the property order by doing a "dumb" sort on lines
            requestLines = requestBody.splitlines()
            requestLines.sort()
            requestBody = "\n".join(requestLines)

        request.cacheKey = (request.method,
                            self._principalURI(request.authnUser), request.uri,
                            request.headers.getHeader('depth'),
                            hash(requestBody))

        returnValue(request.cacheKey)

    def _getResponseBody(self, key, response):
        d1 = allDataFromStream(response.stream)
        d1.addCallback(lambda responseBody: (key, responseBody))
        return d1
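
    # Illustration (not part of the original class): _requestKey() yields a
    # tuple such as
    #
    #     ("REPORT", "/principals/__uids__/XYZ/", "/calendars/__uids__/XYZ/",
    #      "1", 1234567890)
    #
    # i.e. (method, authn principal URI, request URI, Depth header,
    # hash of the normalized body).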
Exemplo n.º 27
0
]

import string

from twisted.internet.defer import deferredGenerator, waitForDeferred

from twext.python.log import Logger
from twext.web2 import responsecode
from twext.web2.http import HTTPError, StatusResponse
from twext.web2.dav.http import ErrorResponse
from twext.web2.dav.util import davXMLFromStream
from txdav.xml import element as davxml
from txdav.xml.element import lookupElement
from txdav.xml.base import encodeXMLName

log = Logger()


max_number_of_matches = 500

class NumberOfMatchesWithinLimits(Exception):

    def __init__(self, limit):
        super(NumberOfMatchesWithinLimits, self).__init__()
        self.limit = limit

    def maxLimit(self):
        return self.limit

def http_REPORT(self, request):
Exemplo n.º 28
0
from twext.enterprise.dal.record import fromTable
from twext.enterprise.dal.syntax import Select
from twext.enterprise.jobs.workitem import AggregatedWorkItem, RegeneratingWorkItem
from twext.python.log import Logger
from twisted.internet.defer import inlineCallbacks, returnValue, succeed, \
    DeferredList
from twistedcaldav.config import config
from txdav.caldav.datastore.sql import CalendarStoreFeatures
from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord
from txdav.common.datastore.sql_directory import GroupsRecord
from txdav.common.datastore.sql_tables import schema, _BIND_MODE_OWN
import datetime
import itertools
import time

log = Logger()


class GroupCacherPollingWork(RegeneratingWorkItem,
                             fromTable(schema.GROUP_CACHER_POLLING_WORK)):

    group = "group_cacher_polling"

    @classmethod
    def initialSchedule(cls, store, seconds):
        def _enqueue(txn):
            return GroupCacherPollingWork.reschedule(txn, seconds)

        if config.GroupCaching.Enabled:
            return store.inTransaction(
                "GroupCacherPollingWork.initialSchedule", _enqueue)
Exemplo n.º 29
0
    "propertyName",
]

from twisted.python.failure import Failure
from twisted.internet.defer import deferredGenerator, waitForDeferred

from twext.python.log import Logger
from twext.web2.http import HTTPError
from twext.web2 import responsecode
from twext.web2.http import StatusResponse
from txdav.xml import element as davxml
from twext.web2.dav.http import MultiStatusResponse, statusForFailure, \
    ErrorResponse
from twext.web2.dav.util import normalizeURL, davXMLFromStream

log = Logger()


def http_PROPFIND(self, request):
    """
    Respond to a PROPFIND request. (RFC 2518, section 8.1)
    """
    if not self.exists():
        log.err("File not found: %s" % (self,))
        raise HTTPError(responsecode.NOT_FOUND)

    #
    # Check authentication and access controls
    #
    x = waitForDeferred(self.authorize(request, (davxml.Read(),)))
    yield x
Exemplo n.º 30
0
class GroupCacher(object):
    log = Logger()

    def __init__(
        self,
        directory,
        updateSeconds=600,
        useDirectoryBasedDelegates=False,
        directoryBasedDelegatesSource=None,
        cacheNotifier=None,
    ):
        self.directory = directory
        self.useDirectoryBasedDelegates = useDirectoryBasedDelegates
        if useDirectoryBasedDelegates and directoryBasedDelegatesSource is None:
            directoryBasedDelegatesSource = self.directory.recordsWithDirectoryBasedDelegates
        self.directoryBasedDelegatesSource = directoryBasedDelegatesSource
        self.cacheNotifier = cacheNotifier
        self.updateSeconds = updateSeconds

    @inlineCallbacks
    def update(self, txn):

        if self.useDirectoryBasedDelegates:
            # Pull in delegate assignments from the directory and stick them
            # into the delegate db
            recordsWithDirectoryBasedDelegates = yield self.directoryBasedDelegatesSource()
            externalAssignments = {}
            for record in recordsWithDirectoryBasedDelegates:
                try:
                    readWriteProxy = record.readWriteProxy
                except AttributeError:
                    readWriteProxy = None
                try:
                    readOnlyProxy = record.readOnlyProxy
                except AttributeError:
                    readOnlyProxy = None

                if readOnlyProxy or readWriteProxy:
                    externalAssignments[record.uid] = (readOnlyProxy,
                                                       readWriteProxy)

            yield self.scheduleExternalAssignments(txn, externalAssignments)

        # Figure out which groups matter
        groupUIDs = yield self.groupsToRefresh(txn)
        # self.log.debug(
        #     "Groups to refresh: {g}", g=groupUIDs
        # )

        if config.AutomaticPurging.Enabled and groupUIDs:
            # remove unused groups and groups that have not been seen in a while
            dateLimit = (
                datetime.datetime.utcnow() - datetime.timedelta(seconds=float(
                    config.AutomaticPurging.GroupPurgeIntervalSeconds)))
            rows = yield GroupsRecord.deletesome(
                txn,
                ((GroupsRecord.extant == 0).And(
                    GroupsRecord.modified < dateLimit)).Or(
                        GroupsRecord.groupUID.NotIn(groupUIDs)),
                returnCols=GroupsRecord.groupUID,
            )
        else:
            # remove unused groups
            rows = yield GroupsRecord.deletesome(
                txn,
                GroupsRecord.groupUID.NotIn(groupUIDs) if groupUIDs else None,
                returnCols=GroupsRecord.groupUID,
            )
        deletedGroupUIDs = [row[0] for row in rows]
        if deletedGroupUIDs:
            self.log.debug("Deleted old or unused groups {d}",
                           d=deletedGroupUIDs)

        # For each of those groups, create a per-group refresh work item
        for groupUID in set(groupUIDs) - set(deletedGroupUIDs):
            self.log.debug("Enqueuing group refresh for {u}", u=groupUID)
            yield GroupRefreshWork.reschedule(txn, 0, groupUID=groupUID)

    @inlineCallbacks
    def scheduleExternalAssignments(self,
                                    txn,
                                    newAssignments,
                                    immediately=False):

        oldAssignments = yield txn.externalDelegates()

        # external assignments is of the form:
        # { delegatorUID: (readDelegateGroupUID, writeDelegateGroupUID),
        # }

        changed, removed = diffAssignments(oldAssignments, newAssignments)
        if changed:
            for (delegatorUID, (readDelegateUID, writeDelegateUID)) in changed:
                self.log.debug(
                    "Scheduling external delegate assignment changes for {uid}",
                    uid=delegatorUID)
                if not readDelegateUID:
                    readDelegateUID = ""
                if not writeDelegateUID:
                    writeDelegateUID = ""
                if immediately:
                    yield self.applyExternalAssignments(
                        txn, delegatorUID, readDelegateUID, writeDelegateUID)
                else:
                    yield GroupDelegateChangesWork.reschedule(
                        txn,
                        0,
                        delegatorUID=delegatorUID,
                        readDelegateUID=readDelegateUID,
                        writeDelegateUID=writeDelegateUID)
        if removed:
            for delegatorUID in removed:
                self.log.debug(
                    "Scheduling external delegation assignment removal for {uid}",
                    uid=delegatorUID)
                if immediately:
                    yield self.applyExternalAssignments(
                        txn, delegatorUID, "", "")
                else:
                    yield GroupDelegateChangesWork.reschedule(
                        txn,
                        0,
                        delegatorUID=delegatorUID,
                        readDelegateUID="",
                        writeDelegateUID="")

    @inlineCallbacks
    def applyExternalAssignments(self, txn, delegatorUID, readDelegateUID,
                                 writeDelegateUID):
        self.log.debug("External delegate assignments changed for {uid}",
                       uid=delegatorUID)
        readDelegateGroupID = writeDelegateGroupID = None

        if readDelegateUID:
            readDelegateGroup = yield txn.groupByUID(readDelegateUID)
            if readDelegateGroup is None:
                # The group record does not actually exist
                readDelegateUID = None
            else:
                readDelegateGroupID = readDelegateGroup.groupID

        if writeDelegateUID:
            writeDelegateGroup = yield txn.groupByUID(writeDelegateUID)
            if writeDelegateGroup is None:
                # The group record does not actually exist
                writeDelegateUID = None
            else:
                writeDelegateGroupID = writeDelegateGroup.groupID

        yield txn.assignExternalDelegates(delegatorUID, readDelegateGroupID,
                                          writeDelegateGroupID,
                                          readDelegateUID, writeDelegateUID)

    @inlineCallbacks
    def refreshGroup(self, txn, groupUID):
        """
        Does the work of a per-group refresh work item: faults in the
        flattened membership of a group, as UIDs, and updates the
        GROUP_MEMBERSHIP table.  A WorkProposal is returned for tests.
        """
        self.log.debug("Refreshing group: {g}", g=groupUID)

        record = (yield self.directory.recordWithUID(groupUID))
        if record is None:
            # the group has disappeared from the directory
            self.log.info("Group is missing: {g}", g=groupUID)
        else:
            self.log.debug("Got group record: {u}", u=record.uid)

        group = yield txn.groupByUID(groupUID, create=(record is not None))

        if group:
            membershipChanged, addedUIDs, removedUIDs = yield txn.refreshGroup(
                group, record)

            if membershipChanged:
                self.log.info(
                    "Membership changed for group {uid} {name}:\n\tadded {added}\n\tremoved {removed}",
                    uid=group.groupUID,
                    name=group.name,
                    added=",".join(addedUIDs),
                    removed=",".join(removedUIDs),
                )

                # Send cache change notifications
                if self.cacheNotifier is not None:
                    self.cacheNotifier.changed(group.groupUID)
                    for uid in itertools.chain(addedUIDs, removedUIDs):
                        self.cacheNotifier.changed(uid)

                # Notify other store APIs of changes
                wpsAttendee = yield self.scheduleGroupAttendeeReconciliations(
                    txn, group.groupID)
                wpsShareee = yield self.scheduleGroupShareeReconciliations(
                    txn, group.groupID)

                returnValue(wpsAttendee + wpsShareee)
            else:
                self.log.debug("No membership change for group {uid} {name}",
                               uid=group.groupUID,
                               name=group.name)

        returnValue(tuple())

    def synchronizeMembers(self, txn, groupID, newMemberUIDs):
        return txn.synchronizeMembers(groupID, newMemberUIDs)

    def cachedMembers(self, txn, groupID):
        """
        The members of the given group as recorded in the db
        """
        return txn.groupMembers(groupID)

    def cachedGroupsFor(self, txn, uid):
        """
        The UIDs of the groups the uid is a member of
        """
        return txn.groupUIDsFor(uid)

    @inlineCallbacks
    def scheduleGroupAttendeeReconciliations(self, txn, groupID):
        """
        Find all events that have this groupID as an attendee and create
        work items for them.
        @return: a C{tuple} of work items
        """

        records = yield GroupAttendeeRecord.querysimple(txn, groupID=groupID)

        workItems = []
        for record in records:
            work = yield GroupAttendeeReconciliationWork.reschedule(
                txn,
                seconds=float(
                    config.GroupAttendees.ReconciliationDelaySeconds),
                resourceID=record.resourceID,
                groupID=groupID,
            )
            workItems.append(work)
        returnValue(tuple(workItems))

    @inlineCallbacks
    def scheduleGroupShareeReconciliations(self, txn, groupID):
        """
        Find all calendars that are shared to this groupID and create
        work items for them.
        @return: a C{tuple} of work items
        """
        gs = schema.GROUP_SHAREE
        rows = yield Select(
            [
                gs.CALENDAR_ID,
            ],
            From=gs,
            Where=gs.GROUP_ID == groupID,
        ).on(txn)

        workItems = []
        for [calendarID] in rows:
            work = yield GroupShareeReconciliationWork.reschedule(
                txn,
                seconds=float(config.Sharing.Calendars.Groups.
                              ReconciliationDelaySeconds),
                calendarID=calendarID,
                groupID=groupID,
            )
            workItems.append(work)
        returnValue(tuple(workItems))

    @inlineCallbacks
    def groupsToRefresh(self, txn):
        delegatedUIDs = set((yield txn.allGroupDelegates()))
        self.log.debug("There are {count} group delegates",
                       count=len(delegatedUIDs))

        # Also get group delegates from other pods
        if (txn.directoryService().serversDB() is not None and
                len(txn.directoryService().serversDB().allServersExceptThis(filter_v5=True)) != 0):
            results = yield DeferredList(
                [
                    txn.store().conduit.send_all_group_delegates(txn, server)
                    for server in txn.directoryService().serversDB().allServersExceptThis(filter_v5=True)
                ],
                consumeErrors=True,
            )
            for result in results:
                if result and result[0]:
                    delegatedUIDs.update(result[1])
            self.log.debug(
                "There are {count} group delegates on this and other pods",
                count=len(delegatedUIDs))

        # Get groupUIDs for all group attendees
        groups = yield GroupsRecord.query(
            txn,
            GroupsRecord.groupID.In(
                GroupAttendeeRecord.queryExpr(
                    expr=None,
                    attributes=(GroupAttendeeRecord.groupID, ),
                    distinct=True,
                )))
        attendeeGroupUIDs = frozenset([group.groupUID for group in groups])
        self.log.debug("There are {count} group attendees",
                       count=len(attendeeGroupUIDs))

        # Get groupUIDs for all group shares
        gs = schema.GROUP_SHAREE
        gr = schema.GROUPS
        rows = yield Select([gr.GROUP_UID],
                            From=gr,
                            Where=gr.GROUP_ID.In(
                                Select([gs.GROUP_ID], From=gs,
                                       Distinct=True))).on(txn)
        shareeGroupUIDs = frozenset([row[0] for row in rows])
        self.log.debug("There are {count} group sharees",
                       count=len(shareeGroupUIDs))

        returnValue(
            frozenset(delegatedUIDs | attendeeGroupUIDs | shareeGroupUIDs))
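

# diffAssignments() used above is imported from elsewhere in the real code;
# this is a minimal sketch of its contract, assuming both arguments map a
# delegatorUID to a (readDelegateUID, writeDelegateUID) tuple:
def diffAssignmentsSketch(old, new):
    """
    Return (changed, removed): entries added to or altered in C{new}, and
    delegator UIDs present in C{old} but absent from C{new}.
    """
    changed = []
    removed = []
    for delegator, assignment in new.iteritems():
        if old.get(delegator) != assignment:
            changed.append((delegator, assignment))
    for delegator in old:
        if delegator not in new:
            removed.append(delegator)
    return changed, removed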
Exemplo n.º 31
0
import string

from twisted.internet.defer import inlineCallbacks, returnValue
from txweb2 import responsecode
from txweb2.http import HTTPError, StatusResponse
from txweb2.dav.util import davXMLFromStream
from txdav.xml import element as davxml
from txdav.xml.base import encodeXMLName
from txdav.xml.element import lookupElement

from twext.python.log import Logger
from txweb2.dav.http import ErrorResponse

from twistedcaldav import caldavxml

log = Logger()


@inlineCallbacks
def http_REPORT(self, request):
    """
    Respond to a REPORT request. (RFC 3253, section 3.6)
    """
    if not self.exists():
        log.error("Resource not found: %s" % (self,))
        raise HTTPError(responsecode.NOT_FOUND)

    #
    # Read request body
    #
    try:
Exemplo n.º 32
0
from twext.python.log import Logger

from twisted.internet import reactor, protocol
from twisted.internet.defer import inlineCallbacks, Deferred, returnValue
from twisted.web import http_headers
from twisted.web.client import Agent
from twisted.web.http import MOVED_PERMANENTLY, TEMPORARY_REDIRECT, FOUND

from urlparse import urlparse
from urlparse import urlunparse

__all__ = [
    "getURL",
]

log = Logger()


class AccumulatingProtocol(protocol.Protocol):
    """
    L{AccumulatingProtocol} is an L{IProtocol} implementation which collects
    the data delivered to it and can fire a Deferred when it is connected or
    disconnected.

    @ivar made: A flag indicating whether C{connectionMade} has been called.
    @ivar data: A string giving all the data passed to C{dataReceived}.
    @ivar closed: A flag indicating whether C{connectionLost} has been called.
    @ivar closedReason: The value of the I{reason} parameter passed to
        C{connectionLost}.
    @ivar closedDeferred: If set to a L{Deferred}, this will be fired when
        C{connectionLost} is called.
Exemplo n.º 33
0
from twext.enterprise.dal.record import fromTable
from twext.enterprise.dal.syntax import Select
from twext.enterprise.jobs.workitem import AggregatedWorkItem, RegeneratingWorkItem
from twext.python.log import Logger
from twisted.internet.defer import inlineCallbacks, returnValue, succeed, \
    DeferredList
from twistedcaldav.config import config
from txdav.caldav.datastore.sql import CalendarStoreFeatures
from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord
from txdav.common.datastore.sql_directory import GroupsRecord
from txdav.common.datastore.sql_tables import schema, _BIND_MODE_OWN
import datetime
import itertools
import time

log = Logger()


class GroupCacherPollingWork(
    RegeneratingWorkItem,
    fromTable(schema.GROUP_CACHER_POLLING_WORK)
):

    group = "group_cacher_polling"

    @classmethod
    def initialSchedule(cls, store, seconds):
        def _enqueue(txn):
            return GroupCacherPollingWork.reschedule(txn, seconds)

        if config.GroupCaching.Enabled:
Exemplo n.º 34
0
__all__ = ["http_PROPFIND"]

from twisted.python.failure import Failure
from twisted.internet.defer import inlineCallbacks, returnValue
from txweb2.http import HTTPError
from txweb2 import responsecode
from txweb2.http import StatusResponse
from txdav.xml import element as davxml
from txweb2.dav.http import MultiStatusResponse, statusForFailure, \
    ErrorResponse
from txweb2.dav.util import normalizeURL, davXMLFromStream, parentForURL

from twext.python.log import Logger

log = Logger()
"""
This is a direct copy of the twisted implementation of PROPFIND except that it uses the
findChildrenFaster method to optimize child privilege checking.
"""


@inlineCallbacks
def http_PROPFIND(self, request):
    """
    Respond to a PROPFIND request. (RFC 2518, section 8.1)
    """
    if not self.exists():
        # Return 403 if parent does not allow Bind
        parentURL = parentForURL(request.uri)
        parent = (yield request.locateResource(parentURL))
Exemplo n.º 35
0
    def emit(self, level, message, *args, **kwargs):
        if message.startswith("Unhandled unsolicited response:"):
            return

        Logger.emit(self, level, message, *args, **kwargs)
Exemplo n.º 36
0
class ScheduleResponseQueue(object):
    """
    Stores a list of (typically error) responses for use in a
    L{ScheduleResponse}.
    """
    log = Logger()

    schedule_response_element = caldavxml.ScheduleResponse
    response_element = caldavxml.Response
    recipient_element = caldavxml.Recipient
    recipient_uses_href = True
    request_status_element = caldavxml.RequestStatus
    error_element = davxml.Error
    response_description_element = davxml.ResponseDescription
    calendar_data_element = caldavxml.CalendarData

    ScheduleResonseDetails = namedtuple("ScheduleResonseDetails", [
        "recipient",
        "reqstatus",
        "calendar",
        "error",
        "message",
    ])

    def __init__(self, method, success_response, recipient_mapper=None):
        """
        @param method: the name of the method generating the queue.
        @param success_response: the response to return in lieu of a
            L{ScheduleResponse} if no responses are added to this queue.
        """
        self.responses = []
        self.method = method
        self.success_response = success_response
        self.recipient_mapper = recipient_mapper
        self.location = None

    def setLocation(self, location):
        """
        @param location: the value of the location header to return in the
            response, or None.
        """
        self.location = location

    def add(self,
            recipient,
            what,
            reqstatus=None,
            calendar=None,
            suppressErrorLog=False):
        """
        Add a response.
        @param recipient: the recipient for this response.
        @param what: a status code or a L{Failure} for the given recipient.
        @param reqstatus: the iTIP request-status for the given recipient.
        @param calendar: the calendar data for the given recipient response.
        @param suppressErrorLog: whether to suppress a log message for errors; primarily
            this is used when trying to process a VFREEBUSY over iMIP, which isn't
            supported.
        """
        if type(what) is int:
            code = what
            error = None
            message = responsecode.RESPONSES[code]
        elif isinstance(what, Failure):
            code = statusForFailure(what)
            error = self.errorForFailure(what)
            message = messageForFailure(what)
        else:
            raise AssertionError("Unknown data type: {}".format(what, ))

        if self.recipient_mapper is not None:
            recipient = self.recipient_mapper(recipient)

        if not suppressErrorLog and code > 400:  # Error codes only
            self.log.error(
                "Error during {method} for {r}: {msg}",
                method=self.method,
                r=recipient,
                msg=message,
            )

        details = ScheduleResponseQueue.ScheduleResonseDetails(
            self.recipient_element(davxml.HRef.fromString(recipient))
            if self.recipient_uses_href else
            self.recipient_element.fromString(recipient),
            self.request_status_element(reqstatus),
            calendar,
            error,
            self.response_description_element(message)
            if message is not None else None,
        )
        self.responses.append(details)

    def errorForFailure(self, failure):
        if failure.check(HTTPError) and isinstance(failure.value.response,
                                                   ErrorResponse):
            return self.error_element(failure.value.response.error)
        else:
            return None

    def clone(self, recipient, request_status, calendar_data, error, desc):
        """
        Add a response cloned from existing data.
        @param recipient: the recipient for this response.
        @param request_status: the iTIP request-status string.
        @param calendar_data: the calendar data for the response, or None.
        @param error: the error element content, or None.
        @param desc: the response description, or None.
        """

        details = ScheduleResponseQueue.ScheduleResonseDetails(
            self.recipient_element(davxml.HRef.fromString(recipient))
            if self.recipient_uses_href else
            self.recipient_element.fromString(recipient),
            self.request_status_element.fromString(request_status),
            calendar_data,
            self.error_element(*error) if error is not None else None,
            self.response_description_element.fromString(desc)
            if desc is not None else None,
        )
        self.responses.append(details)

    def response(self, format=None):
        """
        Generate a L{ScheduleResponseResponse} with the responses contained in the
        queue or, if no such responses, return the C{success_response} provided
        to L{__init__}.
        @return: the response.
        """
        if self.responses:
            # Convert our queue to all XML elements
            xml_responses = []
            for response in self.responses:
                children = []
                children.append(response.recipient)
                children.append(response.reqstatus)
                if response.calendar is not None:
                    children.append(
                        self.calendar_data_element.fromCalendar(
                            response.calendar, format))
                if response.error is not None:
                    children.append(response.error)
                if response.message is not None:
                    children.append(response.message)
                xml_responses.append(self.response_element(*children))

            return ScheduleResponseResponse(self.schedule_response_element,
                                            xml_responses, self.location)
        else:
            return self.success_response
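
    # Usage sketch (illustrative; "2.0;Success" is a standard iTIP
    # request-status string):
    #
    #     responses = ScheduleResponseQueue("POST", responsecode.OK)
    #     responses.add(recipient, responsecode.OK, reqstatus="2.0;Success")
    #     return responses.response()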
Exemplo n.º 37
0
    "http_REPORT",
]

import string

from twisted.internet.defer import deferredGenerator, waitForDeferred

from twext.python.log import Logger
from twext.web2 import responsecode
from twext.web2.http import HTTPError, StatusResponse
from twext.web2.dav import davxml
from twext.web2.dav.element.parser import lookupElement
from twext.web2.dav.http import ErrorResponse
from twext.web2.dav.util import davXMLFromStream

log = Logger()


max_number_of_matches = 500

class NumberOfMatchesWithinLimits(Exception):

    def __init__(self, limit):
        super(NumberOfMatchesWithinLimits, self).__init__()
        self.limit = limit

    def maxLimit(self):
        return self.limit

def http_REPORT(self, request):
Exemplo n.º 38
0
class MemCachePool(object):
    """
    A connection pool for MemCacheProtocol instances.

    @ivar clientFactory: The L{ClientFactory} implementation that will be used
        for each protocol.

    @ivar _maxClients: A C{int} indicating the maximum number of clients.

    @ivar _endpoint: An L{IStreamClientEndpoint} provider indicating the server
        to connect to.

    @ivar _reactor: The L{IReactorTCP} provider used to initiate new
        connections.

    @ivar _busyClients: A C{set} that contains all currently busy clients.

    @ivar _freeClients: A C{set} that contains all currently free clients.

    @ivar _pendingConnects: A C{int} indicating how many connections are in
        progress.
    """
    log = Logger()

    clientFactory = MemCacheClientFactory

    REQUEST_LOGGING_SIZE = 1024

    def __init__(self, endpoint, maxClients=5, reactor=None):
        """
        @param endpoint: An L{IStreamClientEndpoint} indicating the server to
            connect to.

        @param maxClients: A C{int} indicating the maximum number of clients.

        @param reactor: An L{IReactorTCP} provider used to initiate new
            connections.
        """
        self._endpoint = endpoint
        self._maxClients = maxClients

        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

        self.shutdown_deferred = None
        self.shutdown_requested = False
        reactor.addSystemEventTrigger(
            'before', 'shutdown', self._shutdownCallback
        )

        self._busyClients = set([])
        self._freeClients = set([])
        self._pendingConnects = 0
        self._commands = []

    def _isIdle(self):
        return (
            len(self._busyClients) == 0 and
            len(self._commands) == 0 and
            self._pendingConnects == 0
        )

    def _shutdownCallback(self):
        self.shutdown_requested = True
        if self._isIdle():
            return None
        self.shutdown_deferred = Deferred()
        return self.shutdown_deferred

    def _newClientConnection(self):
        """
        Create a new client connection.

        @return: A L{Deferred} that fires with the L{IProtocol} instance.
        """
        self.log.debug(
            "Initiating new client connection to: {r!r}", r=self._endpoint
        )
        self._logClientStats()

        self._pendingConnects += 1

        def _connected(client):
            self._pendingConnects -= 1

            return client

        factory = self.clientFactory()
        factory.noisy = False

        factory.connectionPool = self

        connect(self._endpoint, factory)
        d = factory.deferred

        d.addCallback(_connected)
        return d

    def _performRequestOnClient(self, client, command, *args, **kwargs):
        """
        Perform the given request on the given client.

        @param client: A L{PooledMemCacheProtocol} that will be used to perform
            the given request.

        @param command: A C{str} representing an attribute of
            L{MemCacheProtocol}.
        @param args: Any positional arguments that should be passed to
            C{command}.
        @param kwargs: Any keyword arguments that should be passed to
            C{command}.

        @return: A L{Deferred} that fires with the result of the given command.
        """
        def _freeClientAfterRequest(result):
            self.clientFree(client)
            return result

        def _reportError(failure):
            """
            Upon memcache error, log the failed request along with the error
            message and free the client.
            """
            self.log.error(
                "Memcache error: {ex}; request: {cmd} {args}",
                ex=failure.value,
                cmd=command,
                args=" ".join(args)[:self.REQUEST_LOGGING_SIZE],
            )
            self.clientFree(client)

        self.clientBusy(client)
        method = getattr(client, command, None)
        if method is not None:
            d = method(*args, **kwargs)
        else:
            d = fail(Failure(NoSuchCommand()))

        d.addCallbacks(_freeClientAfterRequest, _reportError)

        return d

    def performRequest(self, command, *args, **kwargs):
        """
        Select an available client and perform the given request on it.

        @param command: A C{str} representing an attribute of
            L{MemCacheProtocol}.
        @param args: Any positional arguments that should be passed to
            C{command}.
        @param kwargs: Any keyword arguments that should be passed to
            C{command}.

        @return: A L{Deferred} that fires with the result of the given command.
        """

        if len(self._freeClients) > 0:
            client = self._freeClients.pop()

            d = self._performRequestOnClient(
                client, command, *args, **kwargs)

        elif (
            len(self._busyClients) + self._pendingConnects >= self._maxClients
        ):
            d = Deferred()
            self._commands.append((d, command, args, kwargs))
            self.log.debug(
                "Command queued: {c}, {a!r}, {k!r}", c=command, a=args, k=kwargs
            )
            self._logClientStats()

        else:
            d = self._newClientConnection()
            d.addCallback(self._performRequestOnClient,
                          command, *args, **kwargs)

        return d

    def _logClientStats(self):
        self.log.debug(
            "Clients #free: {f}, #busy: {b}, #pending: {p}, #queued: {q}",
            f=len(self._freeClients),
            b=len(self._busyClients),
            p=self._pendingConnects,
            q=len(self._commands),
        )

    def clientGone(self, client):
        """
        Notify that the given client is to be removed from the pool completely.

        @param client: An instance of L{PooledMemCacheProtocol}.
        """
        if client in self._busyClients:
            self._busyClients.remove(client)

        elif client in self._freeClients:
            self._freeClients.remove(client)

        self.log.debug("Removed client: {c!r}", c=client)
        self._logClientStats()

    def clientBusy(self, client):
        """
        Notify that the given client is being used to complete a request.

        @param client: An instance of C{self.clientFactory}
        """

        if client in self._freeClients:
            self._freeClients.remove(client)

        self._busyClients.add(client)

        self.log.debug("Busied client: {c!r}", c=client)
        self._logClientStats()

    def clientFree(self, client):
        """
        Notify that the given client is free to handle more requests.

        @param client: An instance of C{self.clientFactory}
        """
        if client in self._busyClients:
            self._busyClients.remove(client)

        self._freeClients.add(client)

        if self.shutdown_deferred and self._isIdle():
            self.shutdown_deferred.callback(None)

        if len(self._commands) > 0:
            d, command, args, kwargs = self._commands.pop(0)

            self.log.debug(
                "Performing Queued Command: {c}, {a}, {k}",
                c=command, a=args, k=kwargs,
            )
            self._logClientStats()

            _ign_d = self.performRequest(
                command, *args, **kwargs)

            _ign_d.addCallback(d.callback)

        self.log.debug("Freed client: {c!r}", c=client)
        self._logClientStats()

    def suggestMaxClients(self, maxClients):
        """
        Suggest the maximum number of concurrently connected clients.

        @param maxClients: A C{int} indicating how many client connections we
            should keep open.
        """
        self._maxClients = maxClients

    def get(self, *args, **kwargs):
        return self.performRequest('get', *args, **kwargs)

    def set(self, *args, **kwargs):
        return self.performRequest('set', *args, **kwargs)

    def checkAndSet(self, *args, **kwargs):
        return self.performRequest('checkAndSet', *args, **kwargs)

    def delete(self, *args, **kwargs):
        return self.performRequest('delete', *args, **kwargs)

    def add(self, *args, **kwargs):
        return self.performRequest('add', *args, **kwargs)

    def incr(self, *args, **kwargs):
        return self.performRequest('increment', *args, **kwargs)

    def decr(self, *args, **kwargs):
        return self.performRequest('decrement', *args, **kwargs)

    def flushAll(self, *args, **kwargs):
        return self.performRequest('flushAll', *args, **kwargs)
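
A minimal usage sketch (not part of the original source): assuming `pool` is a
configured instance of the pool class above, each convenience wrapper funnels
through performRequest() and returns a Deferred that fires with the result of
the corresponding twisted MemCacheProtocol command.

from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def cacheRoundTrip(pool):
    # set() fires with True on success
    yield pool.set("some-key", "some-value")

    # get() fires with a (flags, value) tuple
    _ignore_flags, value = yield pool.get("some-key")
    assert value == "some-value"

    # delete() fires with True once the key is removed
    yield pool.delete("some-key")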
Exemplo n.º 39
from twisted.internet.defer import inlineCallbacks, returnValue
from twext.web2 import responsecode
from twext.web2.dav.util import allDataFromStream, parentForURL
from twext.web2.http import HTTPError, StatusResponse

from twext.python.log import Logger
from twext.web2.dav.http import ErrorResponse

from twistedcaldav.caldavxml import caldav_namespace

from twistedcaldav.method.put_common import StoreCalendarObjectResource
from twistedcaldav.resource import isPseudoCalendarCollectionResource
from twistedcaldav.static import CalDAVFile

log = Logger()

from twistedcaldav.carddavxml import carddav_namespace
from twistedcaldav.method.put_addressbook_common import StoreAddressObjectResource
from twistedcaldav.resource import isAddressBookCollectionResource

@inlineCallbacks
def http_PUT(self, request):

    parentURL = parentForURL(request.uri)
    parent = (yield request.locateResource(parentURL))

    if isPseudoCalendarCollectionResource(parent):

        # Content-type check
        content_type = request.headers.getHeader("content-type")
Exemplo n.º 40
class MemCacheClientFactory(ReconnectingClientFactory):
    """
    A client factory for MemCache that reconnects and notifies a pool of its
    state.

    @ivar connectionPool: A managing connection pool that we notify of events.
    @ivar deferred: A L{Deferred} that represents the initial connection.
    @ivar _protocolInstance: The current instance of our protocol that we pass
        to our connectionPool.
    """
    log = Logger()

    protocol = PooledMemCacheProtocol
    connectionPool = None
    _protocolInstance = None

    def __init__(self):
        self.deferred = Deferred()

    def clientConnectionLost(self, connector, reason):
        """
        Notify the connectionPool that we've lost our connection.
        """

        if self.connectionPool.shutdown_requested:
            # The reactor is stopping; don't reconnect
            return

        self.log.error("MemCache connection lost: {r}", r=reason)
        if self._protocolInstance is not None:
            self.connectionPool.clientBusy(self._protocolInstance)

        ReconnectingClientFactory.clientConnectionLost(
            self,
            connector,
            reason)

    def clientConnectionFailed(self, connector, reason):
        """
        Notify the connectionPool that we're unable to connect.
        """
        self.log.error("MemCache connection failed: {r}", r=reason)
        if self._protocolInstance is not None:
            self.connectionPool.clientBusy(self._protocolInstance)

        ReconnectingClientFactory.clientConnectionFailed(
            self,
            connector,
            reason)

    def buildProtocol(self, addr):
        """
        Attach C{self.connectionPool} to the protocol so that the protocol
        can notify the pool when we've connected.
        """
        if self._protocolInstance is not None:
            self.connectionPool.clientGone(self._protocolInstance)

        self._protocolInstance = self.protocol()
        self._protocolInstance.factory = self
        return self._protocolInstance
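
The factory is meant to be owned by a connection pool: the pool assigns itself
to factory.connectionPool before connecting, and the factory reports
connection state back through clientBusy()/clientGone(). A hedged wiring
sketch follows; the helper name and the direct reactor.connectTCP() call are
illustrative assumptions, and factory.deferred is assumed to be fired by the
pooled protocol (not shown here) once the first connection is made.

from twisted.internet import reactor

def connectPooledMemCache(pool, host="127.0.0.1", port=11211):
    # Illustrative only: point the factory back at its owning pool and
    # open the connection; factory.deferred fires once connected.
    factory = MemCacheClientFactory()
    factory.connectionPool = pool
    reactor.connectTCP(host, port, factory)
    return factory.deferred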
Exemplo n.º 41
from os import close
from errno import EAGAIN, ENOBUFS
from socket import (socketpair, fromfd, error as SocketError, AF_UNIX,
                    SOCK_STREAM, SOCK_DGRAM)

from zope.interface import Interface

from twisted.internet.abstract import FileDescriptor
from twisted.internet.protocol import Protocol, Factory

from twext.python.log import Logger
from twext.python.sendmsg import sendmsg, recvmsg
from twext.python.sendfd import sendfd, recvfd
from twext.python.sendmsg import getsockfam

log = Logger()



class InheritingProtocol(Protocol, object):
    """
    When a connection comes in on this protocol, stop reading and writing, and
    dispatch the socket to another process via its factory.
    """

    def connectionMade(self):
        """
        A connection was received; transmit the file descriptor to another
        process via L{InheritingProtocolFactory} and remove my transport from
        the reactor.
        """
Exemplo n.º 42
import os
import re
import sys
import base64
import itertools

from subprocess import Popen, PIPE, STDOUT
from hashlib import md5, sha1

from twisted.internet import reactor
from twisted.web import client
from twisted.python import failure
from twext.python.log import Logger

log = Logger()
from twext.internet.gaiendpoint import GAIEndpoint
from twext.internet.adaptendpoint import connect
from twext.internet.ssl import simpleClientContextFactory

##
# System Resources (Memory size and processor count)
##

try:
    from ctypes import (
        cdll,
        c_int,
        c_uint64,
        c_ulong,
        c_char_p,
Exemplo n.º 43
__all__ = ["http_PROPFIND"]

from twisted.python.failure import Failure
from twisted.internet.defer import inlineCallbacks, returnValue
from twext.web2.http import HTTPError
from twext.web2 import responsecode
from twext.web2.http import StatusResponse
from txdav.xml import element as davxml
from twext.web2.dav.http import MultiStatusResponse, statusForFailure, \
    ErrorResponse
from twext.web2.dav.util import normalizeURL, davXMLFromStream, parentForURL

from twext.python.log import Logger

log = Logger()

"""
This is a direct copy of the twisted implementation of PROPFIND, except that
it uses the findChildrenFaster method to optimize child privilege checking.
"""

@inlineCallbacks
def http_PROPFIND(self, request):
    """
    Respond to a PROPFIND request. (RFC 2518, section 8.1)
    """
    if not self.exists():
        # Return 403 if parent does not allow Bind
        parentURL = parentForURL(request.uri)
        parent = (yield request.locateResource(parentURL))
Exemplo n.º 44
from calendarserver.accesslog import DirectoryLogWrapperResource
from calendarserver.provision.root import RootResource
from calendarserver.tools.util import checkDirectory
from calendarserver.webadmin.resource import WebAdminResource
from calendarserver.webcal.resource import WebCalendarResource

from txdav.common.datastore.sql import CommonDataStore as CommonSQLDataStore
from txdav.common.datastore.file import CommonDataStore as CommonFileDataStore
from txdav.common.datastore.sql import current_sql_schema
from txdav.common.datastore.upgrade.sql.upgrade import NotAllowedToUpgrade
from twext.python.filepath import CachingFilePath
from urllib import quote
from twisted.python.usage import UsageError


from twext.python.log import Logger

log = Logger()


def pgServiceFromConfig(config, subServiceFactory, uid=None, gid=None):
    """
    Construct a L{PostgresService} from a given configuration and subservice.

    @param config: the configuration to derive postgres configuration
        parameters from.

    @param subServiceFactory: A factory for the service to start once the
        L{PostgresService} has been initialized.

    @param uid: The user-ID to run the PostgreSQL server as.

    @param gid: The group-ID to run the PostgreSQL server as.
Exemplo n.º 45
class DAVPrincipalResource (
    WebDAVServerInfoMixIn,
    DirectoryPrincipalPropertySearchMixIn,
    SuperDAVPrincipalResource,
    DirectoryRenderingMixIn
):
    """
    Extended L{txweb2.dav.static.DAVFile} implementation.
    """
    log = Logger()

    def liveProperties(self):
        return super(DAVPrincipalResource, self).liveProperties() + (
            (calendarserver_namespace, "expanded-group-member-set"),
            (calendarserver_namespace, "expanded-group-membership"),
            (calendarserver_namespace, "record-type"),
        )

    http_REPORT = http_REPORT

    def render(self, request):
        if not self.exists():
            return responsecode.NOT_FOUND

        if self.isCollection():
            return self.renderDirectory(request)
        return super(DAVResource, self).render(request)

    @inlineCallbacks
    def readProperty(self, property, request):
        if type(property) is tuple:
            qname = property
        else:
            qname = property.qname()

        namespace, name = qname

        if namespace == dav_namespace:
            if name == "resourcetype":
                rtype = self.resourceType()
                returnValue(rtype)

        elif namespace == calendarserver_namespace:
            if name == "expanded-group-member-set":
                principals = (yield self.expandedGroupMembers())
                returnValue(customxml.ExpandedGroupMemberSet(
                    *[element.HRef(p.principalURL()) for p in principals]
                ))

            elif name == "expanded-group-membership":
                principals = (yield self.expandedGroupMemberships())
                returnValue(customxml.ExpandedGroupMembership(
                    *[element.HRef(p.principalURL()) for p in principals]
                ))

            elif name == "record-type":
                if hasattr(self, "record"):
                    returnValue(
                        customxml.RecordType(
                            self.record.service.recordTypeToOldName(
                                self.record.recordType
                            )
                        )
                    )
                else:
                    raise HTTPError(StatusResponse(
                        responsecode.NOT_FOUND,
                        "Property %s does not exist." % (qname,)
                    ))

        result = (yield super(DAVPrincipalResource, self).readProperty(property, request))
        returnValue(result)

    def groupMembers(self):
        return succeed(())

    def expandedGroupMembers(self):
        return succeed(())

    def groupMemberships(self):
        return succeed(())

    def expandedGroupMemberships(self):
        return succeed(())

    def resourceType(self):
        # Allow live property to be overridden by dead property
        if self.deadProperties().contains((dav_namespace, "resourcetype")):
            return self.deadProperties().get((dav_namespace, "resourcetype"))
        if self.isCollection():
            return element.ResourceType(element.Principal(), element.Collection())
        else:
            return element.ResourceType(element.Principal())
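
A short caller sketch (hypothetical helper, assuming the same module scope as
the class above): readProperty() accepts a plain (namespace, name) tuple in
place of a property element.

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def fetchRecordType(principal, request):
    # Resolve the custom calendarserver record-type property by qname.
    prop = yield principal.readProperty(
        (calendarserver_namespace, "record-type"), request)
    returnValue(prop)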
Exemplo n.º 46
class AuthorizedHTTPGetter(client.HTTPPageGetter):
    log = Logger()

    def handleStatus_401(self):

        self.quietLoss = 1
        self.transport.loseConnection()

        if not hasattr(self.factory, "username"):
            self.factory.deferred.errback(
                failure.Failure(Unauthorized("Authentication required")))
            return self.factory.deferred

        if hasattr(self.factory, "retried"):
            self.factory.deferred.errback(
                failure.Failure(
                    Unauthorized(
                        "Could not authenticate user %s with calendar server" %
                        (self.factory.username, ))))
            return self.factory.deferred

        self.factory.retried = True

        # self.log.debug("Got a 401 trying to inject [{hdrs}]", hdrs=self.headers)
        details = {}
        basicAvailable = digestAvailable = False
        wwwauth = self.headers.get("www-authenticate")
        for item in wwwauth:
            if item.startswith("basic "):
                basicAvailable = True
            if item.startswith("digest "):
                digestAvailable = True
                wwwauth = item[7:]

                def unq(s):
                    if s[0] == s[-1] == '"':
                        return s[1:-1]
                    return s

                parts = wwwauth.split(',')
                for (k, v) in [p.split('=', 1) for p in parts]:
                    details[k.strip()] = unq(v.strip())

        user = self.factory.username
        pswd = self.factory.password

        if digestAvailable and details:
            digest = calcResponse(
                calcHA1(details.get('algorithm'), user,
                        details.get('realm'), pswd, details.get('nonce'),
                        details.get('cnonce')), details.get('algorithm'),
                details.get('nonce'), details.get('nc'), details.get('cnonce'),
                details.get('qop'), self.factory.method, self.factory.url,
                None)

            if details.get('qop'):
                response = (
                    'Digest username="%s", realm="%s", nonce="%s", uri="%s", '
                    'response=%s, algorithm=%s, cnonce="%s", qop=%s, nc=%s' % (
                        user,
                        details.get('realm'),
                        details.get('nonce'),
                        self.factory.url,
                        digest,
                        details.get('algorithm'),
                        details.get('cnonce'),
                        details.get('qop'),
                        details.get('nc'),
                    ))
            else:
                response = (
                    'Digest username="%s", realm="%s", nonce="%s", uri="%s", '
                    'response=%s, algorithm=%s' % (
                        user,
                        details.get('realm'),
                        details.get('nonce'),
                        self.factory.url,
                        digest,
                        details.get('algorithm'),
                    ))

            self.factory.headers['Authorization'] = response

            if self.factory.scheme == 'https':
                connect(
                    GAIEndpoint(reactor, self.factory.host, self.factory.port,
                                simpleClientContextFactory(self.factory.host)),
                    self.factory)
            else:
                connect(
                    GAIEndpoint(reactor, self.factory.host, self.factory.port),
                    self.factory)
            # self.log.debug("Retrying with digest after 401")

            return self.factory.deferred

        elif basicAvailable:
            basicauth = "%s:%s" % (user, pswd)
            basicauth = "Basic " + base64.encodestring(basicauth)
            basicauth = basicauth.replace("\n", "")

            self.factory.headers['Authorization'] = basicauth

            if self.factory.scheme == 'https':
                connect(
                    GAIEndpoint(reactor, self.factory.host, self.factory.port,
                                simpleClientContextFactory(self.factory.host)),
                    self.factory)
            else:
                connect(
                    GAIEndpoint(reactor, self.factory.host, self.factory.port),
                    self.factory)
            # self.log.debug("Retrying with basic after 401")

            return self.factory.deferred

        else:
            self.factory.deferred.errback(
                failure.Failure(
                    Unauthorized(
                        "Mail gateway not able to process reply; calendar server returned 401 and doesn't support basic or digest"
                    )))
            return self.factory.deferred
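
The retry logic above depends on attributes the caller sets on the factory. A
hedged arming sketch (the URL and credentials are placeholders; the use of
HTTPClientFactory is an assumption based on the attributes that
handleStatus_401() reads):

from twisted.web import client

factory = client.HTTPClientFactory("https://calendar.example.com/inbox/")
factory.protocol = AuthorizedHTTPGetter
# Without `username` the handler errbacks immediately; a second 401 is
# detected via the `retried` flag and errbacks instead of looping forever.
factory.username = "serviceaccount"
factory.password = "secret"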
Exemplo n.º 47
from pycalendar.datetime import DateTime
from pycalendar.duration import Duration
from twext.enterprise.dal.record import fromTable
from twext.enterprise.dal.syntax import Delete, Select
from twext.enterprise.jobqueue import WorkItem, RegeneratingWorkItem
from twext.python.log import Logger
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twistedcaldav.config import config
from txdav.caldav.datastore.scheduling.icalsplitter import iCalSplitter
from txdav.caldav.datastore.sql import CalendarStoreFeatures, ComponentUpdateState
from txdav.common.datastore.sql_tables import schema
import datetime
import hashlib

log = Logger()


class GroupCacherPollingWork(
    RegeneratingWorkItem,
    fromTable(schema.GROUP_CACHER_POLLING_WORK)
):

    group = "group_cacher_polling"

    @classmethod
    def initialSchedule(cls, store, seconds):
        def _enqueue(txn):
            return GroupCacherPollingWork.reschedule(txn, seconds)

        if config.InboxCleanup.Enabled:
Exemplo n.º 48
class CachingPropertyStore (object):
    """
    DAV property store using a dict in memory on top of another
    property store implementation.
    """
    log = Logger()

    def __init__(self, propertyStore):
        self.propertyStore = propertyStore
        self.resource = propertyStore.resource

    def get(self, qname, uid=None):
        # self.log.debug("Get: {p}, {n}", p=self.resource.fp.path, n=qname)

        cache = self._cache()

        cachedQname = qname + (uid,)

        if cachedQname in cache:
            property = cache.get(cachedQname, None)
            if property is None:
                self.log.debug("Cache miss: {s!r}, {p}, {n}", s=self, p=self.resource.fp.path, n=qname)
                try:
                    property = self.propertyStore.get(qname, uid)
                except HTTPError:
                    del cache[cachedQname]
                    raise PropertyNotFoundError(qname)
                cache[cachedQname] = property

            return property
        else:
            raise PropertyNotFoundError(qname)

    def set(self, property, uid=None):
        # self.log.debug("Set: {p}, {prop!r}", p=self.resource.fp.path, prop=property)

        cache = self._cache()

        cachedQname = property.qname() + (uid,)

        cache[cachedQname] = None
        self.propertyStore.set(property, uid)
        cache[cachedQname] = property

    def contains(self, qname, uid=None):
        # self.log.debug("Contains: {p}, {n}", p=self.resource.fp.path, n=qname)

        cachedQname = qname + (uid,)

        try:
            cache = self._cache()
        except HTTPError, e:
            if e.response.code == responsecode.NOT_FOUND:
                return False
            else:
                raise

        if cachedQname in cache:
            # self.log.debug("Contains cache hit: {s!r}, {p}, {n}", s=self, p=self.resource.fp.path, n=qname)
            return True
        else:
            return False
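
A wrap-and-read sketch (the wrapped store variable is hypothetical): the first
get() after a cache miss consults the underlying store once, after which the
value is served from the in-memory dict.

# Hypothetical wiring: wrap any concrete DAV property store in the cache.
cachingStore = CachingPropertyStore(someUnderlyingPropertyStore)

qname = ("DAV:", "displayname")
if cachingStore.contains(qname):
    # A None placeholder in the cache falls through to the wrapped store;
    # the fetched value is then cached for subsequent reads.
    displayName = cachingStore.get(qname)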
Exemplo n.º 49
##

import os
import re
import sys
import base64

from subprocess import Popen, PIPE, STDOUT
from hashlib import md5, sha1

from twisted.internet import ssl, reactor
from twisted.web import client
from twisted.python import failure
from twext.python.log import LoggingMixIn, Logger

log = Logger()
from twext.internet.gaiendpoint import GAIEndpoint

##
# System Resources (Memory size and processor count)
##

try:
    from ctypes import *
    import ctypes.util
    hasCtypes = True
except ImportError:
    hasCtypes = False

if sys.platform == "darwin" and hasCtypes:
    libc = cdll.LoadLibrary(ctypes.util.find_library("libc"))
Exemplo n.º 50
class DirectoryService(BaseDirectoryService):
    """
    OpenDirectory directory service.
    """
    log = Logger()

    recordType = ConstantsContainer((
        BaseRecordType.user,
        BaseRecordType.group,
    ))

    fieldName = ConstantsContainer((BaseDirectoryService.fieldName, FieldName))

    # The auto release pool is a class attribute; if _poolDeletionRegistered
    # is True, that means someone has already added a SystemEventTrigger
    _poolDeletionRegistered = False

    def __init__(self,
                 nodeName=ODSearchPath.search.value,
                 suppressSystemRecords=False):
        """
        @param nodeName: the OpenDirectory node to query against.
        @type nodeName: bytes

        @param suppressSystemRecords: If True, any results returned from this
            service will not contain Mac OS X "system" records.
        @type suppressSystemRecords: C{Boolean}
        """
        self._nodeName = nodeName
        self._suppressSystemRecords = suppressSystemRecords

        # Create an autorelease pool which will get deleted when someone
        # calls _maybeResetPool(), but no more often than every 60 seconds,
        # hence the "maybe"
        DirectoryService._resetAutoreleasePool()

        # Register a pool delete to happen at shutdown
        if not DirectoryService._poolDeletionRegistered:
            from twisted.internet import reactor
            DirectoryService._poolDeletionRegistered = True
            reactor.addSystemEventTrigger("after", "shutdown",
                                          DirectoryService._deletePool)

    @classmethod
    def _deletePool(cls):
        """
        Delete the autorelease pool if we have one
        """
        if hasattr(cls, "_autoReleasePool"):
            del cls._autoReleasePool

    @classmethod
    def _resetAutoreleasePool(cls):
        """
        Create an autorelease pool, deleting the old one if we had one.
        """
        cls._deletePool()

        cls._autoReleasePool = NSAutoreleasePool.alloc().init()
        cls._poolCreationTime = time()

    @classmethod
    def _maybeResetPool(cls):
        """
        If it's been at least 60 seconds since the last time we created the
        pool, delete the pool (which drains it) and create a new one.
        """
        poolCreationTime = getattr(cls, "_poolCreationTime", 0)
        now = time()
        if (now - poolCreationTime) > 60:
            cls._resetAutoreleasePool()

    @property
    def nodeName(self):
        return self._nodeName

    @property
    def realmName(self):
        return u"OpenDirectory Node {self.nodeName!r}".format(self=self)

    @property
    def node(self):
        """
        Get the underlying (network) directory node.
        """
        if not hasattr(self, "_node"):
            self._node = self._connect(self._nodeName)
        return self._node

    @property
    def localNode(self):
        """
        Get the local node from the search path (if any), so that we can
        handle it specially.
        """
        if not hasattr(self, "_localNode"):

            if self.nodeName == ODSearchPath.search.value:
                details, error = self.node.nodeDetailsForKeys_error_(
                    (ODAttribute.searchPath.value, ), None)
                if error:
                    self.log.error("Error while examining Search path",
                                   error=error)
                    raise OpenDirectoryConnectionError(
                        "Unable to connect to OpenDirectory node", error)

                if (ODSearchPath.local.value
                        in details[ODAttribute.searchPath.value]):
                    self._localNode = self._connect(ODSearchPath.local.value)
                else:
                    self._localNode = None

            elif self.nodeName == ODSearchPath.local.value:
                self._localNode = self.node

            else:
                self._localNode = None

        return self._localNode

    @property
    def session(self):
        """
        Get the underlying directory session.
        """
        if not hasattr(self, "_session"):
            session = ODSession.defaultSession()
            self._session = session
        return self._session

    def _connect(self, nodeName):
        """
        Connect to the directory server.

        @param nodeName: The OD node name to connect to
        @type nodeName: C{str}

        @return: the OD node

        @raises: L{OpenDirectoryConnectionError} if unable to connect.
        """

        node, error = ODNode.nodeWithSession_name_error_(
            self.session, nodeName, None)

        if error:
            self.log.error(
                "Error while trying to connect to OpenDirectory node "
                "{source.nodeName!r}: {error}",
                error=error)
            raise OpenDirectoryConnectionError(
                "Unable to connect to OpenDirectory node", error)

        return node

    def _queryStringAndRecordTypeFromMatchExpression(self, expression):
        """
        Generates an OD query string from a match expression.

        @param expression: A match expression.
        @type expression: L{MatchExpression}

        @return: tuple(OD query string, query's OD record type string)
        @rtype: tuple(C{unicode}, C{unicode})
        """
        matchType = ODMatchType.fromMatchType(expression.matchType)
        if matchType is None:
            raise QueryNotSupportedError(
                "Unknown match type: {0}".format(matchType))

        flags = tuple(expression.flags)

        if MatchFlags.NOT in flags:
            notOp = u"!"
        else:
            notOp = u""

        # if MatchFlags.caseInsensitive not in flags:
        #     raise NotImplementedError("Need to handle case sensitive")

        if expression.fieldName is self.fieldName.recordType:
            return (notOp,
                    ODRecordType.fromRecordType(expression.fieldValue).value)

        if expression.fieldName is self.fieldName.uid:
            odAttr = ODAttribute.guid
            value = expression.fieldValue

        else:
            odAttr = ODAttribute.fromFieldName(expression.fieldName)
            if odAttr is None:
                raise OpenDirectoryQueryError("Unknown field name: {0}".format(
                    expression.fieldName))
            value = expression.fieldValue

        value = unicode(value)  # We want unicode
        value = value.translate(QUOTING_TABLE)  # Escape special chars

        return (
            matchType.queryString.format(notOp=notOp,
                                         attribute=odAttr.value,
                                         value=value),
            None,
        )

    def _queryStringAndRecordTypesFromCompoundExpression(
            self, expression, recordTypes):
        """
        Generates an OD query string from a compound expression.

        @param expression: A compound expression.
        @type expression: L{MatchExpression}

        @param recordTypes: allowed OD record type strings
        @type recordTypes: set(C{unicode})

        @return: tuple(OD query string, set(query's OD record type strings))
        @rtype: (C{unicode}, set(C{unicode}))
        """
        if recordTypes is None:
            recordTypes = set([t.value for t in ODRecordType.iterconstants()])

        queryTokens = []
        for subExpression in expression.expressions:
            queryToken, subExpRecordTypes = (
                self._queryStringAndRecordTypesFromExpression(
                    subExpression, recordTypes))
            if subExpRecordTypes:
                if isinstance(subExpRecordTypes, unicode):
                    # AND or NOR
                    if (expression.operand is Operand.AND) != bool(queryToken):
                        if expression.operand is Operand.AND:
                            recordTypes = (recordTypes
                                           & set([subExpRecordTypes]))
                        else:
                            recordTypes = (recordTypes -
                                           set([subExpRecordTypes]))
                        queryToken = None
                    else:
                        raise QueryNotSupportedError(
                            "Record type matches must AND or NOR")
                else:
                    recordTypes = subExpRecordTypes

            if queryToken:
                queryTokens.append(queryToken)

        if queryTokens:
            if len(queryTokens) > 1:
                if expression.operand is Operand.AND:
                    queryTokens.insert(0, u"&")
                else:
                    queryTokens.insert(0, u"|")

            if len(queryTokens) > 2:
                queryTokens.insert(0, u"(")
                queryTokens.append(u")")

        return (u"".join(queryTokens), recordTypes)

    def _queryStringAndRecordTypesFromExpression(
        self,
        expression,
        recordTypes=set([t.value for t in ODRecordType.iterconstants()])):
        """
        Converts either a MatchExpression or a CompoundExpression into an OD
        query string.

        @param expression: An expression.
        @type expression: L{MatchExpression} or L{CompoundExpression}

        @param recordTypes: allowed OD record type strings
        @type recordTypes: set(C{unicode})

        @return: tuple(OD query string, set(query's OD record type strings))
        @rtype: (C{unicode}, set(C{unicode}))
        """

        if isinstance(expression, MatchExpression):
            queryString, recordType = (
                self._queryStringAndRecordTypeFromMatchExpression(expression))
            return (queryString, recordType if recordType else recordTypes)

        if isinstance(expression, CompoundExpression):
            return self._queryStringAndRecordTypesFromCompoundExpression(
                expression, recordTypes)

        raise QueryNotSupportedError(
            "Unknown expression type: {0!r}".format(expression))

    def _queryFromCompoundExpression(self,
                                     expression,
                                     recordTypes=None,
                                     local=False,
                                     limitResults=None):
        """
        Form an OpenDirectory query from a compound expression.

        @param expression: A compound expression.
        @type expression: L{CompoundExpression}

        @param local: Whether to restrict the query to the local node
        @type local: C{Boolean}

        @return: A native OpenDirectory query or C{None} if the query will
            return no records
        @rtype: L{ODQuery}
        """

        if local:
            node = self.localNode
        else:
            node = self.node

        queryString, _ignore_expressionRecordTypes = (
            self._queryStringAndRecordTypesFromExpression(expression))

        # Scrub unsupported recordTypes
        supportedODRecordTypes = []
        for recordType in self.recordTypes():
            odRecordType = ODRecordType.fromRecordType(recordType)
            if odRecordType is not None:
                supportedODRecordTypes.append(odRecordType.value)
        if recordTypes is not None:
            scrubbedRecordTypes = []
            for recordType in recordTypes:
                recordType = ODRecordType.fromRecordType(recordType).value
                if recordType in supportedODRecordTypes:
                    scrubbedRecordTypes.append(recordType)
        else:
            scrubbedRecordTypes = supportedODRecordTypes

        if not scrubbedRecordTypes:
            # None of the requested recordTypes are supported.
            return None

        if queryString:
            matchType = ODMatchType.compound.value
        else:
            matchType = ODMatchType.any.value

        if limitResults is None:
            maxResults = 0
        else:
            maxResults = limitResults

        query, error = ODQuery.queryWithNode_forRecordTypes_attribute_matchType_queryValues_returnAttributes_maximumResults_error_(
            node, scrubbedRecordTypes, None, matchType, queryString,
            self._getFetchAttributes(), maxResults, None)

        if error:
            self.log.error(
                "Error while forming OpenDirectory compound query: {error}",
                error=error)
            raise OpenDirectoryQueryError(
                "Unable to form OpenDirectory compound query", error)

        return query

    def _getFetchAttributes(self):
        if not hasattr(self, "_fetchAttributes"):
            self._fetchAttributes = [
                a.value for a in ODAttribute.iterconstants()
            ]
        return self._fetchAttributes

    def _getSupportedODRecordTypes(self):
        if not hasattr(self, "_supportedODRecordTypes"):
            supportedODRecordTypes = []
            for rt in self.recordTypes():
                odRecordType = ODRecordType.fromRecordType(rt)
                if odRecordType is not None:
                    supportedODRecordTypes.append(odRecordType.value)
            self._supportedODRecordTypes = supportedODRecordTypes
        return self._supportedODRecordTypes

    def _queryFromMatchExpression(self,
                                  expression,
                                  recordTypes=None,
                                  local=False,
                                  limitResults=None):
        """
        Form an OpenDirectory query from a match expression.

        @param expression: A match expression.
        @type expression: L{MatchExpression}

        @param recordTypes: Record types to insert into the query; None for no
            filtering.
        @type recordTypes: iterable of L{NamedConstant}, or None

        @param local: Whether to restrict the query to the local node
        @type local: C{Boolean}

        @return: A native OpenDirectory query.
        @rtype: L{ODQuery}
        """
        if not isinstance(expression, MatchExpression):
            raise TypeError(expression)

        matchType = ODMatchType.fromMatchType(expression.matchType)
        if matchType is None:
            raise QueryNotSupportedError(
                "Unknown match type: {0}".format(matchType))
        matchType = matchType.value

        flags = tuple(expression.flags)

        if MatchFlags.caseInsensitive in flags:
            caseInsensitive = 0x100
        else:
            caseInsensitive = 0x0

        if limitResults is None:
            maxResults = 0
        else:
            maxResults = limitResults

        # For OpenDirectory, use guid for uid:
        if expression.fieldName is self.fieldName.uid:
            expression.fieldName = self.fieldName.guid

        if expression.fieldName is self.fieldName.recordType:

            queryAttribute = None
            queryValue = None
            matchType = ODMatchType.any.value
            caseInsensitive = 0x0

            odRecordTypes = [
                ODRecordType.fromRecordType(expression.fieldValue).value,
            ]
        else:
            if MatchFlags.NOT in flags:
                raise NotImplementedError()

            if recordTypes is None:
                odRecordTypes = [t.value for t in ODRecordType.iterconstants()]
            else:
                odRecordTypes = [
                    ODRecordType.fromRecordType(r).value for r in recordTypes
                ]

            queryAttribute = ODAttribute.fromFieldName(
                expression.fieldName).value

            # TODO: Add support for other value types:
            valueType = self.fieldName.valueType(expression.fieldName)
            if valueType == UUID:
                queryValue = unicode(expression.fieldValue).upper()
            else:
                queryValue = unicode(expression.fieldValue)

        if local:
            node = self.localNode
        else:
            node = self.node

        # Scrub unsupported recordTypes
        scrubbedRecordTypes = []
        for odRecordType in odRecordTypes:
            if odRecordType in self._getSupportedODRecordTypes():
                scrubbedRecordTypes.append(odRecordType)

        if not scrubbedRecordTypes:
            # None of the requested recordTypes are supported.
            raise UnsupportedRecordTypeError(u",".join(r.name
                                                       for r in recordTypes))

        query, error = ODQuery.queryWithNode_forRecordTypes_attribute_matchType_queryValues_returnAttributes_maximumResults_error_(
            node, scrubbedRecordTypes,
            queryAttribute, matchType | caseInsensitive, queryValue,
            self._getFetchAttributes(), maxResults, None)

        if error:
            self.log.error(
                "Error while forming OpenDirectory match query: {error}",
                error=error)
            raise OpenDirectoryQueryError(
                "Unable to form OpenDirectory match query", error)

        return query

    def _isSystemRecord(self, odRecord):
        """
        Examines the OD record to see if it's a Mac OS X system account record.

        @param odRecord: an OD record object

        @return: True if system account record, False otherwise
        @rtype: C{Boolean}
        """
        details, error = odRecord.recordDetailsForAttributes_error_(None, None)

        if error:
            self.log.error("Error while reading OpenDirectory record: {error}",
                           error=error)
            raise OpenDirectoryDataError("Unable to read OpenDirectory record",
                                         error)

        # GeneratedUID matches a special pattern
        guid = details.get(ODAttribute.guid.value, (u"", ))[0]
        if guid.lower().startswith("ffffeeee-dddd-cccc-bbbb-aaaa"):
            return True

        # ISHidden is True
        isHidden = details.get(ODAttribute.isHidden.value, False)
        if isHidden:
            return True

        # Record-type specific indicators...
        recType = details.get(ODAttribute.recordType.value, (u"", ))[0]

        # ...users with UniqueID <= 500 (other than 99)
        if recType == ODRecordType.user.value:
            uniqueId = int(
                details.get(ODAttribute.uniqueId.value, (u"0", ))[0])
            if uniqueId <= 500 and uniqueId != 99:
                return True

        # ...groups with PrimaryGroupID <= 500 (other than 99)
        elif recType == ODRecordType.group.value:
            primaryGroupId = int(
                details.get(ODAttribute.primaryGroupId.value, (u"0", ))[0])
            if primaryGroupId <= 500 and primaryGroupId != 99:
                return True

        # RecordName matches specific prefixes; if *all* RecordName values for
        # a record start with either of these prefixes, it's a system record.
        shortNames = details.get(ODAttribute.shortName.value, (u"", ))
        for shortName in shortNames:
            if not (shortName.startswith("_")
                    or shortName.startswith("com.apple.")):
                break
        else:
            return True

        return False

    @inlineCallbacks
    def _recordsFromQuery(self, query, timeoutSeconds=None):
        """
        Executes a query and generates directory records from it.

        @param query: A query.
        @type query: L{ODQuery}

        @param timeoutSeconds: number of seconds after which the request
            should timeout (currently unused)
        @type timeoutSeconds: C{integer}

        @return: The records produced by executing the query.
        @rtype: list of L{DirectoryRecord}
        """

        # We can call scheduleInRunLoop:forMode:, which will call back to
        # its delegate...

        if query is None:
            returnValue(())

        if DEFER_TO_THREAD:
            odRecords, error = (yield deferToThreadWithAutoReleasePool(
                query.resultsAllowingPartial_error_, False, None))
        else:
            odRecords, error = query.resultsAllowingPartial_error_(False, None)

        if error:
            self.log.error(
                "Error while executing OpenDirectory query: {error}",
                error=error)
            # Raise so the Deferred returned by inlineCallbacks errbacks;
            # wrapping the failure in returnValue() would hand the caller a
            # nested Deferred instead of a failure.
            raise OpenDirectoryQueryError(
                "Unable to execute OpenDirectory query", error)

        result = []
        for odRecord in odRecords:

            # Conditionally suppress system records
            if self._suppressSystemRecords and self._isSystemRecord(odRecord):
                continue

            try:
                record = DirectoryRecord(self, odRecord)
            except InvalidDirectoryRecordError as e:
                self.log.warn(
                    "Invalid OpenDirectory record ({error}).  "
                    "Fields: {error.fields}",
                    error=e)
                continue

            result.append(record)

        returnValue(result)

    def recordsFromNonCompoundExpression(self,
                                         expression,
                                         recordTypes=None,
                                         records=None,
                                         limitResults=None,
                                         timeoutSeconds=None):
        DirectoryService._maybeResetPool()

        if isinstance(expression, MatchExpression):
            self.log.debug("OD call: {}".format(expression))
            try:
                query = self._queryFromMatchExpression(
                    expression,
                    recordTypes=recordTypes,
                    limitResults=limitResults,
                )
                return self._recordsFromQuery(query,
                                              timeoutSeconds=timeoutSeconds)

            except QueryNotSupportedError:
                pass  # Let the superclass try

            except UnsupportedRecordTypeError:
                return succeed([])

        return BaseDirectoryService.recordsFromNonCompoundExpression(
            self,
            expression,
            limitResults=limitResults,
            timeoutSeconds=timeoutSeconds)

    @inlineCallbacks
    def recordsFromCompoundExpression(self,
                                      expression,
                                      recordTypes=None,
                                      records=None,
                                      limitResults=None,
                                      timeoutSeconds=None):
        """
        Returns records matching the CompoundExpression.  Because the local
        node doesn't perform compound queries in a case-insensitive fashion
        (though it will for a simple MatchExpression), this also calls
        localRecordsFromCompoundExpression(), which breaks the
        CompoundExpression up into MatchExpressions to send to the local
        node.
        """
        DirectoryService._maybeResetPool()

        try:
            self.log.debug("OD call: {}".format(expression))
            query = self._queryFromCompoundExpression(
                expression, recordTypes=recordTypes, limitResults=limitResults)

        except QueryNotSupportedError:
            returnValue((yield
                         BaseDirectoryService.recordsFromCompoundExpression(
                             self,
                             expression,
                             recordTypes=recordTypes,
                             limitResults=limitResults,
                             timeoutSeconds=timeoutSeconds,
                         )))

        results = yield self._recordsFromQuery(query)

        if self.localNode is not None:

            localRecords = yield self.localRecordsFromCompoundExpression(
                expression,
                recordTypes=recordTypes,
                limitResults=limitResults,
                timeoutSeconds=timeoutSeconds)
            for localRecord in localRecords:
                if localRecord not in results:
                    results.append(localRecord)

        returnValue(results)

    @inlineCallbacks
    def localRecordsFromCompoundExpression(self,
                                           expression,
                                           recordTypes=None,
                                           limitResults=None,
                                           timeoutSeconds=None):
        """
        Takes a CompoundExpression and recursively goes through each
        MatchExpression, passing those specifically to the local node, and
        ANDing or ORing the results as needed.
        """

        # We keep a set of resulting uids for each sub expression so it's
        # easy to either union (OR) or intersect (AND) the sets
        sets = []

        # Mapping of uid to record
        byUID = {}

        for subExpression in expression.expressions:

            if isinstance(subExpression, CompoundExpression):
                subRecords = yield self.localRecordsFromCompoundExpression(
                    subExpression,
                    recordTypes=recordTypes,
                    limitResults=limitResults,
                    timeoutSeconds=timeoutSeconds)

            elif isinstance(subExpression, MatchExpression):
                try:
                    subQuery = self._queryFromMatchExpression(
                        subExpression,
                        recordTypes=recordTypes,
                        local=True,
                        limitResults=limitResults)
                except UnsupportedRecordTypeError:
                    continue
                subRecords = yield self._recordsFromQuery(
                    subQuery, timeoutSeconds=timeoutSeconds)

            else:
                raise QueryNotSupportedError(
                    "Unsupported expression type: {}".format(
                        type(subExpression)))

            newSet = set()
            for record in subRecords:
                byUID[record.uid] = record
                newSet.add(record.uid)
            sets.append(newSet)

        results = []
        if byUID:  # If there are any records
            if expression.operand == Operand.AND:
                uids = set.intersection(*sets)
            elif expression.operand == Operand.OR:
                uids = set.union(*sets)
            else:
                raise QueryNotSupportedError("Unsupported operand: {}".format(
                    expression.operand))
            for uid in uids:
                results.append(byUID[uid])

        returnValue(results)

    def _getUserRecord(self, username):
        """
        Fetch the OD record for a given user.

        @return: ODRecord, or None
        """
        record, error = self.node.recordWithRecordType_name_attributes_error_(
            ODRecordType.user.value, username, None, None)
        if error:
            self.log.error("Error while looking up user: {error}", error=error)
            raise OpenDirectoryQueryError("Unable to look up user", error)

        return record

    @inlineCallbacks
    def recordWithUID(self, uid, timeoutSeconds=None):
        returnValue(
            firstResult(
                (yield
                 self.recordsWithFieldValue(BaseFieldName.uid,
                                            uid,
                                            timeoutSeconds=timeoutSeconds))))

    @inlineCallbacks
    def recordWithGUID(self, guid, timeoutSeconds=None):
        returnValue(
            firstResult(
                (yield
                 self.recordsWithFieldValue(BaseFieldName.guid,
                                            guid,
                                            timeoutSeconds=timeoutSeconds))))

    @inlineCallbacks
    def recordWithShortName(self, recordType, shortName, timeoutSeconds=None):

        records = yield self.recordsFromNonCompoundExpression(
            MatchExpression(self.fieldName.shortNames, shortName),
            recordTypes=(recordType, ),
            limitResults=1)

        try:
            record = uniqueResult(records)
        except DirectoryServiceError:
            self.log.error(
                "Duplicate records for name: {name} ({recordType})".format(
                    name=shortName, recordType=recordType.name))
            raise

        returnValue(record)
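
A hedged lookup sketch (service construction details omitted; the names follow
the methods and constants defined above):

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def findUser(service, shortName):
    # recordWithShortName() issues a single-result OD query and raises
    # if duplicate records are found for the name.
    record = yield service.recordWithShortName(
        service.recordType.user, shortName)
    returnValue(record)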
Exemplo n.º 51
# limitations under the License.
##

from twext.python.log import Logger

from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.names import dns
from twisted.names.authority import BindAuthority
from twisted.names.client import getResolver
from twisted.names.error import DomainError, AuthoritativeDomainError

from twistedcaldav.config import config

import socket

log = Logger()

DebugResolver = None


def getIPsFromHost(host):
    """
    Map a hostname to an IPv4 or IPv6 address.

    @param host: the hostname
    @type host: C{str}

    @return: a C{set} of IPs
    """
    ips = set()
    # Use AF_UNSPEC rather than iterating (socket.AF_INET, socket.AF_INET6)
Exemplo n.º 52
class DirectoryRecord(BaseDirectoryRecord):
    """
    OpenDirectory directory record.
    """

    log = Logger()

    # GUID is a required attribute for OD records.
    requiredFields = BaseDirectoryRecord.requiredFields + (
        BaseFieldName.guid, )

    def __init__(self, service, odRecord):
        details, error = odRecord.recordDetailsForAttributes_error_(None, None)

        if error:
            self.log.error("Error while reading OpenDirectory record: {error}",
                           error=error)
            raise OpenDirectoryDataError("Unable to read OpenDirectory record",
                                         error)

        def coerceType(fieldName, value):
            # Record type field value needs to be looked up
            if fieldName is service.fieldName.recordType:
                return ODRecordType.lookupByValue(value).recordType

            # Otherwise, cast to the valueType specified by the field name
            valueType = service.fieldName.valueType(fieldName)
            try:
                return valueType(value)
            except BaseException as e:
                raise OpenDirectoryDataError(
                    "Unable to coerce OD value {0!r} to type {1}: {2}".format(
                        value, valueType, e))

        fields = {}
        for name, values in details.iteritems():
            # Ignore attributes that we don't map to fields
            if name in (
                    # We get this attribute even though we did not ask for it
                    ODAttribute.metaRecordName.value,
                    # We fetch these attributes only to look for system accounts
                    ODAttribute.uniqueId.value,
                    ODAttribute.primaryGroupId.value,
                    ODAttribute.isHidden.value,
            ):
                continue

            try:
                attribute = ODAttribute.lookupByValue(name)
            except ValueError:
                self.log.debug(
                    "Unexpected OpenDirectory record attribute: {attribute}",
                    attribute=name)
                continue

            fieldName = attribute.fieldName

            if type(values) is bytes:
                values = (coerceType(fieldName, values), )
            else:
                values = tuple(coerceType(fieldName, v) for v in values)

            if service.fieldName.isMultiValue(fieldName):
                fields[fieldName] = values
            else:
                assert len(values) == 1
                fields[fieldName] = values[0]

        # Set uid from guid
        try:
            guid = fields[service.fieldName.guid]
        except KeyError:
            raise InvalidDirectoryRecordError("GUID field is required.",
                                              fields)

        fields[service.fieldName.uid] = unicode(guid).upper()

        super(DirectoryRecord, self).__init__(service, fields)
        self._odRecord = odRecord

    def __hash__(self):
        return hash(self.guid)

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return (self.service == other.service and self.guid == other.guid)
        return NotImplemented

    #
    # Verifiers for twext.who.checker stuff.
    #

    @inlineCallbacks
    def verifyPlaintextPassword(self, password):

        if DEFER_TO_THREAD:
            result, error = (yield deferToThreadWithAutoReleasePool(
                self._odRecord.verifyPassword_error_, password, None))
        else:
            result, error = self._odRecord.verifyPassword_error_(
                password, None)

        if error:
            returnValue(False)

        returnValue(result)

    @inlineCallbacks
    def verifyHTTPDigest(
        self,
        username,
        realm,
        uri,
        nonce,
        cnonce,
        algorithm,
        nc,
        qop,
        response,
        method,
    ):
        challenge = (
            'Digest realm="{realm}", nonce="{nonce}", algorithm={algorithm}'.
            format(realm=realm, nonce=nonce, algorithm=algorithm))

        if qop:
            responseTemplate = (
                'username="******",realm="{realm}",algorithm={algorithm},'
                'nonce="{nonce}",cnonce="{cnonce}",nc={nc},qop={qop},'
                'digest-uri="{uri}",response={response}')
        else:
            responseTemplate = ('Digest username="{username}", '
                                'realm="{realm}", '
                                'nonce="{nonce}", '
                                'uri="{uri}", '
                                'response="{response}",'
                                'algorithm={algorithm}')

        responseArg = responseTemplate.format(username=username,
                                              realm=realm,
                                              algorithm=algorithm,
                                              nonce=nonce,
                                              cnonce=cnonce,
                                              nc=nc,
                                              qop=qop,
                                              uri=uri,
                                              response=response)

        if DEFER_TO_THREAD:
            result, _ignore_m1, _ignore_m2, error = (
                yield deferToThreadWithAutoReleasePool(
                    self._odRecord.
                    verifyExtendedWithAuthenticationType_authenticationItems_continueItems_context_error_,
                    ODAuthMethod.digestMD5.value,
                    [username, challenge, responseArg, method], None, None,
                    None))
        else:
            result, _ignore_m1, _ignore_m2, error = self._odRecord.verifyExtendedWithAuthenticationType_authenticationItems_continueItems_context_error_(
                ODAuthMethod.digestMD5.value,
                [username, challenge, responseArg, method], None, None, None)

        if error:
            returnValue(False)

        returnValue(result)

    @inlineCallbacks
    def members(self):
        members = set()
        for uid in getattr(self, "memberUIDs", ()):
            members.add((yield self.service.recordWithUID(uid)))
        for uid in getattr(self, "nestedGroupsUIDs", ()):
            members.add((yield self.service.recordWithUID(uid)))
        returnValue(members)

    # @inlineCallbacks
    # FIXME: need to implement
    def groups(self):
        groups = set()
        return succeed(groups)
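
A credential-check sketch using the verifiers above (illustrative only):

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def checkPassword(record, password):
    # verifyPlaintextPassword() fires with a boolean rather than raising
    # on mismatch; OD errors also surface as False.
    ok = yield record.verifyPlaintextPassword(password)
    returnValue(ok)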
Exemplo n.º 53
File: acl.py Project: jrossi/twext
"""
WebDAV ACL method
"""

__all__ = ["http_ACL"]

from twisted.internet.defer import deferredGenerator, waitForDeferred

from twext.python.log import Logger
from twext.web2 import responsecode
from twext.web2.http import StatusResponse, HTTPError
from twext.web2.dav import davxml
from twext.web2.dav.http import ErrorResponse
from twext.web2.dav.util import davXMLFromStream

log = Logger()


def http_ACL(self, request):
    """
    Respond to a ACL request. (RFC 3744, section 8.1)
    """
    if not self.exists():
        log.err("File not found: %s" % (self,))
        yield responsecode.NOT_FOUND
        return

    #
    # Check authentication and access controls
    #
    x = waitForDeferred(self.authorize(request, (davxml.WriteACL(),)))
Exemplo n.º 54
from txweb2.http import HTTPError, StatusResponse

from twistedcaldav import caldavxml
from twistedcaldav.caldavxml import caldav_namespace, MaxInstances, \
    CalendarTimeZone
from twistedcaldav.config import config
from txdav.common.icommondatastore import IndexedSearchException, \
    ConcurrentModification
from twistedcaldav.instance import TooManyInstancesError
from twistedcaldav.method import report_common

from txdav.caldav.datastore.query.filter import Filter
from txdav.caldav.icalendarstore import TimeRangeLowerLimit, TimeRangeUpperLimit
from txdav.xml import element as davxml

from twisted.internet.defer import inlineCallbacks
from twext.python.log import Logger

log = Logger()


@inlineCallbacks
def report_urn_ietf_params_xml_ns_caldav_calendar_query(self, request, calendar_query):
    """
    Generate a calendar-query REPORT.
    (CalDAV-access-09, section 7.6)
    """

    # Verify root element
    if calendar_query.qname() != (caldav_namespace, "calendar-query"):
        raise ValueError("{CalDAV:}calendar-query expected as root element, not %s." % (calendar_query.sname(),))

    if not self.isCollection():
        parent = (yield self.locateParent(request, request.uri))
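
For context, a hedged sketch of the kind of element this handler receives,
built with the caldavxml/davxml helpers imported above; the constructor
forms follow the usual WebDAV element conventions and are assumptions, not
code from this project:

    query = caldavxml.CalendarQuery(
        davxml.PropertyContainer(davxml.GETETag()),
        caldavxml.Filter(
            caldavxml.ComponentFilter(
                caldavxml.ComponentFilter(name="VEVENT"),
                name="VCALENDAR",
            ),
        ),
    )
    assert query.qname() == (caldav_namespace, "calendar-query")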
Example no. 55
from txweb2 import responsecode
from txweb2.auth.wrapper import UnauthorizedResponse
from txweb2.dav.resource import TwistedACLInheritable
from txweb2.http import HTTPError, StatusResponse

from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.web.error import Error as WebError
from twisted.web.xmlrpc import Proxy, Fault

from twistedcaldav.config import config
from twistedcaldav.directory.directory import DirectoryService, \
    DirectoryRecord, UnknownRecordTypeError

from txdav.xml import element as davxml
from twext.python.log import Logger

log = Logger()

class WikiDirectoryService(DirectoryService):
    """
    L{IDirectoryService} implementation for Wikis.
    """
    baseGUID = "D79EF1E0-9A42-11DD-AD8B-0800200C9A66"

    realmName = None

    recordType_wikis = "wikis"

    UIDPrefix = "wiki-"


    def __repr__(self):
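
The example cuts off at __repr__(). One detail worth illustrating: record
UIDs in this service are presumably formed from UIDPrefix plus the wiki's
short name, so a wiki called "projectX" (a made-up name) would get:

    uid = WikiDirectoryService.UIDPrefix + "projectX"  # "wiki-projectX"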
Example no. 56
class UpgradeToDatabaseStep(object):
    """
    Upgrade resources from a filesystem store to a database store.
    """
    log = Logger()

    def __init__(self, fileStore, sqlStore, uid=None, gid=None, merge=False):
        """
        Create an L{UpgradeToDatabaseStep} if there are still file-based
        calendar or addressbook homes remaining in the given path.

        @param sqlStore: the SQL storage service.

        @param merge: merge filesystem homes into SQL homes, rather than
            skipping them.

        @return: a service
        @rtype: L{IService}
        """

        self.fileStore = fileStore
        self.sqlStore = sqlStore
        self.uid = uid
        self.gid = gid
        self.merge = merge


    @classmethod
    def fileStoreFromPath(cls, path):
        """
        @param path: a path pointing at the document root, where the file-based
            data-store is located.
        @type path: L{CachingFilePath}
        """

        # TODO: TOPPATHS should be computed based on enabled flags in 'store',
        # not hard coded.
        for homeType in TOPPATHS:
            if path.child(homeType).exists():
                if platform.isMacOSX():
                    appropriateStoreClass = XattrPropertyStore
                else:
                    attrs = xattr.xattr(path.path)
                    try:
                        attrs.get('user.should-not-be-set')
                    except IOError, ioe:
                        if ioe.errno == errno.ENODATA:
                            # xattrs are supported and enabled on the filesystem
                            # where the calendar data lives.  this takes some
                            # doing (you have to edit fstab), so this means
                            # we're trying to migrate some 2.x data from a
                            # previous linux installation.
                            appropriateStoreClass = XattrPropertyStore
                        elif ioe.errno == errno.EOPNOTSUPP:
                            # The operation wasn't supported.  This is what will
                            # usually happen on a naively configured filesystem,
                            # so this means we're most likely trying to migrate
                            # some data from an untarred archive created on an
                            # OS X installation using xattrs.
                            appropriateStoreClass = AppleDoubleStore
                        else:
                            # No need to check for ENOENT and the like; we just
                            # checked above to make sure the parent exists.
                            # Other errors are not anticipated here, so fail
                            # fast.
                            raise

                    else:
                        # No IOError: the probe xattr read succeeded, so
                        # fall back to AppleDouble sidecar files for
                        # property storage.
                        appropriateStoreClass = AppleDoubleStore

                return FileStore(
                    path, None, None, True, True,
                    propertyStoreClass=appropriateStoreClass)
        return None
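
A hedged usage sketch for the classmethod above, assuming CachingFilePath
from twext.python.filepath, an already-built sqlStore, and a made-up
document-root path:

    from twext.python.filepath import CachingFilePath

    docRoot = CachingFilePath("/Library/CalendarServer/Documents")
    fileStore = UpgradeToDatabaseStep.fileStoreFromPath(docRoot)
    if fileStore is not None:
        # Only build the upgrade step while file-based homes still exist.
        step = UpgradeToDatabaseStep(fileStore, sqlStore, merge=False)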
Example no. 57
import grp
import pwd

from cPickle import loads as unpickle, UnpicklingError

from twext.web2.dav.fileop import rmdir
from twext.web2.dav import davxml

from twext.python.log import Logger

from twistedcaldav.directory.directory import DirectoryService
#from twistedcaldav.directory.resourceinfo import ResourceInfoDatabase
from twistedcaldav.mail import MailGatewayTokensDatabase
from twistedcaldav.ical import Component
from twistedcaldav import caldavxml

from calendarserver.tools.util import getDirectory

log = Logger()

def getCalendarServerIDs(config):

    # Determine uid/gid for ownership of directories we create here
    uid = -1
    if config.UserName:
        try:
            uid = pwd.getpwnam(config.UserName).pw_uid
        except KeyError:
            log.error("User not found: %s" % (config.UserName,))

    gid = -1
    if config.GroupName:
        try:
            gid = grp.getgrnam(config.GroupName).gr_gid
        except KeyError:
            log.error("Group not found: %s" % (config.GroupName,))

    return uid, gid
Example no. 58
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function

from calendarserver.tools.cmdline import utilityMain, WorkerService
from argparse import ArgumentParser
from twext.python.log import Logger
from twisted.internet.defer import inlineCallbacks
from twext.who.idirectory import RecordType
import time

log = Logger()


class DisplayAPNSubscriptions(WorkerService):

    users = []

    def doWork(self):
        rootResource = self.rootResource()
        directory = rootResource.getDirectory()
        return displayAPNSubscriptions(self.store, directory, rootResource,
                                       self.users)



def main():
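    # The example ends at the def line; below is a hedged sketch of a
    # possible body.  The option names and the utilityMain(configFileName,
    # serviceClass) calling convention are assumptions, not source.
    parser = ArgumentParser(description="Display APN subscriptions.")
    parser.add_argument("-f", "--config", dest="configFileName",
                        help="path to the caldavd.plist configuration file")
    parser.add_argument("users", nargs="+",
                        help="record short names of the users to display")
    args = parser.parse_args()
    DisplayAPNSubscriptions.users = args.users
    utilityMain(args.configFileName, DisplayAPNSubscriptions)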
Example no. 60
import os
import re
import sys
import base64
import itertools

from subprocess import Popen, PIPE, STDOUT
from hashlib import md5, sha1

from twisted.internet import ssl, reactor
from twisted.web import client
from twisted.python import failure
from twext.python.log import Logger

from twext.internet.gaiendpoint import GAIEndpoint
from twext.internet.adaptendpoint import connect

log = Logger()

##
# System Resources (Memory size and processor count)
##

try:
    from ctypes import (
        cdll,
        c_int,
        c_uint64,
        c_ulong,
        c_char_p,
        c_void_p,