if not os.path.exists(config.DataRoot):
        makeDirsUserGroup(config.DataRoot, uid=uid, gid=gid)

    if os.path.exists(docRoot):

        # Look for the /principals/ directory on disk
        oldPrincipals = os.path.join(docRoot, "principals")
        if os.path.exists(oldPrincipals):
            # First move the proxy database and rename it
            doProxyDatabaseMoveUpgrade(config, uid=uid, gid=gid)

            # Now delete the on disk representation of principals
            rmdir(oldPrincipals)
            log.debug(
                "Removed the old principal directory at '%s'."
                % (oldPrincipals,)
            )

        calRoot = os.path.join(docRoot, "calendars")
        if os.path.exists(calRoot):

            uidHomes = os.path.join(calRoot, "__uids__")

            # Move calendar homes to new location:

            log.warn("Moving calendar homes to %s" % (uidHomes,))

            if os.path.exists(uidHomes):
                for home in os.listdir(uidHomes):

                    # MOR: This assumes no UID is going to be 2 chars or less
Exemple #2
0
                home,
                resource,
                self.icalendarUID,
                calendar_old,
                calendar_new,
                self.smartMerge
            )

            self._dequeued()

        except Exception, e:
            log.debug("ScheduleOrganizerWork - exception ID: {id}, UID: '{uid}', {err}", id=self.workID, uid=self.icalendarUID, err=str(e))
            log.debug(traceback.format_exc())
            raise
        except:
            log.debug("ScheduleOrganizerWork - bare exception ID: {id}, UID: '{uid}'", id=self.workID, uid=self.icalendarUID)
            log.debug(traceback.format_exc())
            raise

        log.debug("ScheduleOrganizerWork - done for ID: {id}, UID: {uid}, organizer: {org}", id=self.workID, uid=self.icalendarUID, org=organizer)


class ScheduleOrganizerSendWork(ScheduleWorkMixin, fromTable(schema.SCHEDULE_ORGANIZER_SEND_WORK)):
    """
    @DynamicAttrs
    The associated work item table is SCHEDULE_ORGANIZER_SEND_WORK.

    This work item is used to send iTIP request and cancel messages when an organizer changes
    their calendar object resource. One of these will be created for each iTIP message that
    L{ScheduleOrganizerWork} needs to have sent.
    """
Exemple #3
0
    @inlineCallbacks
    def doWork(self):
        groupCacher = getattr(self.transaction, "_groupCacher", None)
        if groupCacher is not None:

            try:
                yield groupCacher.refreshGroup(self.transaction,
                                               self.groupUID.decode("utf-8"))
            except Exception, e:
                log.error("Failed to refresh group {group} {err}",
                          group=self.groupUID,
                          err=e)

        else:
            log.debug("Rescheduling group refresh for {group}: {when}",
                      group=self.groupUID,
                      when=datetime.datetime.utcnow() +
                      datetime.timedelta(seconds=10))
            yield self.reschedule(self.transaction, 10, groupUID=self.groupUID)


class GroupDelegateChangesWork(AggregatedWorkItem,
                               fromTable(schema.GROUP_DELEGATE_CHANGES_WORK)):

    group = property(lambda self:
                     (self.table.DELEGATOR_UID == self.delegatorUID))

    @inlineCallbacks
    def doWork(self):
        groupCacher = getattr(self.transaction, "_groupCacher", None)
        if groupCacher is not None:
            access = (yield method(userID, wikiID,
                host=wikiConfig.CollabHost, port=wikiConfig.CollabPort))

        log.debug("Wiki ACL result: user [%s], wiki [%s], access [%s]" %
            (userID, wikiID, access))
        returnValue(access)

    except MultiFailure, e:
        log.error("Wiki ACL error: user [%s], wiki [%s], MultiFailure [%s]" %
            (userID, wikiID, e))
        raise HTTPError(StatusResponse(responsecode.SERVICE_UNAVAILABLE,
            "\n".join([str(f) for f in e.failures])))

    except Fault, fault:

        log.debug("Wiki ACL result: user [%s], wiki [%s], FAULT [%s]" % (userID,
            wikiID, fault))

        if fault.faultCode == 2: # non-existent user
            raise HTTPError(StatusResponse(responsecode.FORBIDDEN,
                fault.faultString))

        elif fault.faultCode == 12: # non-existent wiki
            raise HTTPError(StatusResponse(responsecode.NOT_FOUND,
                fault.faultString))

        else:
            # Unknown fault returned from wiki server.  Log the error and
            # return 503 Service Unavailable to the client.
            log.error("Wiki ACL error: user [%s], wiki [%s], FAULT [%s]" %
                (userID, wikiID, fault))
            raise HTTPError(StatusResponse(responsecode.SERVICE_UNAVAILABLE,
Exemple #5
0
                yield groupCacher.refreshGroup(
                    self.transaction, self.groupUid.decode("utf-8")
                )
            except Exception, e:
                log.error(
                    "Failed to refresh group {group} {err}",
                    group=self.groupUid, err=e
                )

        else:
            notBefore = (
                datetime.datetime.utcnow() +
                datetime.timedelta(seconds=10)
            )
            log.debug(
                "Rescheduling group refresh for {group}: {when}",
                group=self.groupUid, when=notBefore
            )
            yield self.transaction.enqueue(
                GroupRefreshWork,
                groupUID=self.groupUid, notBefore=notBefore
            )



class GroupAttendeeReconciliationWork(
    WorkItem, fromTable(schema.GROUP_ATTENDEE_RECONCILE_WORK)
):

    group = property(
        lambda self: (self.table.RESOURCE_ID == self.resourceID)
    )
    _initResolver()

    lookup = "%s._tcp.%s" % (service, domain,)
    log.debug("DNS SRV: lookup: %s" % (lookup,))
    try:
        answers = (yield DebugResolver.lookupService(lookup))[0]
    except (DomainError, AuthoritativeDomainError), e:
        log.debug("DNS SRV: lookup failed: %s" % (e,))
        returnValue(None)

    if len(answers) == 1 and answers[0].type == dns.SRV \
                         and answers[0].payload \
                         and answers[0].payload.target == dns.Name('.'):
        # decidedly not available
        log.debug("DNS SRV: disabled: %s" % (lookup,))
        returnValue(None)

    servers = []
    for a in answers:

        if a.type != dns.SRV or not a.payload:
            continue

        servers.append((a.payload.priority, a.payload.weight, str(a.payload.target), a.payload.port))

    log.debug("DNS SRV: lookup results: %s\n%s" % (lookup, servers,))
    if len(servers) == 0:
        returnValue(None)

                (segments[1] == "__uids__" and segments[2].startswith("wiki-"))
            )
        ):
            # This is a wiki-related calendar resource. SACLs are not checked.
            request.checkedSACL = True

            # The authzuser value is set to that of the wiki principal if
            # not already set.
            if not hasattr(request, "authzUser"):
                wikiName = None
                if segments[1] == "wikis":
                    wikiName = segments[2]
                else:
                    wikiName = segments[2][5:]
                if wikiName:
                    log.debug("Wiki principal %s being assigned to authzUser" % (wikiName,))
                    request.authzUser = davxml.Principal(
                        davxml.HRef.fromString("/principals/wikis/%s/" % (wikiName,))
                    )

        elif self.useSacls and not hasattr(request, "checkedSACL") and not hasattr(request, "checkingSACL"):
            yield self.checkSacl(request)

        if config.RejectClients:
            #
            # Filter out unsupported clients
            #
            agent = request.headers.getHeader("user-agent")
            if agent is not None:
                for reject in config.RejectClients:
                    if reject.search(agent) is not None:
Exemple #8
0
                home,
                resource,
                self.icalendarUid,
                calendar_old,
                calendar_new,
                self.smartMerge
            )

            self._dequeued()

        except Exception, e:
            log.debug("ScheduleOrganizerWork - exception ID: {id}, UID: '{uid}', {err}", id=self.workID, uid=self.icalendarUid, err=str(e))
            log.debug(traceback.format_exc())
            raise
        except:
            log.debug("ScheduleOrganizerWork - bare exception ID: {id}, UID: '{uid}'", id=self.workID, uid=self.icalendarUid)
            log.debug(traceback.format_exc())
            raise

        log.debug("ScheduleOrganizerWork - done for ID: {id}, UID: {uid}, organizer: {org}", id=self.workID, uid=self.icalendarUid, org=organizer)



class ScheduleOrganizerSendWork(ScheduleWorkMixin, fromTable(schema.SCHEDULE_ORGANIZER_SEND_WORK)):
    """
    The associated work item table is SCHEDULE_ORGANIZER_SEND_WORK.

    This work item is used to send iTIP request and cancel messages when an organizer changes
    their calendar object resource. One of these will be created for each iTIP message that
    L{ScheduleOrganizerWork} needs to have sent.
    """
Exemple #9
0
    _initResolver()

    lookup = "{}._tcp.{}".format(service, domain,)
    log.debug("DNS SRV: lookup: {l}", l=lookup)
    try:
        answers = (yield DebugResolver.lookupService(lookup))[0]
    except (DomainError, AuthoritativeDomainError), e:
        log.debug("DNS SRV: lookup failed: {exc}", exc=e)
        returnValue(None)

    if (
        len(answers) == 1 and answers[0].type == dns.SRV and answers[0].payload
        and answers[0].payload.target == dns.Name('.')
    ):
        # decidedly not available
        log.debug("DNS SRV: disabled: {l}", l=lookup)
        returnValue(None)

    servers = []
    for a in answers:

        if a.type != dns.SRV or not a.payload:
            continue

        servers.append((a.payload.priority, a.payload.weight, str(a.payload.target), a.payload.port))

    log.debug("DNS SRV: lookup results: {l}\n{s}", l=lookup, s=servers)


    def _serverCmp(a, b):
        if a[0] != b[0]:
Exemple #10
0
    lookup = "{}._tcp.{}".format(
        service,
        domain,
    )
    log.debug("DNS SRV: lookup: {l}", l=lookup)
    try:
        answers = (yield DebugResolver.lookupService(lookup))[0]
    except (DomainError, AuthoritativeDomainError), e:
        log.debug("DNS SRV: lookup failed: {exc}", exc=e)
        returnValue(None)

    if (len(answers) == 1 and answers[0].type == dns.SRV and answers[0].payload
            and answers[0].payload.target == dns.Name('.')):
        # decidedly not available
        log.debug("DNS SRV: disabled: {l}", l=lookup)
        returnValue(None)

    servers = []
    for a in answers:

        if a.type != dns.SRV or not a.payload:
            continue

        servers.append((a.payload.priority, a.payload.weight,
                        str(a.payload.target), a.payload.port))

    log.debug("DNS SRV: lookup results: {l}\n{s}", l=lookup, s=servers)

    def _serverCmp(a, b):
        if a[0] != b[0]:
Exemple #11
0
        if groupCacher is not None:

            try:
                yield groupCacher.refreshGroup(
                    self.transaction, self.groupUID.decode("utf-8")
                )
            except Exception, e:
                log.error(
                    "Failed to refresh group {group} {err}",
                    group=self.groupUID, err=e
                )

        else:
            log.debug(
                "Rescheduling group refresh for {group}: {when}",
                group=self.groupUID,
                when=datetime.datetime.utcnow() + datetime.timedelta(seconds=10)
            )
            yield self.reschedule(self.transaction, 10, groupUID=self.groupUID)



class GroupDelegateChangesWork(AggregatedWorkItem, fromTable(schema.GROUP_DELEGATE_CHANGES_WORK)):

    group = property(lambda self: (self.table.DELEGATOR_UID == self.delegatorUID))

    @inlineCallbacks
    def doWork(self):
        groupCacher = getattr(self.transaction, "_groupCacher", None)
        if groupCacher is not None:
            # inNewTransaction wipes out the remembered resource<-> URL mappings in the
            # request object but we need to be able to map the actual reply resource to its
            # URL when doing auto-processing, so we have to sneak that mapping back in here.
            txn = yield self.organizer_calendar_resource.inNewTransaction(self.request, label="Delayed attendee refresh")

            try:
                organizer_resource = (yield self.request.locateResource(self.organizer_calendar_resource._url))
                if organizer_resource.exists():
                    yield self._doRefresh(organizer_resource, only_attendees=attendeesToProcess)
                else:
                    log.debug("ImplicitProcessing - skipping refresh of missing UID: '%s'" % (self.uid,))
            except Exception, e:
                log.debug("ImplicitProcessing - refresh exception UID: '%s', %s" % (self.uid, str(e)))
                yield txn.abort()
            except:
                log.debug("ImplicitProcessing - refresh bare exception UID: '%s'" % (self.uid,))
                yield txn.abort()
            else:
                yield txn.commit()
        finally:
            yield uidlock.clean()


    def _enqueueBatchRefresh(self):
        """
        Schedule the batch refresh after the configured delay.

        Kept as a separate method mostly so unit tests can stub it out.
        """
        delay = config.Scheduling.Options.AttendeeRefreshBatchDelaySeconds
        reactor.callLater(delay, self._doBatchRefresh)


    @inlineCallbacks
Exemple #13
0
        groupCacher = getattr(self.transaction, "_groupCacher", None)
        if groupCacher is not None:

            try:
                yield groupCacher.refreshGroup(self.transaction,
                                               self.groupUid.decode("utf-8"))
            except Exception, e:
                log.error("Failed to refresh group {group} {err}",
                          group=self.groupUid,
                          err=e)

        else:
            notBefore = (datetime.datetime.utcnow() +
                         datetime.timedelta(seconds=10))
            log.debug("Rescheduling group refresh for {group}: {when}",
                      group=self.groupUid,
                      when=notBefore)
            yield self.transaction.enqueue(GroupRefreshWork,
                                           groupUID=self.groupUid,
                                           notBefore=notBefore)


class GroupAttendeeReconciliationWork(
        WorkItem, fromTable(schema.GROUP_ATTENDEE_RECONCILE_WORK)):

    group = property(lambda self: (self.table.RESOURCE_ID == self.resourceID))

    @inlineCallbacks
    def doWork(self):

        # Delete all other work items for this event
Exemple #14
0
    if not os.path.exists(config.DataRoot):
        makeDirsUserGroup(config.DataRoot, uid=uid, gid=gid)

    if os.path.exists(docRoot):

        # Look for the /principals/ directory on disk
        oldPrincipals = os.path.join(docRoot, "principals")
        if os.path.exists(oldPrincipals):
            # First move the proxy database and rename it
            doProxyDatabaseMoveUpgrade(config, uid=uid, gid=gid)

            # Now delete the on disk representation of principals
            rmdir(oldPrincipals)
            log.debug(
                "Removed the old principal directory at '%s'."
                % (oldPrincipals,)
            )

        calRoot = os.path.join(docRoot, "calendars")
        if os.path.exists(calRoot):

            uidHomes = os.path.join(calRoot, "__uids__")

            # Move calendar homes to new location:

            log.warn("Moving calendar homes to %s" % (uidHomes,))

            if os.path.exists(uidHomes):
                for home in os.listdir(uidHomes):

                    # MOR: This assumes no UID is going to be 2 chars or less
Exemple #15
0
    if not os.path.exists(config.DataRoot):
        makeDirsUserGroup(config.DataRoot, uid=uid, gid=gid)

    if os.path.exists(docRoot):

        # Look for the /principals/ directory on disk
        oldPrincipals = os.path.join(docRoot, "principals")
        if os.path.exists(oldPrincipals):
            # First move the proxy database and rename it
            doProxyDatabaseMoveUpgrade(config, uid=uid, gid=gid)

            # Now delete the on disk representation of principals
            rmdir(oldPrincipals)
            log.debug(
                "Removed the old principal directory at '{path}'.",
                path=oldPrincipals,
            )

        calRoot = os.path.join(docRoot, "calendars")
        if os.path.exists(calRoot):

            uidHomes = os.path.join(calRoot, "__uids__")

            # Move calendar homes to new location:

            log.warn("Moving calendar homes to {path}", path=uidHomes)

            if os.path.exists(uidHomes):
                for home in os.listdir(uidHomes):

                    # MOR: This assumes no UID is going to be 2 chars or less