Example #1
class ClosureNameHolder(storagetypes.Object):
    '''
   Mark a closure's name as in use
   '''

    name = storagetypes.String()
    closure_id = storagetypes.Integer()

    required_attrs = ["name"]

    @classmethod
    def make_key_name(cls, name):
        return "ClosureNameHolder: name=%s" % (name)

    @classmethod
    def create_async(cls, _name, _id):
        return ClosureNameHolder.get_or_insert_async(
            ClosureNameHolder.make_key_name(_name), name=_name, closure_id=_id)
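The name-holder classes in these examples all follow the same reservation pattern: get_or_insert_async, keyed on the name, either creates the record or returns the pre-existing one, and the caller compares the stored ID to detect a collision. A minimal usage sketch, assuming the same storagetypes environment as the examples (the closure name and ID below are hypothetical placeholders):

# Hypothetical name/ID; mirrors the collision check used by Volume.Create and
# Gateway.Create in the later examples.
nameholder_fut = ClosureNameHolder.create_async("my-closure", 12345)
nameholder = nameholder_fut.get_result()

if nameholder.closure_id != 12345:
    # get_or_insert_async returned an existing record: the name is taken
    raise Exception("Closure 'my-closure' already exists!")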
Example #2
class VolumeNameHolder(storagetypes.Object):
    '''
   Mark a Volume name as taken
   '''

    name = storagetypes.String()
    volume_id = storagetypes.Integer()

    required_attrs = ["name"]

    @classmethod
    def make_key_name(cls, name):
        return "VolumeNameHolder: name=%s" % (name)

    @classmethod
    def create_async(cls, _name, _id):
        return VolumeNameHolder.get_or_insert_async(
            VolumeNameHolder.make_key_name(_name), name=_name, volume_id=_id)
Example #3
class GatewayNameHolder(storagetypes.Object):
    '''
   Mark a Gateway's name as in use
   '''

    name = storagetypes.String()
    g_id = storagetypes.Integer()

    required_attrs = ["name"]

    @classmethod
    def make_key_name(cls, name):
        return "GatewayNameHolder: name=%s" % (name)

    @classmethod
    def create_async(cls, _name, _id):
        return GatewayNameHolder.get_or_insert_async(
            GatewayNameHolder.make_key_name(_name), name=_name, g_id=_id)
Example #4
class SyndicateUserNameHolder(storagetypes.Object):
    '''
   Mark a SyndicateUser email as taken
   '''

    email = storagetypes.String()
    owner_id = storagetypes.Integer()

    required_attrs = ["email"]

    @classmethod
    def make_key_name(cls, email):
        return "SyndicateUserNameHolder: email=%s" % (email)

    @classmethod
    def create_async(cls, _email, _id):
        return SyndicateUserNameHolder.get_or_insert_async(
            SyndicateUserNameHolder.make_key_name(_email),
            email=_email,
            owner_id=_id)
Example #5
class SyndicateUser(storagetypes.Object):

    USER_KEY_UNSET = "unset"
    USER_KEY_UNUSED = "unused"

    email = storagetypes.String()  # used as the username
    owner_id = storagetypes.Integer()  # UID field in Syndicate
    openid_url = storagetypes.Text()  # OpenID identifying URL

    max_volumes = storagetypes.Integer(
        default=10
    )  # how many Volumes can this user create? (-1 means unlimited)
    max_UGs = storagetypes.Integer(
        default=10)  # how many UGs can this user create?
    max_RGs = storagetypes.Integer(
        default=10)  # how many RGs can this user create?
    max_AGs = storagetypes.Integer(
        default=10)  # how many AGs can this user create?
    max_requests = storagetypes.Integer(
        default=10)  # how many pending Volume requests can this user create?

    is_admin = storagetypes.Boolean(
        default=False, indexed=False)  # is this user an administrator?

    signing_public_key = storagetypes.Text(
    )  # PEM-encoded public key for authenticating this user, or USER_KEY_UNSET if it is not set, or USER_KEY_UNUSED if it will not be used
    signing_public_key_expiration = storagetypes.Integer(
        default=-1)  # seconds since the epoch

    active = storagetypes.Boolean(default=False)  # is this account active?
    allow_password_auth = storagetypes.Boolean(
        default=True)  # allow password-based authentication?

    # one-time password for setting the signing public key
    activate_password_salt = storagetypes.String(
    )  # 32 bytes, but encoded as a hex string
    activate_password_hash = storagetypes.String()  # SHA256

    # for RPC
    key_type = "user"

    required_attrs = ["email", "openid_url", "signing_public_key_expiration"]

    key_attrs = ["email"]

    default_values = {
        "max_volumes": (lambda cls, attrs: 10),
        "max_UGs": (lambda cls, attrs: 10),
        "max_RGs": (lambda cls, attrs: 10),
        "max_AGs": (lambda cls, attrs: 10),
        "is_admin": (lambda cls, attrs: False),
        "openid_url": (lambda cls, attrs: ""),
        "signing_public_key_expiration": (lambda cls, attrs: -1),
        "active": (lambda cls, attrs: False),
        "allow_password_auth": (lambda cls, attrs: True)
    }

    validators = {
        "email": (lambda cls, value: valid_email(cls, value)),
        "signing_public_key":
        (lambda cls, value: not cls.is_signing_public_key_set(value) or cls.
         is_valid_key(value, USER_RSA_KEYSIZE)),
        "openid_url":
        (lambda cls, value: len(value) < 4096),  # not much of a check here...
        "activate_password_salt": (lambda cls, value: len(
            str(value).translate(None, "0123456789abcdefABCDEF")) == 0 and len(
                str(value)) == 64),  # 32-byte salt, encoded as a hex number
        "activate_password_hash":
        (lambda cls, value:
         len(str(value).translate(None, "0123456789abcdefABCDEF")
             ) == 0 and len(str(value)) == 64
         )  # SHA256: 32-byte hash, encoded as a hex number
    }

    read_attrs_api_required = [
        "email",
        "owner_id",
        "openid_url",
        "max_volumes",
        "max_UGs",
        "max_RGs",
        "max_AGs",
        "signing_public_key",
        "signing_public_key_expiration",
    ]

    read_attrs = read_attrs_api_required

    write_attrs_api_required = [
        "openid_url", "signing_public_key", "allow_password_auth"
    ]

    write_attrs_admin_required = [
        "max_volumes", "max_UGs", "max_RGs", "max_AGs", "is_admin"
    ]

    write_attrs = write_attrs_api_required + write_attrs_admin_required

    def owned_by(self, user):
        return user.owner_id == self.owner_id

    @classmethod
    def Authenticate(cls, email, data, data_signature):
        """
      Authenticate a user via public-key cryptography.
      Verify that data was signed by the user's private key, given the signature and data.
      (use RSA PSS for security).
      Return the user on success; False on authentication error; None if the user doesn't exist
      """
        user = SyndicateUser.Read(email)
        if user == None:
            return None

        if not SyndicateUser.is_signing_public_key_set(
                user.signing_public_key):
            logging.error("Key for %s is not set or unused" % email)
            return None

        ret = cls.auth_verify(user.signing_public_key, data, data_signature)
        if not ret:
            logging.error("Verification failed for %s" % email)
            return False

        else:
            return user

    def makeCert(self):
        ret = {}
        ret['expires'] = self.signing_public_key_expiration
        ret['pubkey'] = self.signing_public_key
        ret['email'] = self.email
        ret['openid_url'] = self.openid_url

        return ret

    @classmethod
    def Create(cls, email, **kwargs):
        """
      Create a SyndicateUser.
      
      Required arguments:
      email                 -- Email address of the user.  Serves as the username (str)
      openid_url            -- OpenID identifier for authenticating this user (str)
      """

        kwargs['email'] = email

        # sanity check
        SyndicateUser.fill_defaults(kwargs)

        # if we're given a signing public key, then set it.
        # otherwise, use the given salted password hash.
        skip_verify = []
        if kwargs.has_key('activate_password_hash') and kwargs.has_key(
                'activate_password_salt'):
            # don't check for this
            skip_verify = ['signing_public_key']
            kwargs['signing_public_key'] = cls.USER_KEY_UNSET

        elif kwargs.has_key('signing_public_key'):
            # this had better be a valid key
            if not SyndicateUser.validators['signing_public_key'](
                    SyndicateUser, kwargs['signing_public_key']):
                raise Exception("Invalid field: %s" % 'signing_public_key')

            # don't check for password hash and salt
            skip_verify = ['activate_password_hash', 'activate_password_salt']

        else:
            # need either of these...
            raise Exception(
                "Need either signing_public_key or (activate_password_hash, activate_password_salt)"
            )

        missing = SyndicateUser.find_missing_attrs(kwargs)
        if len(missing) != 0:
            raise Exception("Missing attributes: %s" % (", ".join(missing)))

        invalid = SyndicateUser.validate_fields(kwargs, skip=skip_verify)
        if len(invalid) != 0:
            raise Exception("Invalid values for fields: %s" %
                            (", ".join(invalid)))

        user_key_name = SyndicateUser.make_key_name(email=email)
        user = storagetypes.memcache.get(user_key_name)
        if user == None:
            user_key = storagetypes.make_key(SyndicateUser, user_key_name)
            user = user_key.get()

            if user == None:

                # do not allow admin privileges
                kwargs['is_admin'] = False
                kwargs['owner_id'] = random.randint(1, 2**63 - 1)
                user_key_name = SyndicateUser.make_key_name(email=email)

                user = SyndicateUser.get_or_insert(user_key_name, **kwargs)

                # check for collisions
                if user.owner_id != kwargs['owner_id']:
                    # collision
                    raise Exception("User '%s' already exists" % email)

                return user.key

            else:
                raise Exception("User '%s' already exists" % email)

        else:
            raise Exception("User '%s' already exists" % email)

    @classmethod
    def CreateAdmin(cls, email, openid_url, signing_public_key,
                    activate_password):
        """
      Create the Admin user.  NOTE: this will be called repeatedly, so use memcache
      """

        user_key_name = SyndicateUser.make_key_name(email=email)
        user = storagetypes.memcache.get(user_key_name)

        if user == None:
            user_key = storagetypes.make_key(SyndicateUser, user_key_name)
            user = user_key.get()

            if user == None:
                # admin does not exist
                attrs = {}

                logging.info("Generating admin '%s'" % email)

                # fill defaults
                SyndicateUser.fill_defaults(attrs)

                attrs['email'] = email
                attrs['openid_url'] = openid_url
                attrs['owner_id'] = random.randint(1, 2**63 - 1)
                attrs['is_admin'] = True

                # generate password hash and salt
                import common.api as api
                pw_salt = api.password_salt()
                pw_hash = api.hash_password(activate_password, pw_salt)

                attrs['activate_password_hash'] = pw_hash
                attrs['activate_password_salt'] = pw_salt

                # possible that we haven't set the public key yet
                if not signing_public_key or len(signing_public_key) == 0:
                    signing_public_key = cls.USER_KEY_UNSET

                attrs['signing_public_key'] = signing_public_key

                invalid = SyndicateUser.validate_fields(attrs)
                if len(invalid) != 0:
                    raise Exception("Invalid values for fields: %s" %
                                    (", ".join(invalid)))

                user = SyndicateUser.get_or_insert(user_key_name, **attrs)

                # check for collisions
                if user.owner_id != attrs['owner_id']:
                    # collision
                    logging.warning("Admin '%s' already exists" % email)

            storagetypes.memcache.set(user_key_name, user)

        return user.key

    @classmethod
    def Read(cls, email_or_owner_id, async=False):
        """
      Read a SyndicateUser
      
      Arguments:
      email_or_owner_id         -- Email address of the user to read, or the owner ID (str or int)
      """
        owner_id = None
        email = None

        try:
            owner_id = int(email_or_owner_id)
        except:
            email = email_or_owner_id

        if owner_id != None:
            return cls.Read_ByOwnerID(owner_id, async=async)

        user_key_name = SyndicateUser.make_key_name(email=email)
        user_key = storagetypes.make_key(SyndicateUser, user_key_name)

        user = storagetypes.memcache.get(user_key_name)
        if user == None:
            if async:
                return user_key.get_async(use_memcache=False)

            else:
                user = user_key.get(use_memcache=False)
                if not user:
                    return None
                else:
                    storagetypes.memcache.set(user_key_name, user)

        elif async:
            user = storagetypes.FutureWrapper(user)

        return user
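A hedged sketch of how a caller might exercise SyndicateUser, based only on the classmethods shown above; the email address, PEM key string, and signed payload are hypothetical placeholders:

# Create requires an email plus either a signing public key or an
# (activate_password_hash, activate_password_salt) pair.
user_key = SyndicateUser.Create("alice@example.com",
                                openid_url="https://openid.example.com/alice",
                                signing_public_key=alice_public_key_pem)

# Read accepts either the email or the numeric owner_id.
user = SyndicateUser.Read("alice@example.com")

# Authenticate returns the user on success, False on a bad signature,
# and None if the user does not exist.
caller = SyndicateUser.Authenticate("alice@example.com", request_data,
                                    request_signature)
if not caller:
    raise Exception("Authentication failed")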
Example #6
class VolumeAccessRequest(storagetypes.Object):
    """
   This object controls what kinds of Gateways a user can create within a Volume,
   and what capabilities they are allowed to have.
   """

    STATUS_PENDING = 1
    STATUS_GRANTED = 2

    requester_owner_id = storagetypes.Integer()  # owner id of the requester
    request_message = storagetypes.Text()  # message to the owner
    volume_id = storagetypes.Integer()  # ID of volume to join
    gateway_caps = storagetypes.Integer(
        indexed=False
    )  # gateway capabilities requested (only apply to User Gateways)
    nonce = storagetypes.Integer(
        indexed=False)  # detect collision with another one of these
    request_timestamp = storagetypes.Integer()  # when was the request made?
    status = storagetypes.Integer()  # granted or pending?
    allowed_gateways = storagetypes.Integer(
        default=0)  # bit vector representing GATEWAY_TYPE_*G (from msconfig)

    # purely for readability
    volume_name = storagetypes.String()

    required_attrs = [
        "requester_owner_id", "nonce", "request_timestamp", "status",
        "volume_name", "volume_id"
    ]

    read_attrs = [
        "requester_owner_id", "request_message", "volume_id", "gateway_caps",
        "request_timestamp", "status", "volume_name", "allowed_gateways"
    ]

    read_attrs_api_required = read_attrs

    def owned_by(self, user):
        return (user.owner_id == self.requester_owner_id)

    @classmethod
    def make_key_name(cls, requester_owner_id, volume_id):
        return "VolumeAccessRequest: owner_id=%s,volume_id=%s" % (
            requester_owner_id, volume_id)

    @classmethod
    def create_async(cls, _requester_owner_id, _volume_id, _volume_name,
                     _nonce, _status, **attrs):
        ts = int(storagetypes.get_time())
        return VolumeAccessRequest.get_or_insert_async(
            VolumeAccessRequest.make_key_name(_requester_owner_id, _volume_id),
            requester_owner_id=_requester_owner_id,
            volume_id=_volume_id,
            nonce=_nonce,
            request_timestamp=ts,
            status=_status,
            volume_name=_volume_name,
            **attrs)

    @classmethod
    def RequestAccess(cls, owner_id, volume_id, volume_name, allowed_gateways,
                      gateway_caps, message):
        """
      Create a request that a particular user be allowed to provision Gateways for a particular Volume.
      If User Gateways are allowed, then gateway_caps controls what those capabilities are allowed to be.
      Include a message that the Volume owner will be able to read.
      
      Return if the request was successfully placed.
      Raise an exception if there is already a pending request.
      """

        nonce = random.randint(-2**63, 2**63 - 1)
        req_fut = VolumeAccessRequest.create_async(
            owner_id,
            volume_id,
            volume_name,
            nonce,
            VolumeAccessRequest.STATUS_PENDING,
            request_message=message,
            gateway_caps=gateway_caps,
            allowed_gateways=allowed_gateways)
        req = req_fut.get_result()

        # duplicate?
        if req.nonce != nonce:
            raise Exception("User already attempted to join Volume '%s'" %
                            (owner_id, volume_name))

        return True

    @classmethod
    def GrantAccess(cls,
                    owner_id,
                    volume_id,
                    volume_name,
                    allowed_gateways=None,
                    gateway_caps=None):
        """
      Allow a given user to create Gateways within a given Volume, subject to given capabilities.
      """

        # verify the arguments are valid
        if allowed_gateways is not None and (allowed_gateways & ~(
            (1 << GATEWAY_TYPE_UG) | (1 << GATEWAY_TYPE_RG) |
            (1 << GATEWAY_TYPE_AG))) != 0:
            # extra bits
            raise Exception("Invalid bit field for allowed_gateways (%x)" %
                            (allowed_gateways))

        if gateway_caps is not None and (
                gateway_caps
                & ~(GATEWAY_CAP_READ_DATA | GATEWAY_CAP_READ_METADATA
                    | GATEWAY_CAP_WRITE_DATA | GATEWAY_CAP_WRITE_METADATA
                    | GATEWAY_CAP_COORDINATE)) != 0:
            # extra bits
            raise Exception("Invalid bit field for gateway_caps (%x)" %
                            (gateway_caps))

        nonce = random.randint(-2**63, 2**63 - 1)
        req_fut = VolumeAccessRequest.create_async(
            owner_id,
            volume_id,
            volume_name,
            nonce,
            VolumeAccessRequest.STATUS_GRANTED,
            request_message="",
            gateway_caps=gateway_caps,
            allowed_gateways=allowed_gateways)
        req = req_fut.get_result()

        if req.nonce != nonce:
            # Request existed. update and put

            if gateway_caps != None:
                req.gateway_caps = gateway_caps

            if allowed_gateways != None:
                req.allowed_gateways = allowed_gateways

            req.status = VolumeAccessRequest.STATUS_GRANTED
            req.put()

        req_key_name = VolumeAccessRequest.make_key_name(owner_id, volume_id)
        storagetypes.memcache.delete(req_key_name)

        return True

    @classmethod
    def GetAccess(cls, owner_id, volume_id):
        """
      Get the access status of a user in a Volume.
      """

        req_key_name = VolumeAccessRequest.make_key_name(owner_id, volume_id)
        req = storagetypes.memcache.get(req_key_name)

        if req != None:
            return req

        req_key = storagetypes.make_key(VolumeAccessRequest, req_key_name)

        req = req_key.get()
        if req != None:
            storagetypes.memcache.set(req_key_name, req)

        return req

    @classmethod
    def RemoveAccessRequest(cls, owner_id, volume_id):
        """
      Delete an access request.
      """

        req_key_name = VolumeAccessRequest.make_key_name(owner_id, volume_id)
        req_key = storagetypes.make_key(VolumeAccessRequest, req_key_name)
        storagetypes.deferred.defer(cls.delete_all, [req_key])
        storagetypes.memcache.delete(req_key_name)

        return True

    @classmethod
    def ListUserAccessRequests(cls, owner_id, **q_opts):

        return VolumeAccessRequest.ListAll(
            {"VolumeAccessRequest.requester_owner_id ==": owner_id}, **q_opts)

    @classmethod
    def ListVolumeAccessRequests(cls, volume_id, **q_opts):

        return VolumeAccessRequest.ListAll(
            {
                "VolumeAccessRequest.volume_id ==": volume_id,
                "VolumeAccessRequest.status ==":
                VolumeAccessRequest.STATUS_PENDING
            }, **q_opts)

    @classmethod
    def ListVolumeAccess(cls, volume_id, **q_opts):

        return VolumeAccessRequest.ListAll(
            {
                "VolumeAccessRequest.volume_id ==": volume_id,
                "VolumeAccessRequest.status ==":
                VolumeAccessRequest.STATUS_GRANTED
            }, **q_opts)

    @classmethod
    def DeleteAccessRequestsByVolume(cls, volume_id, async=False):
        def __delete_var(var):
            req_key_name = VolumeAccessRequest.make_key_name(
                var.requester_owner_id, volume_id)
            storagetypes.memcache.delete(req_key_name)
            var.key.delete()

        return VolumeAccessRequest.ListAll(
            {"VolumeAccessRequest.volume_id ==": volume_id},
            map_func=__delete_var,
            projection=['requester_owner_id'],
            async=async)
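A hedged end-to-end sketch of the request/grant flow, using only the classmethods defined above; the owner ID, volume ID, volume name, and message are hypothetical, and the bit flags are the msconfig constants the class already references:

# A user asks to provision a read-only User Gateway in the volume...
VolumeAccessRequest.RequestAccess(
    1234, 5678, "demo-volume",
    allowed_gateways=(1 << GATEWAY_TYPE_UG),
    gateway_caps=(GATEWAY_CAP_READ_DATA | GATEWAY_CAP_READ_METADATA),
    message="Please let me in")

# ...the volume owner grants it...
VolumeAccessRequest.GrantAccess(
    1234, 5678, "demo-volume",
    allowed_gateways=(1 << GATEWAY_TYPE_UG),
    gateway_caps=(GATEWAY_CAP_READ_DATA | GATEWAY_CAP_READ_METADATA))

# ...and the requester checks the outcome.
req = VolumeAccessRequest.GetAccess(1234, 5678)
if req is not None and req.status == VolumeAccessRequest.STATUS_GRANTED:
    logging.info("Access to '%s' granted" % req.volume_name)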
Example #7
class Volume(storagetypes.Object):

    name = storagetypes.String()
    blocksize = storagetypes.Integer(indexed=False)  # Stored in bytes!!
    active = storagetypes.Boolean()
    description = storagetypes.Text()
    owner_id = storagetypes.Integer()
    volume_id = storagetypes.Integer()

    version = storagetypes.Integer(
        indexed=False)  # version of this Volume's metadata
    cert_version = storagetypes.Integer(
        indexed=False)  # certificate bundle version

    private = storagetypes.Boolean(
    )  # if True, then this Volume won't be listed
    archive = storagetypes.Boolean(
    )  # only an authenticated AG owned by the same user that owns this Volume can write to this Volume
    allow_anon = storagetypes.Boolean(
    )  # if True, then anonymous users can access this Volume (i.e. users who don't have to log in)

    num_shards = storagetypes.Integer(
        default=20, indexed=False)  # number of shards per entry in this volume

    metadata_public_key = storagetypes.Text(
    )  # Volume public key, in PEM format, for verifying metadata
    metadata_private_key = storagetypes.Text(
    )  # Volume private key, in PEM format, for signing metadata

    file_quota = storagetypes.Integer(
    )  # maximum number of files allowed here (-1 means unlimited)

    deleted = storagetypes.Boolean()  # is this Volume deleted?

    default_gateway_caps = storagetypes.Integer(indexed=False)

    closure = storagetypes.Text(
    )  # base64-encoded closure for connecting to the cache providers

    # for RPC
    key_type = "volume"

    @classmethod
    def generate_metadata_keys(cls):
        """
      Generate metadata public/private keys for metadata sign/verify
      """
        return cls.generate_keys(VOLUME_RSA_KEYSIZE)

    required_attrs = [
        "name",
        "blocksize",
        "owner_id",
        "private",
        "metadata_private_key",
        "default_gateway_caps",
    ]

    key_attrs = ["volume_id"]

    validators = {
        "name": (lambda cls, value: len(
            unicode(value).translate(
                dict(
                    (ord(char), None) for char in
                    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-. "
                ))) == 0 and not is_int(value)),
        "metadata_public_key":
        (lambda cls, value: cls.is_valid_key(value, VOLUME_RSA_KEYSIZE)),
        "metadata_private_key":
        (lambda cls, value: cls.is_valid_key(value, VOLUME_RSA_KEYSIZE))
    }

    default_values = {
        "blocksize": (lambda cls, attrs: 61440),  # 60 KB
        "version": (lambda cls, attrs: 1),
        "cert_version": (lambda cls, attrs: 1),
        "private": (lambda cls, attrs: True),
        "archive": (lambda cls, attrs: False),
        "allow_anon": (lambda cls, attrs: False),
        "active": (lambda cls, attrs: True),
        "file_quota": (lambda cls, attrs: -1),
        "deleted": (lambda cls, attrs: False),
        "num_shards": (lambda cls, attrs: 20),
        "default_gateway_caps":
        (lambda cls, attrs: GATEWAY_CAP_READ_METADATA | GATEWAY_CAP_READ_DATA
         )  # read only
    }

    read_attrs_api_required = [
        "blocksize", "active", "version", "cert_version", "private", "archive",
        "allow_anon", "file_quota", "default_gateway_caps", "closure"
    ]

    read_attrs = [
        "name",
        "description",
        "owner_id",
        "volume_id",
        "metadata_public_key",
    ] + read_attrs_api_required

    write_attrs = [
        "active", "description", "private", "archive", "file_quota",
        "default_gateway_caps", "allow_anon", "closure"
    ]

    write_attrs_api_required = write_attrs

    def owned_by(self, user):
        return user.owner_id == self.owner_id

    def need_gateway_auth(self):
        """
      Do we require an authentic gateway to interact with us?
      (i.e. do we forbid anonymous users)?
      """
        if not self.allow_anon:
            return True

        return False

    def protobuf(self, volume_metadata, **kwargs):
        """
      Convert to a protobuf (ms_volume_metadata)
      """

        volume_metadata.owner_id = kwargs.get('owner_id', self.owner_id)
        volume_metadata.blocksize = kwargs.get('blocksize', self.blocksize)
        volume_metadata.volume_id = kwargs.get('volume_id', self.volume_id)
        volume_metadata.name = kwargs.get('name', self.name)
        volume_metadata.description = kwargs.get('description',
                                                 self.description)
        volume_metadata.volume_version = kwargs.get('volume_version',
                                                    self.version)
        volume_metadata.cert_version = kwargs.get('cert_version',
                                                  self.cert_version)
        volume_metadata.volume_public_key = kwargs.get(
            'metadata_public_key', self.metadata_public_key)
        volume_metadata.archive = kwargs.get('archive', self.archive)
        volume_metadata.private = kwargs.get('private', self.private)
        volume_metadata.allow_anon = kwargs.get('allow_anon', self.allow_anon)

        if kwargs.get('closure', self.closure) is not None:
            volume_metadata.cache_closure_text = kwargs.get(
                'closure', self.closure)

        # sign it
        volume_metadata.signature = ""

        data = volume_metadata.SerializeToString()
        sig = self.sign_message(data)

        volume_metadata.signature = sig

        return

    def protobuf_gateway_cert(self,
                              gateway_cert,
                              gateway,
                              sign=True,
                              need_closure=True):
        """
      Given an ms_gateway_cert protobuf and a gateway record, have the gateway populate the 
      cert protobuf and then have the Volume optionally sign it with its private key.
      """

        gateway.protobuf_cert(gateway_cert, need_closure=need_closure)

        gateway_cert.signature = ""

        if sign:
            # sign the cert
            data = gateway_cert.SerializeToString()
            sig = self.sign_message(data)

            gateway_cert.signature = sig

        return

    def protobuf_gateway_cert_manifest_record(self, cert_block, g_id,
                                              gateway_type, caps,
                                              cert_version):
        """
      Populate a protobuf manifest entry with a gateway's certificate information.
      """

        cert_block.gateway_id = g_id
        cert_block.start_id = gateway_type
        cert_block.end_id = caps
        cert_block.block_versions.append(cert_version)

    def protobuf_gateway_cert_manifest(self,
                                       manifest,
                                       include_cert=None,
                                       sign=True):
        """
      Generate a specially-crafted manifest protobuf, which a gateway can use to learn 
      the IDs and types of all gateways in the Volume, as well as their certs' versions.
      """

        manifest.volume_id = self.volume_id
        manifest.coordinator_id = 0
        manifest.file_id = 0
        manifest.owner_id = 0
        manifest.file_version = self.cert_version
        manifest.mtime_sec = 0
        manifest.mtime_nsec = 0
        manifest.fent_mtime_sec = 0
        manifest.fent_mtime_nsec = 0

        sz = 0

        # query certificate versions, types, and caps of all gateways that need to be trusted
        listing = Gateway.ListAll(
            {
                "Gateway.volume_id ==": self.volume_id,
                "Gateway.need_cert ==": True
            },
            projection=["g_id", "gateway_type", "cert_version", "caps"])

        # if the caller wants to include a particular gateway's cert, do so
        has_included_cert = False

        for gateway_metadata in listing:
            cert_block = manifest.block_url_set.add()

            self.protobuf_gateway_cert_manifest_record(
                cert_block, gateway_metadata.g_id,
                gateway_metadata.gateway_type, gateway_metadata.caps,
                gateway_metadata.cert_version)

            logging.info(
                "cert block: (%s, %s, %s, %x)" %
                (gateway_metadata.gateway_type, gateway_metadata.g_id,
                 gateway_metadata.cert_version, gateway_metadata.caps))
            sz += 1

            if gateway_metadata.g_id == include_cert:
                has_included_cert = True

        if not has_included_cert and include_cert is not None:

            # get this gateway's cert as well
            gw = Gateway.Read(include_cert)

            if gw is not None:
                cert_block = manifest.block_url_set.add()

                self.protobuf_gateway_cert_manifest_record(
                    cert_block, gw.g_id, gw.gateway_type, gw.caps,
                    gw.cert_version)

                logging.info("cert block (included for %s): (%s, %s, %s, %x)" %
                             (include_cert, gw.gateway_type, gw.g_id,
                              gw.cert_version, gw.caps))
                sz += 1

        manifest.size = sz
        manifest.signature = ""

        if sign:
            data = manifest.SerializeToString()
            sig = self.sign_message(data)

            manifest.signature = sig

        return

    def is_gateway_in_volume(self, gateway):
        """
      Determine whether a given Gateway instance belongs to this Volume.
      If the Volume is not private, then it "belongs" by default.
      """

        if self.allow_anon:
            return True

        return gateway.volume_id == self.volume_id

    def sign_message(self, data):
        """
      Return the base64-encoded crypto signature of the data,
      signed with our metadata private key.
      """
        signature = Volume.auth_sign(self.metadata_private_key, data)
        if signature is None:
            raise Exception("Failed to sign data")

        sigb64 = base64.b64encode(signature)
        return sigb64

    @classmethod
    def Create(cls, user, **kwargs):
        """
      Given volume data, store it.
      Update the corresponding SyndicateUser atomically along with creating the Volume
      so that the SyndicateUser owns the Volume.
      
      Arguments:
      user              -- SyndicateUser instance that will own this Volume
      
      Required keyword arguments:
      name              -- name of the Volume (str)
      blocksize         -- size of the Volume's blocks in bytes (int)
      description       -- description of the Volume (str)
      private           -- whether or not this Volume is visible to other users (bool)
      
      Optional keyword arguments:
      metadata_private_key       -- PEM-encoded RSA private key, 4096 bits (str)
      archive                    -- whether or not this Volume is populated only by Acquisition Gateways (bool)
      default_gateway_caps      -- bitfield of capabilities Gateways created within this Volume should receive
      """

        # sanity check
        if not user:
            raise Exception("No user given")

        kwargs['owner_id'] = 0  # will look up user and fill with owner ID once we validate input.
        Volume.fill_defaults(kwargs)

        # extract public key from private key if needed
        Volume.extract_keys('metadata_public_key', 'metadata_private_key',
                            kwargs, VOLUME_RSA_KEYSIZE)

        # Validate
        missing = Volume.find_missing_attrs(kwargs)
        if len(missing) != 0:
            raise Exception("Missing attributes: %s" % (", ".join(missing)))

        invalid = Volume.validate_fields(kwargs)
        if len(invalid) != 0:
            raise Exception("Invalid values for fields: %s" %
                            (", ".join(invalid)))

        # vet the keys
        for key_field in ['metadata_public_key', 'metadata_private_key']:
            key_str = kwargs[key_field]
            valid = cls.is_valid_key(key_str, VOLUME_RSA_KEYSIZE)
            if not valid:
                raise Exception("Key must be a %s-bit RSA key" %
                                (VOLUME_RSA_KEYSIZE))

        # attempt to create the Volume
        volume_id = random.randint(1, 2**63 - 1)

        volume_key_name = Volume.make_key_name(volume_id=volume_id)
        volume_key = storagetypes.make_key(Volume, volume_key_name)

        # put the Volume and nameholder at the same time---there's a good chance we'll succeed
        volume_nameholder_fut = VolumeNameHolder.create_async(
            kwargs['name'], volume_id)
        volume_fut = Volume.get_or_insert_async(
            volume_key_name,
            name=kwargs['name'],
            blocksize=kwargs['blocksize'],
            description=kwargs['description'],
            owner_id=user.owner_id,
            volume_id=volume_id,
            active=kwargs.get('active', False),
            version=1,
            cert_version=1,
            private=kwargs['private'],
            archive=kwargs['archive'],
            allow_anon=kwargs['allow_anon'],
            metadata_public_key=kwargs['metadata_public_key'],
            metadata_private_key=kwargs['metadata_private_key'],
            default_gateway_caps=kwargs['default_gateway_caps'])

        storagetypes.wait_futures([volume_nameholder_fut, volume_fut])

        # verify that there was no collision
        volume = volume_fut.get_result()
        volume_nameholder = volume_nameholder_fut.get_result()

        if volume_nameholder.volume_id != volume_id:
            # name collision
            storagetypes.deferred.defer(Volume.delete_all, [volume_key])
            raise Exception("Volume '%s' already exists!" % kwargs['name'])

        if volume.volume_id != volume_id:
            # ID collision
            storagetypes.deferred.defer(Volume.delete_all,
                                        [volume_key, volume_nameholder.key])
            raise Exception("Volume ID collision.  Please try again")

        # set permissions
        req = VolumeAccessRequest.create_async(
            user.owner_id,
            volume_id,
            kwargs['name'],
            random.randint(-2**63, 2**63 - 1),
            VolumeAccessRequest.STATUS_GRANTED,
            gateway_caps=kwargs['default_gateway_caps'],
            allowed_gateways=(1 << GATEWAY_TYPE_AG) | (1 << GATEWAY_TYPE_UG) |
            (1 << GATEWAY_TYPE_RG),
            request_message="Created").get_result()
        return volume_key

    @classmethod
    def Read(cls, volume_name_or_id, async=False, use_memcache=True):
Example #8
class GatewayDriver(storagetypes.Object):
    """
   Gateway driver, addressed by hash.
   """

    driver_hash = storagetypes.String()  # hex string
    driver_text = storagetypes.Blob()
    refcount = storagetypes.Integer()

    @classmethod
    def hash_driver(cls, driver_text):
        h = HashAlg.new()
        h.update(driver_text)
        return h.hexdigest()

    @classmethod
    def make_key_name(cls, driver_hash):
        return "GatewayDriver: hash=%s" % (driver_hash)

    @classmethod
    def create_or_ref(cls, _text):
        """
      Create a new driver, or re-ref the existing one.
      Do so atomically.
      """
        driver_hash = cls.hash_driver(_text)

        def txn():

            dk = storagetypes.make_key(
                GatewayDriver, GatewayDriver.make_key_name(driver_hash))
            d = dk.get()
            f = None

            if d is None:
                d = GatewayDriver(key=dk,
                                  driver_hash=driver_hash,
                                  driver_text=_text,
                                  refcount=1)
                d.put()

            else:
                d.refcount += 1
                d.put()

            return d

        return storagetypes.transaction(txn)

    @classmethod
    def ref(cls, driver_hash):
        """
      Increment reference count.
      Do this in an "outer" transaction (i.e. Gateway.Update)
      """
        dk = storagetypes.make_key(GatewayDriver,
                                   cls.make_key_name(driver_hash))
        d = dk.get()

        if d is None:
            return False

        d.refcount += 1
        d.put()
        return True

    @classmethod
    def unref(cls, driver_hash):
        """
      Unref a driver
      Delete it if its ref count goes non-positive.
      Do this in an "outer" transaction (i.e. Gateway.Delete, Gateway.Update)
      """
        dk = storagetypes.make_key(GatewayDriver,
                                   cls.make_key_name(driver_hash))
        d = dk.get()

        if d is None:
            return True

        d.refcount -= 1
        if d.refcount <= 0:
            dk.delete()
        else:
            d.put()

        return True

    @classmethod
    def unref_async(cls, driver_hash):
        """
      Unref a driver, asynchronously
      Delete it if its ref count goes non-positive.
      Do this in an "outer" transaction (i.e. Gateway.Delete, Gateway.Update)
      """
        dk = storagetypes.make_key(GatewayDriver,
                                   cls.make_key_name(driver_hash))
        d = dk.get()

        if d is None:
            return True

        d.refcount -= 1
        if d.refcount <= 0:
            dk.delete_async()
        else:
            d.put_async()

        return True
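A brief usage sketch of the refcounting API above; the driver text is a made-up placeholder, and create_or_ref runs the datastore work inside a transaction as shown:

# Store (or re-reference) a driver, addressed by the hash of its text.
driver = GatewayDriver.create_or_ref("{'example': 'driver text'}")   # refcount = 1

GatewayDriver.ref(driver.driver_hash)     # refcount = 2
GatewayDriver.unref(driver.driver_hash)   # refcount = 1
GatewayDriver.unref(driver.driver_hash)   # refcount = 0, record is deleted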
Example #9
class Gateway(storagetypes.Object):

    # signed gateway certificate from the user
    gateway_cert = storagetypes.Blob(
    )  # protobuf'ed gateway certificate generated and signed by the gateway owner upon creation

    # all of the below information is derived from the above signed gateway certificate.
    # it is NOT filled in by any method.
    gateway_type = storagetypes.Integer(default=0)

    owner_id = storagetypes.Integer(
        default=-1)  # ID of the SyndicateUser that owns this gateway
    host = storagetypes.String()
    port = storagetypes.Integer()
    name = storagetypes.String()  # name of this gateway
    g_id = storagetypes.Integer()
    volume_id = storagetypes.Integer(default=-1)
    deleted = storagetypes.Boolean(default=False)

    gateway_public_key = storagetypes.Text(
    )  # PEM-encoded RSA public key to verify control-plane messages (metadata) sent from this gateway.

    caps = storagetypes.Integer(default=0)  # capabilities

    cert_expires = storagetypes.Integer(default=-1)  # -1 means "never expires"

    cert_version = storagetypes.Integer(
        default=1)  # certificate-related version of this gateway

    driver_hash = storagetypes.String(
    )  # driver hash for this gateway (addresses GatewayDriver).  hex string, not byte string

    need_cert = storagetypes.Boolean(
        default=False
    )  # whether or not other gateways in the volume need this gateway's certificate (i.e. will this gateway ever serve data)

    # for RPC
    key_type = "gateway"

    required_attrs = ["gateway_cert"]

    read_attrs_api_required = [
        "driver_hash",
        "host",
        "port",
        "owner_id",
        "g_id",
        "gateway_type",
        "volume_id",
        "cert_version",
        "cert_expires",
        "caps",
    ]

    read_attrs = [
        "gateway_public_key",
        "name",
    ] + read_attrs_api_required

    # fields an API call can set
    write_attrs = ["gateway_cert"]

    # attrs from the cert that are allowed to change between cert versions
    modifiable_cert_attrs = [
        "gateway_type", "host", "port", "caps", "cert_expires", "cert_version",
        "driver_hash", "gateway_public_key"
    ]

    write_attrs_api_required = write_attrs

    default_values = {"gateway_cert": ""}

    key_attrs = ["g_id"]

    validators = {
       "name": (lambda cls, value: len( unicode(value).translate(dict((ord(char), None) for char in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.: ")) ) == 0 \
                                   and not is_int(value) \
                                   and len(value) > 0 ),
       "gateway_public_key": (lambda cls, value: Gateway.is_valid_key( value, GATEWAY_RSA_KEYSIZE ) and Gateway.is_public_key( value ) )
    }

    @classmethod
    def needs_cert(cls, gateway_type, caps):
        """
      Given a gateway's capabilities, will another gateway need its certificate?
      """
        if (caps & (GATEWAY_CAP_WRITE_METADATA | GATEWAY_CAP_WRITE_DATA
                    | GATEWAY_CAP_COORDINATE)) != 0:
            return True

        return False

    def owned_by(self, user):
        return user.owner_id == self.owner_id or self.owner_id == GATEWAY_ID_ANON

    def load_pubkey(self, pubkey_str, in_base64=True):
        """
      Load a PEM-encoded RSA public key.
      if in_base64 == True, then try to base64-decode it first (i.e. the PEM-encoded
      public key is itself base64-encoded again)
      
      return 0 on success
      return -EINVAL if the key is invalid 
      return -EEXIST if the key is the same as the one we have in this Gateway
      """

        pubkey_str_unencoded = None

        if in_base64:
            pubkey_str_unencoded = base64.b64decode(pubkey_str)
        else:
            pubkey_str_unencoded = pubkey_str

        if not Gateway.is_valid_key(pubkey_str_unencoded, GATEWAY_RSA_KEYSIZE):
            return -errno.EINVAL

        new_public_key = CryptoKey.importKey(pubkey_str_unencoded).exportKey()
        if self.gateway_public_key is not None and new_public_key == self.gateway_public_key:
            return -errno.EEXIST

        self.gateway_public_key = new_public_key

        return 0

    def protobuf_cert(self, cert_pb):
        """
      Populate an ms_gateway_cert structure from our cert
      """

        # ParseFromString is an instance method: deserialize into a fresh message
        gateway_cert_pb = ms_pb2.ms_gateway_cert()
        gateway_cert_pb.ParseFromString(self.gateway_cert)
        cert_pb.CopyFrom(gateway_cert_pb)

    def check_caps(self, caps):
        """
      Given a bitmask of caps, verify that all of them are met by our caps.
      """
        return (self.caps & caps) == caps

    def verify_message(self, msg):
        """
      Verify the authenticity of a received message with a signature field (which should store a base64-encoded signature)
      """
        sig = msg.signature
        sig_bin = base64.b64decode(sig)

        msg.signature = ""
        msg_str = msg.SerializeToString()

        ret = self.auth_verify(self.gateway_public_key, msg_str, sig_bin)

        msg.signature = sig

        return ret

    def authenticate_session(self, g_type, g_id, url, signature_b64):
        """
      Verify that the signature over the constructed string "${g_type}_${g_id}:${url}"
      was signed by this gateway's private key.
      """
        sig = base64.b64decode(signature_b64)

        data = "%s_%s:%s" % (g_type, g_id, url)

        ret = self.auth_verify(self.gateway_public_key, data, sig)

        return ret

    @classmethod
    def cert_to_dict(cls, gateway_cert):
        """
      Convert a protobuf structure to a dict of values,
      using the Gateway property names.
      """

        # unpack certificate
        cert_version = gateway_cert.version
        gateway_name = gateway_cert.name
        gateway_type = gateway_cert.gateway_type
        gateway_id = gateway_cert.gateway_id
        host = gateway_cert.host
        port = gateway_cert.port
        pubkey_pem = gateway_cert.public_key
        cert_expires = gateway_cert.cert_expires
        requested_caps = gateway_cert.caps
        driver_hash = binascii.hexlify(gateway_cert.driver_hash)
        volume_id = gateway_cert.volume_id
        owner_id = gateway_cert.owner_id

        kwargs = {
            "cert_version": cert_version,
            "name": gateway_name,
            "gateway_type": gateway_type,
            "host": host,
            "port": port,
            "gateway_public_key": pubkey_pem,
            "cert_expires": cert_expires,
            "caps": requested_caps,
            "driver_hash": driver_hash,
            "volume_id": volume_id,
            "owner_id": owner_id,
            "g_id": gateway_id,
            "gateway_cert": gateway_cert.SerializeToString()
        }

        return kwargs

    @classmethod
    def Create(cls, user, volume, gateway_cert, driver_text):
        """
      Create a gateway, using its user-signed gateway certificate.
      
      NOTE: the caller must verify the authenticity of the certificate.
      """

        kwargs = cls.cert_to_dict(gateway_cert)

        # sanity check
        if kwargs['volume_id'] != volume.volume_id:
            raise Exception("Volume ID mismatch: cert has %s; expected %s" %
                            (kwargs['volume_id'], volume.volume_id))

        if kwargs['owner_id'] != user.owner_id:
            # this is only okay if the user is the volume owner, and the gateway ID is the anonymous gateway
            if not (kwargs['owner_id'] == USER_ID_ANON
                    and volume.owner_id == user.owner_id):
                raise Exception("User ID mismatch: cert has %s; expected %s" %
                                (kwargs['owner_id'], user.owner_id))

        # sanity check: do we have everything we need?
        missing = cls.find_missing_attrs(kwargs)
        if len(missing) != 0:
            raise Exception("Missing attributes: %s" % (", ".join(missing)))

        # sanity check: are our fields valid?
        invalid = cls.validate_fields(kwargs)
        if len(invalid) != 0:
            raise Exception("Invalid values for fields: %s" %
                            (", ".join(invalid)))

        # sanity check: does the driver match the driver's hash in the cert?
        if driver_text is not None:
            driver_hash = GatewayDriver.hash_driver(driver_text)
            if driver_hash != binascii.hexlify(gateway_cert.driver_hash):
                raise Exception(
                    "Driver hash mismatch: len = %s, expected = %s, got = %s" %
                    (len(driver_text), driver_hash,
                     binascii.hexlify(gateway_cert.driver_hash)))

        gateway_type = kwargs['gateway_type']

        # enforce cert distribution
        kwargs['need_cert'] = Gateway.needs_cert(gateway_type, kwargs['caps'])

        g_id = kwargs['g_id']
        g_key_name = Gateway.make_key_name(g_id=g_id)
        g_key = storagetypes.make_key(cls, g_key_name)

        # create a nameholder and this gateway at once---there's a good chance we'll succeed
        futs = []

        gateway_nameholder_fut = GatewayNameHolder.create_async(
            kwargs['name'], g_id)
        gateway_fut = cls.get_or_insert_async(g_key_name, **kwargs)

        futs = [gateway_nameholder_fut, gateway_fut]

        gateway_driver = None
        if driver_text is not None:
            gateway_driver = GatewayDriver.create_or_ref(driver_text)

        # wait for operations to complete
        storagetypes.wait_futures(futs)

        # check for collision...
        gateway_nameholder = gateway_nameholder_fut.get_result()
        gateway = gateway_fut.get_result()

        to_rollback = []

        if gateway_driver is not None:
            to_rollback.append(gateway_driver.key)

        if gateway_nameholder.g_id != g_id:
            # name collision...
            to_rollback.append(g_key)
            storagetypes.deferred.defer(Gateway.delete_all, to_rollback)
            raise Exception("Gateway '%s' already exists!" % kwargs['name'])

        if gateway.g_id != g_id:
            # ID collision...
            to_rollback.append(gateway_nameholder.key)
            to_rollback.append(g_key)
            storagetypes.deferred.defer(Gateway.delete_all, to_rollback)
            raise Exception("Gateway ID collision.  Please try again.")

        # we're good!
        return g_key

    @classmethod
    @storagetypes.concurrent
    def Read_Async(cls, key, deleted=False):
        gw = yield key.get_async()
        if gw is None:
            storagetypes.concurrent_return(None)

        if gw.deleted and not deleted:
            storagetypes.concurrent_return(None)

        storagetypes.concurrent_return(gw)

    @classmethod
    def Read(cls, g_name_or_id, async=False, use_memcache=True, deleted=False):
Example #10
class Gateway(storagetypes.Object):

    gateway_type = storagetypes.Integer(default=0)

    owner_id = storagetypes.Integer(
        default=-1)  # ID of the SyndicateUser that owns this gateway
    host = storagetypes.String()
    port = storagetypes.Integer()
    name = storagetypes.String()  # name of this gateway
    g_id = storagetypes.Integer()
    volume_id = storagetypes.Integer(default=-1)

    gateway_public_key = storagetypes.Text(
    )  # PEM-encoded RSA public key to verify control-plane messages (metadata) sent from this gateway.
    encrypted_gateway_private_key = storagetypes.Text(
    )  # optional: corresponding RSA private key, sealed with user's password.  Can only be set on creation.

    caps = storagetypes.Integer(default=0)  # capabilities

    session_password_hash = storagetypes.Text()
    session_password_salt = storagetypes.Text()
    session_timeout = storagetypes.Integer(default=-1, indexed=False)
    session_expires = storagetypes.Integer(
        default=-1)  # -1 means "never expires"

    cert_expires = storagetypes.Integer(default=-1)  # -1 means "never expires"

    cert_version = storagetypes.Integer(
        default=1)  # certificate-related version of this gateway

    closure = storagetypes.Text()  # closure data for this gateway

    need_cert = storagetypes.Boolean(
        default=False
    )  # whether or not other gateways in the volume need this gateway's certificate (i.e. will this gateway ever serve data)

    # for RPC
    key_type = "gateway"

    required_attrs = [
        "owner_id", "host", "port", "name", "gateway_type", "caps"
    ]

    read_attrs_api_required = [
        "closure", "host", "port", "owner_id", "g_id", "gateway_type",
        "volume_id", "session_timeout", "session_expires", "cert_version",
        "cert_expires", "caps", "encrypted_gateway_private_key"
    ]

    read_attrs = ["gateway_public_key", "name"] + read_attrs_api_required

    write_attrs = [
        "closure", "host", "port", "cert_expires", "session_expires",
        "session_timeout"
    ]

    write_attrs_api_required = write_attrs

    # TODO: session expires in 3600 seconds
    # TODO: cert expires in 86400 seconds
    default_values = {
        "session_expires": (lambda cls, attrs: -1),
        "cert_version": (lambda cls, attrs: 1),
        "cert_expires": (lambda cls, attrs: -1),
        "caps": (lambda cls, attrs: 0),
        "encrypted_gateway_private_key": (lambda cls, attrs: None)
    }

    key_attrs = ["g_id"]

    validators = {
        "session_password_hash": (lambda cls, value: len(
            unicode(value).translate(
                dict((ord(char), None) for char in "0123456789abcdef"))) == 0),
        "name": (lambda cls, value: len(
            unicode(value).translate(
                dict(
                    (ord(char), None) for char in
                    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.: "
                ))) == 0 and not is_int(value)),
        "gateway_public_key":
        (lambda cls, value: Gateway.is_valid_key(value, GATEWAY_RSA_KEYSIZE))
    }

    @classmethod
    def safe_caps(cls, gateway_type, given_caps):
        '''
      Get this gateway's capability bits, while making sure that AGs and RGs 
      get hardwired capabilities.
      '''
        if gateway_type == GATEWAY_TYPE_AG:
            # caps are always read and write metadata
            return (GATEWAY_CAP_READ_METADATA | GATEWAY_CAP_WRITE_METADATA)

        elif gateway_type == GATEWAY_TYPE_RG:
            # caps are always 0
            return 0

        return given_caps

    @classmethod
    def needs_cert(cls, gateway_type, caps):
        """
      Given a gateway's capabilities, will another gateway need its certificate?
      """
        if gateway_type == GATEWAY_TYPE_AG:
            return True

        if gateway_type == GATEWAY_TYPE_RG:
            return True

        if (caps & (GATEWAY_CAP_WRITE_METADATA | GATEWAY_CAP_WRITE_DATA
                    | GATEWAY_CAP_COORDINATE)) != 0:
            return True

        return False

    def owned_by(self, user):
        return user.owner_id == self.owner_id

    def authenticate_session(self, password):
        """
      Verify that the session password is correct
      """
        pw_hash = Gateway.generate_password_hash(password,
                                                 self.session_password_salt)
        return pw_hash == self.session_password_hash

    @classmethod
    def generate_password_hash(cls, pw, salt):
        '''
      Given a password and salt, generate the hash to store.
      '''
        h = HashAlg.new()
        h.update(salt)
        h.update(pw)

        pw_hash = h.hexdigest()

        return unicode(pw_hash)

    @classmethod
    def generate_password(cls, length):
        '''
      Create a random password of a given length
      '''
        password = "".join([
            random.choice(
                "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
            ) for i in xrange(length)
        ])
        return password

    @classmethod
    def generate_session_password(cls):
        '''
      Generate a session password
      '''
        return cls.generate_password(GATEWAY_SESSION_PASSWORD_LENGTH)

    @classmethod
    def generate_session_secrets(cls):
        """
      Generate a password, password hash, and salt for this gateway
      """
        password = cls.generate_session_password()
        salt = cls.generate_password(GATEWAY_SESSION_SALT_LENGTH)
        pw_hash = Gateway.generate_password_hash(password, salt)

        return (password, pw_hash, salt)

    def regenerate_session_password(self):
        """
      Regenerate a session password.  The caller should put() 
      the gateway after this call to save the hash and salt.
      """
        password, pw_hash, salt = Gateway.generate_session_secrets()
        if self.session_timeout > 0:
            # same timestamp helper used elsewhere in this module
            now = int(storagetypes.get_time())
            self.session_expires = now + self.session_timeout
        else:
            self.session_expires = -1

        self.session_password_hash = pw_hash
        self.session_password_salt = salt
        return password

    def load_pubkey(self, pubkey_str, in_base64=True):
        """
      Load a PEM-encoded RSA public key.
      if in_base64 == True, then try to base64-decode it first (i.e. the PEM-encoded
      public key is itself base64-encoded again)
      
      return 0 on success
      return -EINVAL if the key is invalid 
      return -EEXIST if the key is the same as the one we have in this Gateway
      """

        pubkey_str_unencoded = None

        if in_base64:
            pubkey_str_unencoded = base64.b64decode(pubkey_str)
        else:
            pubkey_str_unencoded = pubkey_str

        if not Gateway.is_valid_key(pubkey_str_unencoded, GATEWAY_RSA_KEYSIZE):
            return -errno.EINVAL

        new_public_key = CryptoKey.importKey(pubkey_str_unencoded).exportKey()
        if self.gateway_public_key is not None and new_public_key == self.gateway_public_key:
            return -errno.EEXIST

        self.gateway_public_key = new_public_key

        return 0

    def protobuf_cert(self, cert_pb, need_closure=False):
        """
      Populate an ms_volume_gateway_cred structure
      """
        cert_pb.version = self.cert_version
        cert_pb.gateway_type = self.gateway_type
        cert_pb.owner_id = self.owner_id
        cert_pb.gateway_id = self.g_id
        cert_pb.name = self.name
        cert_pb.host = self.host
        cert_pb.port = self.port
        cert_pb.caps = self.caps
        cert_pb.cert_expires = self.cert_expires
        cert_pb.volume_id = self.volume_id

        if self.closure is None or not need_closure:
            cert_pb.closure_text = ""

        elif self.closure is not None:
            cert_pb.closure_text = str(self.closure)

        cert_pb.signature = ""

        if self.gateway_public_key != None:
            cert_pb.public_key = self.gateway_public_key
        else:
            cert_pb.public_key = "NONE"
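    # Hedged usage sketch (not part of the original example): the signature is
    # intentionally left blank so the signer can serialize the cert and sign
    # that byte string.  The message type and sign_data() call below are
    # assumptions based on how ms_pb2 and common.api are used elsewhere here.
    #
    #   cert_pb = ms_pb2.ms_volume_gateway_cred()        # assumed message type
    #   gateway.protobuf_cert(cert_pb, need_closure=True)
    #   payload = cert_pb.SerializeToString()
    #   cert_pb.signature = base64.b64encode(api.sign_data(private_key, payload))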

    def check_caps(self, caps):
        """
      Given a bitmask of caps, verify that all of them are met by our caps.
      """
        return (self.caps & caps) == caps
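    # Hedged usage sketch (not part of the original example): caps is a bitmask,
    # so every requested bit must already be set.  The flag values below are
    # hypothetical:
    #
    #   CAP_READ_DATA  = 0x1
    #   CAP_WRITE_DATA = 0x2
    #   gateway.caps = CAP_READ_DATA
    #   gateway.check_caps(CAP_READ_DATA)                   # True
    #   gateway.check_caps(CAP_READ_DATA | CAP_WRITE_DATA)  # False: write bit missing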

    def verify_message(self, msg):
        """
      Verify the authenticity of a received message with a signature field (which should store a base64-encoded signature)
      """
        sig = msg.signature
        sig_bin = base64.b64decode(sig)

        msg.signature = ""
        msg_str = msg.SerializeToString()

        ret = self.auth_verify(self.gateway_public_key, msg_str, sig_bin)

        msg.signature = sig

        return ret
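    # Hedged usage sketch (not part of the original example): the signature is
    # computed over the message serialized with an empty signature field, which
    # is why the field is blanked before SerializeToString() and restored after.
    #
    #   if not gateway.verify_message(request_pb):   # request_pb is illustrative
    #       pass   # reject: signature does not verify against gateway_public_key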

    @classmethod
    def Create(cls, user, volume, **kwargs):
        """
      Create a gateway.
      NOTE: careful--caps are required!  don't let users call this directly.
      """

        # enforce volume ID
        kwargs['volume_id'] = volume.volume_id

        # enforce ownership--make sure the calling user owns this gateway
        kwargs['owner_id'] = user.owner_id

        # populate kwargs with default values for missing attrs
        cls.fill_defaults(kwargs)

        # sanity check: do we have everything we need?
        missing = cls.find_missing_attrs(kwargs)
        if len(missing) != 0:
            raise Exception("Missing attributes: %s" % (", ".join(missing)))

        # sanity check: are our fields valid?
        invalid = cls.validate_fields(kwargs)
        if len(invalid) != 0:
            raise Exception("Invalid values for fields: %s" %
                            (", ".join(invalid)))

        # what kind of gateway are we?
        gateway_type = kwargs['gateway_type']

        # set capabilities correctly and safely
        kwargs['caps'] = cls.safe_caps(gateway_type,
                                       volume.default_gateway_caps)

        # enforce cert generation
        kwargs['need_cert'] = Gateway.needs_cert(gateway_type, kwargs['caps'])

        # ID...
        g_id = random.randint(0, 2**63 - 1)
        kwargs['g_id'] = g_id

        g_key_name = Gateway.make_key_name(g_id=g_id)
        g_key = storagetypes.make_key(cls, g_key_name)

        # create a nameholder and this gateway at once---there's a good chance we'll succeed
        gateway_nameholder_fut = GatewayNameHolder.create_async(
            kwargs['name'], g_id)
        gateway_fut = cls.get_or_insert_async(g_key_name, **kwargs)

        # wait for operations to complete
        storagetypes.wait_futures([gateway_nameholder_fut, gateway_fut])

        # check for collision...
        gateway_nameholder = gateway_nameholder_fut.get_result()
        gateway = gateway_fut.get_result()

        if gateway_nameholder.g_id != g_id:
            # name collision...
            storagetypes.deferred.defer(Gateway.delete_all, [g_key])
            raise Exception("Gateway '%s' already exists!" % kwargs['name'])

        if gateway.g_id != g_id:
            # ID collision...
            storagetypes.deferred.defer(Gateway.delete_all,
                                        [gateway_nameholder.key, g_key])
            raise Exception("Gateway ID collision.  Please try again.")

        # we're good!
        return g_key
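    # Hedged usage sketch (not part of the original example): Create is meant to
    # be called from trusted API code (it fills in caps, owner_id, and volume_id
    # itself).  The keyword arguments below are illustrative; GATEWAY_TYPE_UG is
    # a hypothetical constant:
    #
    #   g_key = Gateway.Create(user, volume,
    #                          gateway_type=GATEWAY_TYPE_UG,
    #                          name="alice-UG-1",
    #                          host="gateway.example.com",
    #                          port=32780)
    #   gateway = g_key.get()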

    @classmethod
    def Read(cls, g_name_or_id, async=False, use_memcache=True):
Example #11
0
class Closure(storagetypes.Object):

    closure_id = storagetypes.Integer(default=0)  # unique ID of this closure
    name = storagetypes.String(default="")  # name of this closure
    owner_id = storagetypes.Integer(default=0)  # owner of this closure
    public = storagetypes.Boolean(
        default=False
    )  # whether or not other users' gateways can access this closure

    blob_ref = storagetypes.Text()  # reference to the closure blob

    # for RPC
    key_type = "closure"

    required_attrs = ["owner_id", "public", "blob_ref"]

    read_attrs_api_required = ["blob_ref"]

    read_attrs = ["closure_id", "name", "public"] + read_attrs_api_required

    write_attrs = ["blob_ref"]

    write_attrs_api_required = write_attrs

    key_attrs = ["closure_id"]

    @classmethod
    def Create(cls, user, **kwargs):
        """
      Create a closure.
      Only do this after the closure binary has been uploaded successfully.
      """

        # enforce ownership--make sure the calling user owns this closure
        kwargs['owner_id'] = user.owner_id

        # populate kwargs with default values for missing attrs
        cls.fill_defaults(kwargs)

        # sanity check: do we have everything we need?
        missing = cls.find_missing_attrs(kwargs)
        if len(missing) != 0:
            raise Exception("Missing attributes: %s" % (", ".join(missing)))

        # sanity check: are our fields valid?
        invalid = cls.validate_fields(kwargs)
        if len(invalid) != 0:
            raise Exception("Invalid values for fields: %s" %
                            (", ".join(invalid)))

        # ID...
        closure_id = random.randint(0, 2**63 - 1)
        kwargs['closure_id'] = closure_id

        closure_key_name = Closure.make_key_name(closure_id=closure_id)
        closure_key = storagetypes.make_key(cls, closure_key_name)

        # create a nameholder and this closure at once---there's a good chance we'll succeed
        closure_nameholder_fut = ClosureNameHolder.create_async(
            kwargs['name'], closure_id)
        closure_fut = cls.get_or_insert_async(closure_key_name, **kwargs)

        # wait for operations to complete
        storagetypes.wait_futures([closure_nameholder_fut, closure_fut])

        # check for collision...
        closure_nameholder = closure_nameholder_fut.get_result()
        closure = closure_fut.get_result()

        if closure_nameholder.closure_id != closure_id:
            # name collision...
            storagetypes.deferred.defer(Closure.delete_all, [closure_key])
            raise Exception("Closure '%s' already exists!" % kwargs['name'])

        if closure.closure_id != closure_id:
            # ID collision...
            storagetypes.deferred.defer(Closure.delete_all,
                                        [closure_nameholder.key, closure_key])
            raise Exception("Closure ID collision.  Please try again.")

        # we're good!
        return closure_key
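    # Hedged usage sketch (not part of the original example): the closure binary
    # lives in a separate blob store; only a reference is recorded here, so
    # Create should run after the upload has succeeded.  Argument values are
    # illustrative:
    #
    #   closure_key = Closure.Create(user,
    #                                name="replica-driver",
    #                                public=False,
    #                                blob_ref="closure-blobs/replica-driver")
    #   closure = closure_key.get()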

    @classmethod
    def Read(cls, closure_name_or_id, async=False, use_memcache=True):
Example #12
0
class MSBaselinePerformanceType(storagetypes.Object):
    """
    Record to get and put once per baseline performance test.
    """
    rec_txt = storagetypes.String(default="")
Example #13
0
class SyndicateUser(storagetypes.Object):

    email = storagetypes.String()  # used as the username to Syndicate
    owner_id = storagetypes.Integer()  # numeric ID for gateways
    admin_id = storagetypes.Integer()  # which admin made this user?

    max_volumes = storagetypes.Integer(
        default=10
    )  # how many Volumes can this user create? (-1 means unlimited)
    max_gateways = storagetypes.Integer(
        default=10
    )  # how many gateways can this user create?  (-1 means unlimited)

    is_admin = storagetypes.Boolean(
        default=False, indexed=False)  # is this user an administrator?

    public_key = storagetypes.Text(
    )  # PEM-encoded public key for authenticating this user, or USER_KEY_UNSET if it is not set, or USER_KEY_UNUSED if it will not be used

    user_cert_protobuf = storagetypes.Blob(
    )  # protobuf'ed certificate for this user
    signature = storagetypes.Blob(
    )  # signature over the data used to generate this record

    # for RPC
    key_type = "user"

    required_attrs = ["email", "public_key"]

    key_attrs = ["email"]

    default_values = {
        "max_volumes": (lambda cls, attrs: 10),
        "max_gateways": (lambda cls, attrs: 10),
        "is_admin": (lambda cls, attrs: False),
    }

    validators = {
        "email": (lambda cls, value: valid_email(value)),
        "public_key": (lambda cls, value: cls.is_valid_key(
            value, USER_RSA_KEYSIZE) and cls.is_public_key(value))
    }

    read_attrs_api_required = [
        "email",
        "owner_id",
        "max_volumes",
        "max_gateways",
        "public_key",
    ]

    read_attrs = read_attrs_api_required

    write_attrs_api_required = [
        "public_key",
    ]

    write_attrs_admin_required = ["max_volumes", "max_gateways", "is_admin"]

    write_attrs = write_attrs_api_required + write_attrs_admin_required

    # what fields in the cert can change?
    modifiable_cert_fields = ["public_key", "max_volumes", "max_gateways"]

    def owned_by(self, user):
        return user.owner_id == self.owner_id

    @classmethod
    def Authenticate(cls, email, data, data_signature):
        """
      Authenticate a user via public-key cryptography.
      Verify that data was signed by the user's private key, given the signature and data.
      (use RSA PSS for security).
      Return the user on success; False on authentication error; None if the user doesn't exist
      """
        user = SyndicateUser.Read(email)
        if user is None:
            return None

        ret = cls.auth_verify(user.public_key, data, data_signature)
        if not ret:
            logging.error("Verification failed for %s" % email)
            return False

        else:
            return user
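    # Hedged usage sketch (not part of the original example): the three-way
    # return value (user / False / None) lets callers distinguish a bad
    # signature from a missing account:
    #
    #   result = SyndicateUser.Authenticate(email, payload, sig_bin)
    #   if result is None:
    #       pass   # no such user
    #   elif result is False:
    #       pass   # signature did not verify against the user's public_key
    #   else:
    #       pass   # result is the authenticated SyndicateUser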

    @classmethod
    def cert_to_dict(cls, user_cert):

        attrs = {
            'email': str(user_cert.email),
            'owner_id': user_cert.user_id,
            'public_key': str(user_cert.public_key),
            'admin_id': user_cert.admin_id,
            'max_volumes': user_cert.max_volumes,
            'max_gateways': user_cert.max_gateways,
            'is_admin': user_cert.is_admin,
            'signature': str(user_cert.signature),
            'user_cert_protobuf': user_cert.SerializeToString()
        }

        return attrs

    @classmethod
    def Create(cls, user_cert):
        """
      Create a SyndicateUser from a user_cert.
      
      NOTE: the caller will need to have validated the user cert
      """

        kwargs = cls.cert_to_dict(user_cert)
        email = kwargs['email']

        missing = SyndicateUser.find_missing_attrs(kwargs)
        if len(missing) != 0:
            raise Exception("Missing attributes: %s" % (", ".join(missing)))

        invalid = SyndicateUser.validate_fields(kwargs)
        if len(invalid) != 0:
            raise Exception("Invalid values for fields: %s" %
                            (", ".join(invalid)))

        user_key_name = SyndicateUser.make_key_name(email=email)
        user = storagetypes.memcache.get(user_key_name)
        if user is None:

            user_key = storagetypes.make_key(SyndicateUser, user_key_name)
            user = user_key.get()

            if user is None:

                # create!
                user = SyndicateUser.get_or_insert(user_key_name, **kwargs)

                # check for collisions
                if user.owner_id != kwargs['owner_id']:
                    # collision
                    raise Exception("User '%s' already exists" % email)

                return user.key

            else:
                raise Exception("User '%s' already exists" % email)

        else:
            raise Exception("User '%s' already exists" % email)

    @classmethod
    def CreateAdmin(cls, email, owner_id, public_key, syndicate_private_key):
        """
      Create the admin user.
      Called when the MS initializes itself for the first time 
      """

        import common.api as api

        admin_cert = ms_pb2.ms_user_cert()

        admin_cert.user_id = owner_id
        admin_cert.email = email
        admin_cert.public_key = public_key
        admin_cert.admin_id = owner_id
        admin_cert.max_volumes = -1
        admin_cert.max_gateways = -1
        admin_cert.is_admin = True
        admin_cert.signature = ""

        admin_cert_str = admin_cert.SerializeToString()

        sig = api.sign_data(syndicate_private_key, admin_cert_str)

        admin_cert.signature = base64.b64encode(sig)

        return SyndicateUser.Create(admin_cert)

    @classmethod
    def Read(cls, email_or_owner_id, async=False):
        """
      Read a SyndicateUser
      
      Arguments:
      email_or_owner_id         -- Email address of the user to read, or the owner ID (str or int)
      """
        owner_id = None
        email = None

        try:
            owner_id = int(email_or_owner_id)
        except (TypeError, ValueError):
            email = email_or_owner_id

        if owner_id is not None:
            return cls.Read_ByOwnerID(owner_id, async=async)

        user_key_name = SyndicateUser.make_key_name(email=email)
        user_key = storagetypes.make_key(SyndicateUser, user_key_name)

        user = storagetypes.memcache.get(user_key_name)
        if user is None:
            if async:
                return user_key.get_async(use_memcache=False)

            else:
                user = user_key.get(use_memcache=False)
                if not user:
                    return None
                else:
                    storagetypes.memcache.set(user_key_name, user)

        elif async:
            user = storagetypes.FutureWrapper(user)

        return user
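    # Hedged usage sketch (not part of the original example): Read accepts
    # either an email address or a numeric owner ID and consults memcache
    # before the datastore.  Typical synchronous and asynchronous calls:
    #
    #   user = SyndicateUser.Read("alice@example.com")
    #   user_fut = SyndicateUser.Read(1234, async=True)
    #   user = user_fut.get_result()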