Code example #1
import base64

import six

# "patch" is used by the test decorators below; unittest.mock provides it
# (the original suite may import it from the vendored "mock" package instead).
from unittest.mock import patch

from tank_test.tank_test_base import ShotgunTestBase
from tank_test.tank_test_base import setUpModule  # noqa

from tank.authentication import (
    ShotgunAuthenticator,
    IncompleteCredentials,
    DefaultsManager,
    user_impl,
    user,
)

# Create a set of valid cookies, for SSO and Web related tests.
# For a Web session, we detect the presence of the shotgun_current_session_expiration cookie.
valid_web_session_metadata = base64.b64encode(
    six.ensure_binary("shotgun_current_session_expiration=1234"))
# For a Saml session, we detect the presence of the shotgun_sso_session_expiration_u* cookie.
# But we also need to figure out what the user ID is, for which we use the csrf_token_u* suffix.
valid_sso_session_metadata = base64.b64encode(
    six.ensure_binary(
        "csrf_token_u00=fedcba;shotgun_sso_session_expiration_u00=4321"))


class CustomDefaultManager(DefaultsManager):
    def get_host(self):
        return "https://some_site.shotgunstudio.com"


class ShotgunAuthenticatorTests(ShotgunTestBase):
    @patch("tank_vendor.shotgun_api3.Shotgun.server_caps")
    @patch("tank.authentication.session_cache.generate_session_token")
Code example #2
    def __db_insert_activity_updates(self, connection, cursor, entity_type,
                                     entity_id, events):
        """
        Adds a number of records to the activity db. If they
        already exist, they are not re-added.

        :param connection: Database connection (coming from the decorator)
        :param cursor: Database cursor (coming from the decorator)
        :param entity_type: Entity type to process
        :param entity_id: Entity id to process
        :param events: Events to insert
        """
        self._bundle.log_debug("Updating database with %s new events" %
                               len(events))
        try:
            for event in events:
                activity_id = event["id"]
                payload = sgtk.util.pickle.dumps(event)
                blob = sqlite3.Binary(six.ensure_binary(payload))

                # first insert event
                if self._force_activity_stream_update:
                    sql = """
                        INSERT OR REPLACE INTO activity(activity_id, payload, created_at)
                        SELECT ?, ?, datetime('now')
                    """
                    params = (activity_id, blob)
                else:
                    sql = """
                        INSERT INTO activity(activity_id, payload, created_at)
                        SELECT ?, ?, datetime('now')
                        WHERE NOT EXISTS(SELECT activity_id FROM activity WHERE activity_id = ?);
                    """
                    params = (activity_id, blob, activity_id)

                cursor.execute(sql, params)
                if self._force_activity_stream_update:
                    sql = """
                        INSERT OR REPLACE INTO entity (entity_type, entity_id, activity_id, created_at)
                        SELECT ?, ?, ?, datetime('now')
                    """
                    params = (entity_type, entity_id, activity_id)
                else:
                    # now insert entity record
                    sql = """
                        INSERT INTO entity (entity_type, entity_id, activity_id, created_at)
                        SELECT ?, ?, ?, datetime('now')
                        WHERE NOT EXISTS(SELECT entity_id FROM entity WHERE entity_type = ? and entity_id = ? and activity_id = ?);
                    """
                    params = (
                        entity_type,
                        entity_id,
                        activity_id,
                        entity_type,
                        entity_id,
                        activity_id,
                    )

                cursor.execute(sql, params)

            connection.commit()
        except Exception:
            # suppress and continue
            self._bundle.log_exception("Could not add activity stream data "
                                       "to cache database %s" %
                                       self._cache_path)
        finally:
            self._force_activity_stream_update = False

        self._bundle.log_debug("...update complete")
Code example #3
    def __compute_cache_path(self, cache_seed=None):
        """
        Calculates and returns a cache path to use for this instance's query.

        :param cache_seed: Cache seed supplied to the ``__init__`` method.

        :return: The path to use when caching the model data.
        :rtype: str
        """

        # hashes to use to generate the cache path
        params_hash = hashlib.md5()
        entity_field_hash = hashlib.md5()

        # even though the navigation path provides a nice organizational
        # structure for caching, it can get long. To avoid MAX_PATH issues
        # on Windows, just hash it.
        params_hash.update(six.ensure_binary(str(self._path)))

        # include the schema generation number for clients
        params_hash.update(six.ensure_binary(str(self._schema_generation)))

        # If this value changes over time (like between Qt4 and Qt5), we need
        # to assume our previous user roles are invalid since Qt might have
        # taken them over. If the role's value is 32, don't add it to the hash
        # so we don't invalidate PySide/PyQt4 caches.
        if QtCore.Qt.UserRole != 32:
            params_hash.update(six.ensure_binary(str(QtCore.Qt.UserRole)))

        # include the cache_seed for additional user control over external state
        params_hash.update(six.ensure_binary(str(cache_seed)))

        # iterate through the sorted entity fields to ensure consistent order
        for (entity_type, fields) in sorted(self._entity_fields.items()):
            for field in fields:
                entity_field_hash.update(
                    six.ensure_binary("%s.%s" % (entity_type, field)))

        # convert the seed entity field into a path segment.
        # example: Version.entity => Version/entity
        seed_entity_field_path = os.path.join(
            *self._seed_entity_field.split("."))

        # Organize files on disk based on the seed_entity field path segment and
        # then param and entity field hashes

        # Try to share the cache at the site level which was introduced in tk-core
        # > 0.18.118.
        # If not available, fallback on per project/pipeline config/plugin id
        # caching.
        if hasattr(self._bundle, "site_cache_location"):
            cache_location = self._bundle.site_cache_location
        else:
            cache_location = self._bundle.cache_location

        data_cache_path = os.path.join(
            cache_location,
            "sg_nav",
            seed_entity_field_path,
            params_hash.hexdigest(),
            "%s.%s" % (entity_field_hash.hexdigest(),
                       ShotgunNavDataHandler.FORMAT_VERSION),
        )

        # warn if the path is longer than the Windows MAX_PATH limitation
        if sgtk.util.is_windows() and len(data_cache_path) > 250:
            self._log_warning(
                "Shotgun hierarchy data cache file path may be affected by "
                "the Windows MAX_PATH limitation.")

        return data_cache_path
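
Stripped of the Toolkit specifics, the path scheme above reduces to roughly the following (a sketch; the function name and layout are illustrative):

import hashlib
import os


def nav_cache_path(cache_root, nav_path, schema_generation, cache_seed=None):
    # Illustrative only: hash the long/volatile inputs so the resulting
    # path stays short enough to avoid the Windows MAX_PATH limit.
    params_hash = hashlib.md5()
    for value in (nav_path, schema_generation, cache_seed):
        params_hash.update(str(value).encode("utf-8"))
    return os.path.join(cache_root, "sg_nav", params_hash.hexdigest())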
Code example #4
    def execute(self, items, preview_mode, **kwargs):
        """
        Creates a list of files and folders.

        The default implementation creates files and folders recursively using
        open permissions.

        :param list(dict) items: List of actions that need to take place.

        Six different types of actions are supported.

        **Standard Folder**

        This represents a standard folder in the file system which is not associated
        with anything in Shotgun. It contains the following keys:

        - **action** (:class:`str`) - ``folder``
        - **metadata** (:class:`dict`) - The configuration yaml data for this item
        - **path** (:class:`str`) - path on disk to the item

        **Entity Folder**

        This represents a folder in the file system which is associated with a
        Shotgun entity. It contains the following keys:

        - **action** (:class:`str`) - ``entity_folder``
        - **metadata** (:class:`dict`) - The configuration yaml data for this item
        - **path** (:class:`str`) - path on disk to the item
        - **entity** (:class:`dict`) - Shotgun entity link with keys ``type``, ``id`` and ``name``.

        **Remote Entity Folder**

        This is the same as an entity folder, except that it was originally
        created in another location. A remote folder request means that your
        local toolkit instance has detected that folders have been created by
        a different file system setup. It contains the following keys:

        - **action** (:class:`str`) - ``remote_entity_folder``
        - **metadata** (:class:`dict`) - The configuration yaml data for this item
        - **path** (:class:`str`) - path on disk to the item
        - **entity** (:class:`dict`) - Shotgun entity link with keys ``type``, ``id`` and ``name``.

        **File Copy**

        This represents a file copy operation which should be carried out.
        It contains the following keys:

        - **action** (:class:`str`) - ``copy``
        - **metadata** (:class:`dict`) - The configuration yaml data associated with the directory level
          on which this object exists.
        - **source_path** (:class:`str`) - location of the file that should be copied
        - **target_path** (:class:`str`) - target location to where the file should be copied.

        **File Creation**

        This is similar to the file copy, but instead of a source path, a chunk
        of data is specified. It contains the following keys:

        - **action** (:class:`str`) - ``create_file``
        - **metadata** (:class:`dict`) - The configuration yaml data associated with the directory level
          on which this object exists.
        - **content** (:class:`str`) -- file content
        - **target_path** (:class:`str`) -- target location to where the file should be copied.

        **Symbolic Links**

        This represents a request that a symbolic link is created. Note that symbolic links are not
        supported in the same way on all operating systems. The default hook therefore does not
        implement symbolic link support on Windows systems. If you want to add symbolic link support
        on Windows, simply copy this hook to your project configuration and make the necessary
        modifications.

        - **action** (:class:`str`) - ``symlink``
        - **metadata** (:class:`dict`) - The raw configuration yaml data associated with symlink yml config file.
        - **path** (:class:`str`) - the path to the symbolic link
        - **target** (:class:`str`) - the target to which the symbolic link should point

        :returns: List of files and folders that have been created.
        :rtype: list(str)
        """

        # set the umask so that we get true permissions
        old_umask = os.umask(0)
        locations = []
        try:

            # loop through our list of items
            for i in items:

                action = i.get("action")

                if action in ["entity_folder", "folder"]:
                    # folder creation
                    path = i.get("path")
                    if not os.path.exists(path):
                        if not preview_mode:
                            # create the folder using open permissions
                            os.makedirs(path, 0o777)
                        locations.append(path)

                elif action == "remote_entity_folder":
                    # Remote folder creation
                    #
                    # NOTE! This action happens when another user has created
                    # a folder on their machine and we are syncing our local path
                    # cache to be aware of this folder's existence.
                    #
                    # For a traditional setup, where the project storage is shared,
                    # there is no need to do I/O for remote folders - these folders
                    # have already been created on the remote storage so you have access
                    # to them already.
                    #
                    # On a setup where each user or group of users is attached to
                    # different, independent file storages, which are synced,
                    # it may be meaningful to "replay" the remote folder creation
                    # on the local system. This would result in the same folder
                    # scaffold on each disk which is storing project data.
                    #
                    # path = i.get("path")
                    # if not os.path.exists(path):
                    #     if not preview_mode:
                    #         # create the folder using open permissions
                    #         os.makedirs(path, 0o777)
                    #     locations.append(path)
                    pass

                elif action == "symlink":
                    # symbolic link
                    if is_windows():
                        # no windows support
                        continue
                    path = i.get("path")
                    target = i.get("target")
                    # note use of lexists to check existence of symlink
                    # rather than what symlink is pointing at
                    if not os.path.lexists(path):
                        if not preview_mode:
                            os.symlink(target, path)
                        locations.append(path)

                elif action == "copy":
                    # a file copy
                    source_path = i.get("source_path")
                    target_path = i.get("target_path")
                    if not os.path.exists(target_path):
                        if not preview_mode:
                            # do a standard file copy
                            shutil.copy(source_path, target_path)
                            # set permissions to open
                            os.chmod(target_path, 0o666)
                        locations.append(target_path)

                elif action == "create_file":
                    # create a new file based on content
                    path = i.get("path")
                    parent_folder = os.path.dirname(path)
                    content = i.get("content")
                    if not os.path.exists(parent_folder) and not preview_mode:
                        os.makedirs(parent_folder, 0o777)
                    if not os.path.exists(path):
                        if not preview_mode:
                            # create the file
                            with open(path, "wb") as fp:
                                fp.write(six.ensure_binary(content))
                            # and set permissions to open
                            os.chmod(path, 0o666)
                        locations.append(path)
        finally:
            # reset umask
            os.umask(old_umask)

        return locations
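
For reference, a hedged sketch of the kind of item list this hook receives (paths and entity values are made up; in practice tk-core assembles the list and invokes the hook for you):

items = [
    {"action": "folder", "path": "/projects/demo/assets", "metadata": {}},
    {
        "action": "entity_folder",
        "path": "/projects/demo/assets/teapot",
        "metadata": {},
        "entity": {"type": "Asset", "id": 123, "name": "teapot"},
    },
]

# With preview_mode=True the hook only reports what it would create:
# would_create = hook.execute(items, preview_mode=True)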
Code example #5
    def __compute_cache_path(self, cache_seed=None):
        """
        Calculates and returns a cache path to use for this instance's query.

        :param cache_seed: Cache seed supplied to the ``__init__`` method.

        :return: The path to use when caching the model data.
        :rtype: str
        """

        # when we cache the data associated with this model, create
        # the file name and path based on several parameters.
        # the path will be of the form CACHE_LOCATION/cached_sg_queries/EntityType/params_hash/filter_hash
        #
        # params_hash is an md5 hash representing all parameters going into a particular
        # query setup and filter_hash is an md5 hash of the filter conditions.
        #
        # the reason these are split up is because the params tend to be constant and
        # the filters keep varying depending on user input.
        #
        # some comment regarding the fields that make up the hash
        #
        # fields, order, hierarchy are all coming from Shotgun
        # and are used to uniquely identify the cache file. Typically,
        # code using the shotgun model will keep these fields constant
        # while varying filters. With the filters hashed separately,
        # this typically generates a folder structure where there is one
        # top level folder containing a series of cache files
        # all for different filters.
        #
        # the schema generation is used for advanced implementations
        # See constructor docstring for details.
        #
        # bg_load_thumbs is hashed so that the system can cache
        # thumb and non-thumb caches independently. This is because
        # as soon as you start caching thumbnails, qpixmap will be used
        # internally by the serialization and this means that you get
        # warnings if you try to use those caches in threads. By keeping
        # caches separate, there is no risk that a thumb cache 'pollutes'
        # a non-thumb cache.
        #
        # now hash up the rest of the parameters and make that the filename
        params_hash = hashlib.md5()

        # FIXME: Python 2 and Python 3 order values differently in a dictionary,
        # which means that their string representations are going to differ
        # between Python versions.
        #
        # As users are going to be drifting between Python 2 and Python 3 for a
        # while, a fully deterministic way of generating the cache name should
        # be implemented.
        #
        # A simple approach would be to encode the data in a JSON structure
        # with ordered keys and hash the text representation of that data.
        params_hash.update(six.ensure_binary(str(self.__schema_generation)))
        params_hash.update(six.ensure_binary(str(self.__fields)))
        params_hash.update(six.ensure_binary(str(self.__order)))
        params_hash.update(six.ensure_binary(str(self.__hierarchy)))
        # If this value changes over time (like between Qt4 and Qt5), we need to
        # assume our previous user roles are invalid since Qt might have taken
        # them over. If the role's value is 32, don't add it to the hash so we
        # don't invalidate PySide/PyQt4 caches.
        if QtCore.Qt.UserRole != 32:
            params_hash.update(six.ensure_binary(str(QtCore.Qt.UserRole)))

        # now hash up the filter parameters and the seed - these are dynamic
        # values that tend to change and be data driven, so they are handled
        # on a different level in the path
        filter_hash = hashlib.md5()
        filter_hash.update(six.ensure_binary(str(self.__filters)))
        filter_hash.update(
            six.ensure_binary(str(self.__additional_filter_presets)))
        params_hash.update(six.ensure_binary(str(cache_seed)))

        # Organize files on disk based on entity type and then filter hash
        # keep extension names etc short in order to stay away from MAX_PATH
        # on windows.
        # Try to share the cache at the site level which was introduced in tk-core
        # > 0.18.118.
        # If not available, fallback on per project/pipeline config/plugin id
        # caching.
        if hasattr(self._bundle, "site_cache_location"):
            cache_location = self._bundle.site_cache_location
        else:
            cache_location = self._bundle.cache_location
        data_cache_path = os.path.join(
            cache_location,
            "sg",
            self.__entity_type,
            params_hash.hexdigest(),
            "%s.%s" %
            (filter_hash.hexdigest(), ShotgunFindDataHandler.FORMAT_VERSION),
        )

        if sgtk.util.is_windows() and len(data_cache_path) > 250:
            self._log_warning(
                "Shotgun model data cache file path may be affected by the "
                "Windows MAX_PATH limitation.")

        return data_cache_path
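
One way to get the fully deterministic digest the FIXME above asks for is to serialize the parameters as JSON with sorted keys before hashing; a sketch (not what the shipped code does):

import hashlib
import json


def deterministic_digest(params):
    # Fixing key order makes the digest stable across the Python 2/3
    # dictionary-ordering differences mentioned in the FIXME.
    payload = json.dumps(params, sort_keys=True, separators=(",", ":"))
    return hashlib.md5(payload.encode("utf-8")).hexdigest()


# deterministic_digest({"fields": ["code"], "order": []}) returns the same
# value regardless of dictionary iteration order.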
Code example #6
    def _dispatch_to_endpoint(self, metrics):
        """
        Dispatch the supplied metric to the sg api registration endpoint.

        :param metrics: A list of :class:`EventMetric` instances.
        """

        # Filter out metrics we don't want to send to the endpoint.
        filtered_metrics_data = []

        for metric in metrics:
            data = metric.data
            # As a second pass, re-structure unsupported events from supported
            # groups (see the more complete comment below)
            if metric.is_supported_event:
                # If this is a supported event, we just need to tack on the
                # version of the core api being used.
                data["event_properties"][
                    EventMetric.KEY_CORE_VERSION] = self._engine.sgtk.version
            else:
                # Still log the event but change its name so it's easy to
                # spot all unofficial events which are logged.
                # Later we might want to simply discard them instead of
                # logging them as "Unknown".
                # Forge a new properties dict with the original data under the
                # "Event Data" key.
                properties = data["event_properties"]
                new_properties = {
                    "Event Name": data["event_name"],
                    "Event Data": properties,
                    EventMetric.KEY_APP: properties.get(EventMetric.KEY_APP),
                    EventMetric.KEY_APP_VERSION: properties.get(
                        EventMetric.KEY_APP_VERSION),
                    EventMetric.KEY_ENGINE: properties.get(
                        EventMetric.KEY_ENGINE),
                    EventMetric.KEY_ENGINE_VERSION: properties.get(
                        EventMetric.KEY_ENGINE_VERSION),
                    EventMetric.KEY_HOST_APP: properties.get(
                        EventMetric.KEY_HOST_APP),
                    EventMetric.KEY_HOST_APP_VERSION: properties.get(
                        EventMetric.KEY_HOST_APP_VERSION),
                    EventMetric.KEY_CORE_VERSION: self._engine.sgtk.version,
                }
                data["event_properties"] = new_properties
                data["event_name"] = "Unknown Event"
                data["event_group"] = EventMetric.GROUP_TOOLKIT

            filtered_metrics_data.append(data)

        # Bail out if there is nothing to do
        if not filtered_metrics_data:
            return

        # get this thread's sg connection via tk api
        sg_connection = self._engine.tank.shotgun

        # handle proxy setup by pulling the proxy details from the main
        # shotgun connection
        if sg_connection.config.proxy_handler:
            opener = urllib.request.build_opener(
                sg_connection.config.proxy_handler)
            urllib.request.install_opener(opener)

        # build the full endpoint url with the shotgun site url
        url = "%s/%s" % (sg_connection.base_url, self.API_ENDPOINT)

        # construct the payload with the auth args and metrics data
        payload = {
            "auth_args": {
                "session_token": sg_connection.get_session_token()
            },
            "metrics": filtered_metrics_data,
        }
        payload_json = six.ensure_binary(json.dumps(payload))

        header = {"Content-Type": "application/json"}
        try:
            request = urllib.request.Request(url, payload_json, header)
            urllib.request.urlopen(request)
        except urllib.error.HTTPError:
            # fire and forget, so if there's an error, ignore it.
            pass
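
The dispatch at the end boils down to the standard urllib pattern for POSTing JSON; a standalone sketch (the URL and payload here are placeholders, not the real endpoint):

import json
import urllib.error
import urllib.request

url = "https://example.shotgunstudio.com/placeholder/endpoint"
payload_json = json.dumps({"metrics": []}).encode("utf-8")

request = urllib.request.Request(
    url, payload_json, {"Content-Type": "application/json"})
try:
    urllib.request.urlopen(request)
except urllib.error.HTTPError:
    # fire and forget: errors are deliberately ignored
    pass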