Example #1
    # assumes `from types import FunctionType` and a configured Session factory
    def get_parent(self, attr_name):
        # first, try grabbing it directly
        parent = getattr(self, attr_name)
        if parent:
            return parent

        # if nothing was found, grab the fk and lookup manually
        attr = getattr(type(self), attr_name)
        prop = attr.property
        # assumes a single-column foreign key (first local/remote pair)
        local_col, remote_col = prop.local_remote_pairs[0]
        local_key = local_col.key
        value = getattr(self, local_key)
        if not value:
            # no relation and no fk
            return None

        filters = {remote_col.key: value}
        parent_cls = prop.argument
        if isinstance(parent_cls, FunctionType):
            # the target was given as a lambda; call it to get the class
            # (string targets, e.g. relationship('Parent'), are not handled)
            parent_cls = parent_cls()
        if hasattr(parent_cls, 'is_mapper') and parent_cls.is_mapper:
            # we found a mapper, grab the class from it
            parent_cls = parent_cls.class_
        session = Session()
        parent = session.query(parent_cls).filter_by(**filters).first()
        return parent
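A rough usage sketch for the helper above, assuming it lives on the project's declarative base (or a mixin) and that Session is a configured sessionmaker; the Parent/Child models here are hypothetical:

    from sqlalchemy import Column, ForeignKey, Integer
    from sqlalchemy.orm import relationship

    class Parent(Base):  # Base: the project's declarative base
        __tablename__ = 'parents'
        id = Column(Integer, primary_key=True)

    class Child(Base):
        __tablename__ = 'children'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parents.id'))
        # pass the class itself so prop.argument resolves without extra work
        parent = relationship(Parent)

    # with only the foreign key set, the 'parent' relation is empty, so
    # get_parent falls back to querying Parent by parent_id
    child = Child(parent_id=1)
    parent = child.get_parent('parent')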
Example #2
    def handle_noargs(self, **options):
        #db = options.get('database', DEFAULT_DB_ALIAS)
        #connection = connections[db]
        verbosity = int(options.get('verbosity', 1))
        interactive = options.get('interactive')

        self.style = no_style()

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module('.management', app_name)
            except ImportError:
                pass

        #sql_list = sql_flush(self.style, connection, only_django=True)

        if interactive:
            confirm = raw_input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the database,
and return each table to the state it was in after syncdb.
Are you sure you want to do this?

    Type 'yes' to continue, or 'no' to cancel: """)
        else:
            confirm = 'yes'

        if confirm == 'yes':
            try:
                session = Session()
                for table in reversed(Base.metadata.sorted_tables):
                    if table.info.get('preserve_during_flush', False):
                        continue
                    try:
                        session.execute(table.delete())
                    except Exception:
                        # table may be missing; skip it and keep flushing the rest
                        pass
                session.commit()
            except Exception as e:
                session.rollback()
                raise CommandError("""Database couldn't be flushed. Possible reasons:
  * The database isn't running or isn't configured correctly.
  * At least one of the expected database tables doesn't exist.
  * The SQL was invalid.
Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.
The full error: %s""" % e)

            # Emit the post sync signal. This allows individual
            # applications to respond as if the database had been
            # sync'd from scratch.
            all_models = []
            for app in get_apps():
                all_models.extend(get_models(app, include_auto_created=True))
            emit_post_sync_signal(set(all_models), verbosity, interactive, None)

            # Reinstall the initial_data fixture.
            if options.get('load_initial_data'):
                call_command('loaddata', 'initial_data', **options)
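The flush loop above skips any table whose info dict carries a preserve_during_flush flag. A minimal sketch of how that flag might be attached, assuming a standard declarative setup; the Country model is hypothetical:

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Country(Base):
        __tablename__ = 'countries'
        # __table_args__ kwargs are forwarded to the underlying Table;
        # its 'info' dict is where the flush loop reads the flag back out
        __table_args__ = {'info': {'preserve_during_flush': True}}

        id = Column(Integer, primary_key=True)
        name = Column(String(100))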
Example #3
    def get_cache_keys(self, child_updated=False):
        cache_keys = set()
        version_keys = set()

        if not any(getattr(self._meta, k) for k in [
            'cache_detail_keys',
            'cache_list_keys',
            'cache_pointers',
            'cache_cascades',
            'cache_relations',
            ]):
            return (cache_keys, version_keys)
            
        session = Session.object_session(self)
        deleted = self.is_deleted or self in session.deleted
        data = instance_dict(self)
        cache = get_cache('objects')

        # get a list of all fields which changed
        changed_keys = []
        for attr in self.__mapper__.iterate_properties:
            if (not isinstance(attr, ColumnProperty) and
                    attr.key not in self._meta.cache_relations):
                continue
            if attr.key in IGNORABLE_KEYS:
                continue
            ins, eq, rm = get_history(self, attr.key)
            if ins or rm:
                changed_keys.append(attr.key)
        self_updated = bool(changed_keys) or deleted

        if not self_updated and not child_updated:
            return (cache_keys, version_keys)

        if has_identity(self):
            # we only kill primary cache keys if the object exists
            # this key won't exist during CREATE
            for raw_key, attrs in self._meta.cache_detail_keys:
                if attrs and not any(key in changed_keys for key in attrs):
                    # the fields which trigger this key were not changed
                    continue
                cache_key = self.format_key(raw_key % data)
                cache_keys.add(cache_key)

        # collections will be altered by any action, so we always
        # kill these keys
        for raw_key, attrs in self._meta.cache_list_keys:
            if attrs and not any(key in changed_keys for key in attrs):
                # the fields which trigger this key were not changed
                continue
            cache_key = raw_key % data
            version_keys.add(cache_key)

        # pointer records contain only the id of the parent resource
        # if changed, we set the old key to False, and set the new key
        for raw_key, attrs, name in self._meta.cache_pointers:
            if attrs and not any(key in changed_keys for key in attrs):
                # the fields which trigger this key were not changed
                continue
            cache_key = raw_key % data
            c, idkey = identity_key(instance=self)
            if len(idkey) > 1:
                idkey = ','.join(str(i) for i in idkey)
            else:
                idkey = idkey[0]
            if not self.is_deleted:
                cache.set(cache_key, idkey)

            # if this is an existing object, we need to handle the old key
            if not has_identity(self):
                continue

            old_data = {}
            for attr in attrs:
                ins, eq, rm = get_history(self, attr)
                old_data[attr] = rm[0] if rm else eq[0]
            old_key = raw_key % old_data
            if old_key == cache_key and not self.is_deleted:
                continue
            old_idkey = cache.get(old_key)
            if old_idkey == idkey:
                # this object is the current owner of the key
                cache.set(old_key, False)

        # cascade the cache kill operation to related objects, so parents
        # know if children have changed, in order to rebuild the cache
        for cascade in self._meta.cache_cascades:
            objs = getattr(self, cascade)
            if not objs:
                continue
            if not isinstance(objs, list):
                objs = [objs]
            for obj in objs:
                k1, k2 = obj.get_cache_keys(child_updated=True)
                cache_keys.update(k1)
                version_keys.update(k2)

        return (cache_keys, version_keys)
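For context, a rough sketch of the _meta cache configuration the method above expects, and of how the %-style key templates expand against instance_dict(self); all key names and values here are hypothetical:

    # detail/list entries are (template, trigger_attrs) pairs; pointer
    # entries are (template, trigger_attrs, name) triples
    class _meta:
        cache_detail_keys = [('article:%(id)s', None)]
        cache_list_keys = [('articles:user:%(user_id)s', ('user_id',))]
        cache_pointers = [('article:slug:%(slug)s', ('slug',), 'slug')]
        cache_cascades = ['user']
        cache_relations = []

    # each template is filled from the instance's column dict
    data = {'id': 42, 'user_id': 7, 'slug': 'hello-world'}
    print('article:%(id)s' % data)  # -> article:42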