Example 1
    def bulk_record(self, organization_id, feature_slugs, **kwargs):
        features = []

        try:
            feature_ids = set([manager.get_by_slug(slug).id for slug in feature_slugs])
        except UnknownFeature as e:
            logger.exception(e)
            return False

        incomplete_feature_ids = feature_ids - self.get_all_cache(organization_id)

        if not incomplete_feature_ids:
            return False

        for feature_id in incomplete_feature_ids:
            features.append(
                FeatureAdoption(
                    organization_id=organization_id, feature_id=feature_id, complete=True
                )
            )
        try:
            with transaction.atomic():
                self.bulk_create(features)
                return True

        except IntegrityError:
            # This can occur if redis somehow loses the set of complete features and
            # we attempt to insert duplicate (org_id, feature_id) rows
            # This also will happen if we get parallel processes running `bulk_record` and
            # `get_all_cache` returns in the second process before the first process
            # can `bulk_set_cache`.
            return False
        finally:
            return self.bulk_set_cache(organization_id, *incomplete_feature_ids)
Example 2
    def bulk_record(self, organization_id, feature_slugs, **kwargs):
        features = []

        try:
            feature_ids = set(
                [manager.get_by_slug(slug).id for slug in feature_slugs])
        except UnknownFeature as e:
            logger.exception(e)
            return

        incomplete_feature_ids = feature_ids - self.get_all_cache(
            organization_id)

        for feature_id in incomplete_feature_ids:
            features.append(
                FeatureAdoption(organization_id=organization_id,
                                feature_id=feature_id,
                                complete=True))
        try:
            with transaction.atomic():
                self.bulk_create(features)
        except IntegrityError as e:
            # This can occur if redis somehow loses the set of complete features and we attempt to insert duplicate (org_id, feature_id) rows
            logger.exception(e)
            return
        finally:
            self.bulk_set_cache(organization_id, *incomplete_feature_ids)
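Both bulk_record variants above (Examples 1 and 2) follow the same path: resolve each slug to a feature id, subtract the ids already marked complete in the cache, bulk-insert the missing FeatureAdoption rows inside a transaction, and refresh the cache in the finally block so a duplicate-row IntegrityError never leaves the cache stale. A minimal call-site sketch, assuming these methods live on a custom Django manager exposed as FeatureAdoption.objects and that an organization object is in scope (both are assumptions, not shown in the examples):

    # Hypothetical usage; FeatureAdoption.objects and the slug values are assumptions.
    FeatureAdoption.objects.bulk_record(
        organization_id=organization.id,
        feature_slugs=["first_event", "release_tracking"],
    )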
Example 3
    def record(self, organization_id, feature_slug, **kwargs):
        try:
            feature_id = manager.get_by_slug(feature_slug).id
        except UnknownFeature as e:
            logger.exception(e)
            return False

        if not self.in_cache(organization_id, feature_id):
            row, created = self.create_or_update(
                organization_id=organization_id, feature_id=feature_id, complete=True
            )
            self.set_cache(organization_id, feature_id)
            return created

        return False
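record is the single-feature counterpart: it resolves the slug, returns False for an unknown feature or a cache hit, and otherwise upserts the (organization_id, feature_id) row with complete=True, primes the cache, and reports whether a new row was created. A hedged sketch of a call site, again assuming the method is exposed on FeatureAdoption.objects:

    # Hypothetical usage; the manager attribute and the slug are assumptions.
    created = FeatureAdoption.objects.record(
        organization_id=organization.id,
        feature_slug="first_event",
    )
    if created:
        ...  # a new adoption row was inserted for this organization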
Example 4
    def get_by_slug(self, organization, slug):
        return self.filter(organization=organization,
                           feature_id=manager.get_by_slug(slug).id).first()
Example 5
    def get_by_slug(self, organization, slug):
        return self.filter(
            organization=organization, feature_id=manager.get_by_slug(slug).id
        ).first()
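Examples 4 and 5 are the same read path with different line wrapping: get_by_slug translates the slug to a feature id and returns the first matching adoption row for the organization, or None when nothing has been recorded. A short hedged sketch, assuming the same FeatureAdoption.objects manager and that get_by_slug takes the organization object itself rather than its id, as the signature above suggests:

    # Hypothetical usage; the manager attribute and the slug are assumptions.
    adoption = FeatureAdoption.objects.get_by_slug(organization=organization, slug="first_event")
    if adoption is not None and adoption.complete:
        ...  # this organization has completed the feature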