Example 1
class BaseModel(Document):
    """Base class for MongoDB models.
    """
    createTime = DateTimeField(required=True)
    createBy = ReferenceField('User')
    updateTime = DateTimeField(required=True)
    updateBy = ReferenceField('User')
    meta = {'abstract': True}

    @classmethod
    def clean_attributes(cls, **attributes):
        """The 'clean' method used by the 'save_and_index' method.
        """
        return attributes

    @classmethod
    def save_and_index(cls,
                       user=None,
                       id=None,
                       given_id=None,
                       old_update_time=None,
                       **attributes):
        """The recommended unified method for saving, updating and consistent updating a MongoDB document.

        Calling this method will record the instance's create time and update time automatically, and also index it
        for elasticsearch if necessary.
        """
        if not user:
            raise Exception('A user is required to save or update a document.')
        attributes, now = cls.clean_attributes(**attributes), datetime.now()
        if given_id:
            # Save with given ID.
            instance = cls(id=given_id,
                           createBy=user,
                           updateBy=user,
                           createTime=now,
                           updateTime=now,
                           **attributes).save()
            instance.reload()
        elif not id:
            # Create a new document.
            instance = cls(id=None,
                           createBy=user,
                           updateBy=user,
                           createTime=now,
                           updateTime=now,
                           **attributes).save()
            instance.reload()
        elif not old_update_time:
            # Update.
            if cls.objects(id=id).update_one(upsert=False,
                                             updateBy=user,
                                             updateTime=now,
                                             **attributes) == 0:
                raise Exception(
                    'The {0} being updated does not exist.'.format(cls))
            instance = cls.objects.get(id=id)
        else:
            # Consistent update.
            if cls.objects(id=id, updateBy=user, updateTime=old_update_time).\
                    update_one(upsert=False, updateTime=now, **attributes) == 0:
                raise Exception(
                    'The {0} being updated does not exist or is outdated.'.
                    format(cls))
            instance = cls.objects.get(id=id)
        # Index the instance if necessary.
        instance.index_for_search()
        return instance

    @staticmethod
    def paginate_query_set(query_set, page_num, page_size):
        """Paginate a mongoengine query set.
        """
        page_num, page_count = BaseModel.__calc_page_num_and_page_count(
            query_set.count(), page_num, page_size)
        return (query_set[page_size * page_num:page_size * (page_num + 1)],
                page_num, page_count)

    def delete(self, **write_concern):
        """Delete a document from MongoDB, also delete it from elasticsearch if necessary.
        """
        super().delete(**write_concern)
        self.__class__.delete_index(self.id)

    def index_for_search(self):
        """Index a MongoDB document for elasticsearch if necessary.

        1) If you want a subclass to be indexed in elasticsearch, it should implement the 'to_search_doc' method.
        2) This method is called by the 'save_and_index' method automatically, so you need to call it manually
        only when you see a 'Failed to index ...' message in the error log.
        """
        if not self.id or not hasattr(self, 'to_search_doc'):
            return
        try:
            search_doc = self.to_search_doc()
            _elasticsearch.index(index=options.elasticsearch_index,
                                 doc_type=self.__class__.__name__.lower(),
                                 body=search_doc,
                                 id=str(self.id))
        except Exception:
            logging.exception(
                'Failed to index {{class={0}.{1}, id={2}}} for search.'.format(
                    self.__class__.__module__, self.__class__.__name__,
                    self.id))

    @classmethod
    def do_search(cls, query, page_num, page_size, **kwargs):
        """Do perform an elasticsearch search operation, return the paginated result.
        """
        class SearchResult:
            def __init__(self, hit):
                self.id = hit['_id']
                for key, value in hit['_source'].items():
                    if not key.startswith('_'):
                        self.__dict__[key] = value

        search_result = _elasticsearch.search(
            index=options.elasticsearch_index,
            doc_type=cls.__name__.lower(),
            body={'query': query},
            size=page_size,
            from_=page_size * page_num,
            **kwargs)
        if not search_result or search_result['timed_out'] or search_result[
                'hits']['total'] == 0:
            return 0, 1, []
        page_num, page_count = BaseModel.__calc_page_num_and_page_count(
            search_result['hits']['total'], page_num, page_size)
        return page_num, page_count, [
            SearchResult(hit) for hit in search_result['hits']['hits']
        ]

    @classmethod
    def delete_index(cls, id):
        """Delete a document from elasticsearch.

        This method is called by the 'delete' method automatically, so you need to call it manually
        only when you see a 'Failed to delete ...' message in the error log.
        """
        if not hasattr(cls, 'to_search_doc'):
            return
        try:
            _elasticsearch.delete(index=options.elasticsearch_index,
                                  doc_type=cls.__name__.lower(),
                                  id=str(id))
        except Exception:
            logging.exception(
                'Failed to delete index for {{class={0}.{1}, id={2}}}.'.format(
                    cls.__module__, cls.__name__, id))

    def to_vo(self, **kwargs):
        return {
            'id': str(self.id),
            'createTime': int(self.createTime.timestamp() * 1000),
            'updateTime': int(self.updateTime.timestamp() * 1000)
        }

    @staticmethod
    def __calc_page_num_and_page_count(total_count, page_num, page_size):
        """Calculate page number and page count based on given total items count, page number and page size.
        """
        page_count = max(math.ceil(total_count / page_size), 1)
        if page_num > page_count - 1:
            page_num = page_count - 1
        if page_num < 0:
            page_num = 0
        return page_num, page_count
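
A minimal usage sketch (the Article subclass, its title field, and current_user are hypothetical names): a subclass opts into elasticsearch indexing by implementing 'to_search_doc', and all writes go through 'save_and_index'.

class Article(BaseModel):
    """Hypothetical subclass; indexed because it defines 'to_search_doc'."""
    title = StringField(required=True)

    def to_search_doc(self):
        # Expose only the fields elasticsearch should see.
        return {'title': self.title}

# Create (no id), plain update (id), and consistent update (id + old_update_time):
article = Article.save_and_index(user=current_user, title='Hello')
article = Article.save_and_index(user=current_user, id=article.id,
                                 title='Hello again')
article = Article.save_and_index(user=current_user, id=article.id,
                                 old_update_time=article.updateTime,
                                 title='Hello, concurrency-safe')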
Example 2
class AuditTrail(Document):
    meta = {
        "collection": "noc.audittrail",
        "strict": False,
        "auto_create_index": False,
        "indexes": [
            "timestamp",
            ("model_id", "object"),
            {"fields": ["expires"], "expireAfterSeconds": 0},
        ],
    }

    timestamp = DateTimeField()
    user = StringField()
    model_id = StringField()
    object = StringField()
    op = StringField(choices=[("C", "Create"), ("M", "Modify"), ("D", "Delete")])
    changes = ListField(EmbeddedDocumentField(FieldChange))
    expires = DateTimeField()

    EXCLUDE = {
        "admin.logentry",
        "main.audittrail",
        "kb.kbentryhistory",
        "kb.kbentrypreviewlog",
        "sa.maptask",
        "sa.reducetask",
    }

    DEFAULT_TTL = config.audit.db_ttl
    _model_ttls = {}

    @classmethod
    def log(cls, sender, instance, op, changes):
        """
        Log into audit trail
        """
        user = get_user()  # Retrieve user from thread local storage
        if not user or not user.is_authenticated():
            return  # No user initialized, no audit trail
        if not changes:
            logger.debug("Nothing to log for %s", instance)
            return
        now = datetime.datetime.now()
        model_id = get_model_id(sender)
        cls._get_collection().insert(
            {
                "timestamp": now,
                "user": user.username,
                "model_id": model_id,
                "object": str(instance.pk),
                "op": op,
                "changes": changes,
                "expires": now + cls._model_ttls[model_id],
            },
            w=0,
        )

    @classmethod
    def get_field(cls, instance, field):
        if field.value_from_object(instance) is None:
            return None
        return field.value_to_string(instance)

    @classmethod
    def on_update_model(cls, sender, instance, **kwargs):
        """
        Audit trail for INSERT and UPDATE operations
        """
        #
        logger.debug("Logging change for %s", instance)
        changes = []
        if kwargs.get("created", True):
            # Create
            op = "C"
            changes = [{
                "field": f.name,
                "old": None,
                "new": cls.get_field(instance, f)
            } for f in sender._meta.fields]
        else:
            # Update ("M" per the declared 'op' choices)
            op = "M"
            for f in sender._meta.fields:
                od = instance._old_values.get(f.attname)
                if od is not None:
                    od = smart_text(od)
                nd = cls.get_field(instance, f)
                if nd != od:
                    changes += [{"field": f.name, "old": od, "new": nd}]

        cls.log(sender, instance, op, changes)

    @classmethod
    def on_delete_model(cls, sender, instance, **kwargs):
        """
        Audit trail for DELETE operation
        """
        #
        logger.debug("Logging deletion of %s", instance)
        changes = [{
            "field": f.name,
            "old": cls.get_field(instance, f),
            "new": None
        } for f in sender._meta.fields]
        cls.log(sender, instance, "D", changes)

    @classmethod
    def on_init_model(cls, sender, instance, **kwargs):
        """
        Preserve original values
        """
        instance._old_values = dict(instance.__dict__)

    @classmethod
    def get_model_ttl(cls, model_id):
        return datetime.timedelta(seconds=cls.DEFAULT_TTL)

    @classmethod
    def on_new_model(cls, sender, **kwargs):
        if str(sender._meta) in cls.EXCLUDE:
            return  # Ignore model
        model_id = get_model_id(sender)
        ttl = cls.get_model_ttl(model_id)
        if not ttl:
            return  # Disabled
        cls._model_ttls[model_id] = ttl
        django_signals.post_save.connect(cls.on_update_model, sender=sender)
        django_signals.post_delete.connect(cls.on_delete_model, sender=sender)
        django_signals.post_init.connect(cls.on_init_model, sender=sender)

    @classmethod
    def install(cls):
        """
        Install signal handlers
        """
        django_signals.class_prepared.connect(cls.on_new_model)
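
A short sketch of the intended wiring (the call site is an assumption, not shown above): install() registers the class_prepared hook once at startup, before the Django models are imported.

# At application startup, before models are defined:
AuditTrail.install()

# class_prepared then fires for every model; on_new_model skips models in
# EXCLUDE, caches the TTL, and connects post_init/post_save/post_delete,
# so subsequent saves and deletes are logged automatically.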
Example 3
class Cert(Document):
    """Certificate mongo document model."""

    log_id = IntField(primary_key=True)
    # serial is 20 octets, see: https://tools.ietf.org/html/rfc5280#section-4.1.2.2
    serial = StringField(required=True)
    issuer = StringField(required=True)
    not_before = DateTimeField(required=True)
    not_after = DateTimeField(required=True)
    sct_or_not_before = DateTimeField(required=True)
    sct_exists = BooleanField(required=True)
    pem = StringField(required=True)
    _subjects = ListField(required=True, field=StringField(), db_field="subjects")
    _trimmed_subjects = ListField(
        required=True, field=StringField(), db_field="trimmed_subjects"
    )

    meta = {
        "collection": "certs",
        "indexes": [
            "+_subjects",
            "+_trimmed_subjects",
            {"fields": ("+issuer", "+serial"), "unique": True},
        ],
    }

    @property
    def subjects(self):
        """Getter for subjects."""
        return self._subjects

    @subjects.setter
    def subjects(self, values):
        """Subjects setter.

        Normalizes inputs and derives trimmed_subjects.
        """
        self._subjects = list({i.lower() for i in values})
        self._trimmed_subjects = list(trim_domains(self._subjects))

    @property
    def trimmed_subjects(self):
        """Read-only property.  This is derived from the subjects."""
        return self._trimmed_subjects

    def to_x509(self):
        """Return an x509 subject for this certificate."""
        return x509.load_pem_x509_certificate(
            bytes(self.pem, "utf-8"), default_backend()
        )

    @classmethod
    def from_pem(cls, pem):
        """Create a Cert model object from a PEM certificate string.

        Arguments:
        pem -- PEM encoded certificate

        Returns (cert, precert):
            cert: a Cert model object
            precert: a boolean, True if this is a precertificate, False otherwise

        """
        xcert = x509.load_pem_x509_certificate(bytes(pem, "utf-8"), default_backend())
        dns_names = get_sans_set(xcert)

        sct_or_not_before, sct_exists = get_earliest_sct(xcert)

        cert = cls()
        cert.serial = hex(xcert.serial_number)[2:]
        cert.issuer = xcert.issuer.rfc4514_string()
        cert.not_before = xcert.not_valid_before
        cert.not_after = xcert.not_valid_after
        cert.sct_or_not_before = sct_or_not_before
        cert.sct_exists = sct_exists
        cert.pem = pem
        cert.subjects = dns_names
        return cert, is_poisioned(xcert)
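
A hedged round-trip sketch (pem_string and the log_id value are placeholders):

# pem_string holds a PEM-encoded certificate, e.g. from a CT log entry.
cert, precert = Cert.from_pem(pem_string)
if not precert:
    cert.log_id = 12345  # primary key assigned by the caller (hypothetical)
    cert.save()
    print(cert.subjects, cert.trimmed_subjects)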
Example 4
class Dataset(WithMetrics, BadgeMixin, db.Owned, db.Document):
    created_at = DateTimeField(verbose_name=_('Creation date'),
                               default=datetime.now,
                               required=True)
    last_modified = DateTimeField(verbose_name=_('Last modification date'),
                                  default=datetime.now,
                                  required=True)
    title = db.StringField(required=True)
    acronym = db.StringField(max_length=128)
    # /!\ do not set the slug directly when creating or updating a dataset:
    # that would break the search indexation
    slug = db.SlugField(max_length=255,
                        required=True,
                        populate_from='title',
                        update=True,
                        follow=True)
    description = db.StringField(required=True, default='')
    license = db.ReferenceField('License')

    tags = db.TagListField()
    resources = db.ListField(db.EmbeddedDocumentField(Resource))

    private = db.BooleanField(default=False)
    frequency = db.StringField(choices=list(UPDATE_FREQUENCIES.keys()))
    frequency_date = db.DateTimeField(verbose_name=_('Future date of update'))
    temporal_coverage = db.EmbeddedDocumentField(db.DateRange)
    spatial = db.EmbeddedDocumentField(SpatialCoverage)

    ext = db.MapField(db.GenericEmbeddedDocumentField())
    extras = db.ExtrasField()

    featured = db.BooleanField(required=True, default=False)

    deleted = db.DateTimeField()
    archived = db.DateTimeField()

    def __str__(self):
        return self.title or ''

    __badges__ = {
        PIVOTAL_DATA: _('Pivotal data'),
    }

    meta = {
        'indexes': [
            '-created_at',
            'slug',
            'resources.id',
            'resources.urlhash',
        ] + db.Owned.meta['indexes'],
        'ordering': ['-created_at'],
        'queryset_class': DatasetQuerySet,
    }

    before_save = signal('Dataset.before_save')
    after_save = signal('Dataset.after_save')
    on_create = signal('Dataset.on_create')
    on_update = signal('Dataset.on_update')
    before_delete = signal('Dataset.before_delete')
    after_delete = signal('Dataset.after_delete')
    on_delete = signal('Dataset.on_delete')
    on_archive = signal('Dataset.on_archive')
    on_resource_added = signal('Dataset.on_resource_added')

    verbose_name = _('dataset')

    @classmethod
    def pre_save(cls, sender, document, **kwargs):
        cls.before_save.send(document)

    @classmethod
    def post_save(cls, sender, document, **kwargs):
        if 'post_save' in kwargs.get('ignores', []):
            return
        cls.after_save.send(document)
        if kwargs.get('created'):
            cls.on_create.send(document)
        else:
            cls.on_update.send(document)
        if document.deleted:
            cls.on_delete.send(document)
        if document.archived:
            cls.on_archive.send(document)
        if kwargs.get('resource_added'):
            cls.on_resource_added.send(document,
                                       resource_id=kwargs['resource_added'])

    def clean(self):
        super(Dataset, self).clean()
        if self.frequency in LEGACY_FREQUENCIES:
            self.frequency = LEGACY_FREQUENCIES[self.frequency]

    def url_for(self, *args, **kwargs):
        return url_for('datasets.show', dataset=self, *args, **kwargs)

    display_url = property(url_for)

    @property
    def is_visible(self):
        return not self.is_hidden

    @property
    def is_hidden(self):
        return (len(self.resources) == 0 or self.private or self.deleted
                or self.archived)

    @property
    def full_title(self):
        if not self.acronym:
            return self.title
        return '{title} ({acronym})'.format(**self._data)

    @property
    def external_url(self):
        return self.url_for(_external=True)

    @property
    def image_url(self):
        if self.organization:
            return self.organization.logo.url
        elif self.owner:
            return self.owner.avatar.url

    @property
    def frequency_label(self):
        return UPDATE_FREQUENCIES.get(self.frequency or 'unknown',
                                      UPDATE_FREQUENCIES['unknown'])

    def check_availability(self):
        """Check if resources from that dataset are available.

        Return a list of (boolean or 'unknown')
        """
        # Only check remote resources.
        remote_resources = [
            resource for resource in self.resources
            if resource.filetype == 'remote'
        ]
        if not remote_resources:
            return []
        return [resource.check_availability() for resource in remote_resources]

    @property
    def last_update(self):
        if self.resources:
            return max(resource.published for resource in self.resources)
        else:
            return self.last_modified

    @property
    def next_update(self):
        """Compute the next expected update date,

        given the frequency and last_update.
        Return None if the frequency is not handled.
        """
        deltas = {
            'daily': timedelta(days=1),
            'weekly': timedelta(weeks=1),
            'fortnighly': timedelta(weeks=2),  # (sic) matches the stored value
            'monthly': timedelta(weeks=4),
            'bimonthly': timedelta(weeks=4 * 2),
            'quarterly': timedelta(weeks=52 / 4),
            'biannual': timedelta(weeks=52 / 2),
            'annual': timedelta(weeks=52),
            'biennial': timedelta(weeks=52 * 2),
            'triennial': timedelta(weeks=52 * 3),
            'quinquennial': timedelta(weeks=52 * 5),
        }
        delta = deltas.get(self.frequency)
        if delta is None:
            return
        return self.last_update + delta

    @cached_property
    def quality(self):
        """Return a dict filled with metrics related to the inner

        quality of the dataset:

            * number of tags
            * description length
            * and so on
        """
        from udata.models import Discussion  # noqa: Prevent circular imports
        result = {}
        if not self.id:
            # Quality is only relevant on saved Datasets
            return result
        if self.frequency != 'unknown':
            result['frequency'] = self.frequency
        if self.next_update:
            result['update_in'] = -(self.next_update - datetime.now()).days
        if self.tags:
            result['tags_count'] = len(self.tags)
        if self.description:
            result['description_length'] = len(self.description)
        if self.resources:
            result['has_resources'] = True
            result['has_only_closed_or_no_formats'] = all(
                resource.closed_or_no_format for resource in self.resources)
            result['has_unavailable_resources'] = not all(
                self.check_availability())
        discussions = Discussion.objects(subject=self)
        if discussions:
            result['discussions'] = len(discussions)
            result['has_untreated_discussions'] = not all(
                discussion.person_involved(self.owner)
                for discussion in discussions)
        result['score'] = self.compute_quality_score(result)
        return result

    def compute_quality_score(self, quality):
        """Compute the score related to the quality of that dataset."""
        score = 0
        UNIT = 2
        if 'update_in' in quality:
            # TODO: should be related to frequency.
            if quality['update_in'] < 0:
                score += UNIT
            else:
                score -= UNIT
        if 'tags_count' in quality:
            if quality['tags_count'] > 3:
                score += UNIT
        if 'description_length' in quality:
            if quality['description_length'] > 100:
                score += UNIT
        if 'has_resources' in quality:
            if quality['has_only_closed_or_no_formats']:
                score -= UNIT
            else:
                score += UNIT
            if quality['has_unavailable_resources']:
                score -= UNIT
            else:
                score += UNIT
        if 'discussions' in quality:
            if quality['has_untreated_discussions']:
                score -= UNIT
            else:
                score += UNIT
        if score < 0:
            return 0
        return score

    @classmethod
    def get(cls, id_or_slug):
        obj = cls.objects(slug=id_or_slug).first()
        return obj or cls.objects.get_or_404(id=id_or_slug)

    def add_resource(self, resource):
        '''Perform an atomic prepend for a new resource'''
        resource.validate()
        self.update(
            __raw__={
                '$push': {
                    'resources': {
                        '$each': [resource.to_mongo()],
                        '$position': 0
                    }
                }
            })
        self.reload()
        post_save.send(self.__class__,
                       document=self,
                       resource_added=resource.id)

    def update_resource(self, resource):
        '''Perform an atomic update for an existing resource'''
        index = self.resources.index(resource)
        data = {'resources__{index}'.format(index=index): resource}
        self.update(**data)
        self.reload()
        post_save.send(self.__class__, document=self)

    @property
    def community_resources(self):
        return CommunityResource.objects.filter(dataset=self) if self.id else []

    @cached_property
    def json_ld(self):
        result = {
            '@context': 'http://schema.org',
            '@type': 'Dataset',
            '@id': str(self.id),
            'alternateName': self.slug,
            'dateCreated': self.created_at.isoformat(),
            'dateModified': self.last_modified.isoformat(),
            'url': url_for('datasets.show', dataset=self, _external=True),
            'name': self.title,
            'keywords': ','.join(self.tags),
            'distribution': [resource.json_ld for resource in self.resources],
            # These values are not standard
            'contributedDistribution':
                [resource.json_ld for resource in self.community_resources],
            'extras': [get_json_ld_extra(*item) for item in self.extras.items()],
        }

        if self.description:
            result['description'] = mdstrip(self.description)

        if self.license and self.license.url:
            result['license'] = self.license.url

        if self.organization:
            author = self.organization.json_ld
        elif self.owner:
            author = self.owner.json_ld
        else:
            author = None

        if author:
            result['author'] = author

        return result
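
A worked example of the scoring logic (the quality dict below is hypothetical, shaped like the output of the quality property; dataset is a saved Dataset instance):

quality = {
    'update_in': -3,                         # next update is still in the future
    'tags_count': 5,
    'description_length': 200,
    'has_resources': True,
    'has_only_closed_or_no_formats': False,
    'has_unavailable_resources': False,
    'discussions': 2,
    'has_untreated_discussions': False,
}
# +2 (updated on time) +2 (tags) +2 (description)
# +2 (open formats) +2 (available resources) +2 (treated discussions)
score = dataset.compute_quality_score(quality)  # == 12 with UNIT = 2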
Example 5
class KeywordLocker(Document):

    PLATFORM_CHOICES = (('yd', 'Mobile platform'), ('pc', 'PC platform'))

    shop_id = IntField(verbose_name='Shop ID', required=True)
    campaign_id = IntField(verbose_name='Campaign ID', required=True)
    adgroup_id = IntField(verbose_name='Adgroup ID', required=True)
    keyword_id = IntField(verbose_name='Keyword ID', primary_key=True)
    word = StringField(verbose_name='Keyword', max_length=100)
    exp_rank_range = ListField(verbose_name='Expected rank range', default=[1, 20])
    limit_price = IntField(verbose_name='Maximum bid limit', default=500)  # unit: fen (cents)
    platform = StringField(verbose_name='Target platform',
                           choices=PLATFORM_CHOICES,
                           default='pc')
    nearly_success = IntField(verbose_name='Allow near success', choices=(0, 1),
                              default=0)  # 1 means a rank close to the expected range also counts when the exact range cannot be won.

    # Fields used by rank locking in addition to manual rank grabbing
    start_time = StringField(verbose_name='Daily rank-grab start time', default='00:00')
    end_time = StringField(verbose_name='Daily rank-grab end time', default='23:59')
    next_run_time = DateTimeField(verbose_name='Next run time',
                                  default=datetime.datetime.now)
    last_run_time = DateTimeField(
        verbose_name='Task start time')  # Recorded so running tasks older than one hour can be reclaimed.
    is_running = IntField(verbose_name='Task running state', default=0)
    is_stop = IntField(verbose_name='Paused or not', choices=(0, 1), default=0)
    is_auto_robrank = BooleanField(verbose_name='Automatic rank grabbing', default=False)

    meta = {
        'db_alias': "mnt-db",
        'collection': 'engine_kwlocker',
        'indexes': ['shop_id', 'campaign_id', 'adgroup_id', 'word']
    }

    MOST_TRY_TIMES = 6

    @property
    def tapi(self):
        if not hasattr(self, '_tapi'):
            self._tapi = get_tapi(shop_id=self.shop_id)
        return self._tapi

    @property
    def forecast_price(self):
        if not hasattr(self, '_forecast_price'):
            # Taobao has taken the rank-forecast API offline, so return 0 for
            # now; update this function once Taobao brings the API back.
            self._forecast_price = 0
        return self._forecast_price

    @property
    def back_price(self):
        '''The price to restore to when rank grabbing fails'''
        if not hasattr(self, '_back_price'):
            self._back_price = min(self.limit_price, max(5, self.old_price))
        return self._back_price

    def add_alg_property(self, cur_price):
        self.old_price = cur_price  # bid before rank grabbing
        self.test_price = cur_price  # test bid
        self.cur_price = cur_price  # current bid price
        self.cur_rank = -1  # current keyword rank
        self.cur_rank_dict = {
            'pc': 9,
            'yd': 12,
            'pc_desc': 'beyond page 5',
            'yd_desc': 'beyond the first 20'
        }
        self.head_test_price = 0  # bisection: price that ranked ahead
        self.tail_test_price = 0  # bisection: price that ranked behind
        self.try_times = 0  # number of grab attempts
        self.fail_reason = ''
        self.result_flag = 'doing'
        self.last_result_flag = 'doing'
        self.last_price = 0
        self.last_rank = 101
        self.fininshed_status_list = ['nearly_ok', 'ok', 'failed']
        self.rollback_status_list = ['nearly_ok_ing', 'ok_ing', 'failed_ing']
        # result_flag states: 'doing', 'waiting' | 'ok', 'nearly_ok', 'failed'
        # Grab in progress: 'doing' -- keep repricing and retrying
        # |--> attempt exhausted: 'waiting'
        #   |--> current rank succeeded, or nearly; finish: 'ok', 'nearly_ok'
        #   |--> a previous rank succeeded, or nearly; restore that bid: 'nearly_ok_ing', 'ok_ing'
        #       |--> reprice succeeded: 'nearly_ok', 'ok'
        #       |--> reprice failed: retry repricing
        #   |--> mark as failed and restore the original price: 'failed_ing'
        #       |--> reprice succeeded: 'failed'
        #       |--> reprice failed: retry repricing

    def check_is_ok(self, checking_rank):
        return self.exp_rank_range[0] <= checking_rank <= self.exp_rank_range[1]

    def check_price(self, price):
        return 5 <= price <= self.limit_price

    def check_test_price(self):
        return self.test_price != self.cur_price and self.check_price(
            self.test_price)

    def check_nearly_success(self, checking_rank):
        return self.nearly_success and checking_rank - self.exp_rank_range[
            1] <= 3

    def calc_test_price(self):
        if (not self.head_test_price) or (
                not self.tail_test_price):  # first bid calculation
            self.head_test_price = self.limit_price
            self.tail_test_price = min(5, self.limit_price)  # minimum bid: 0.05 yuan
            if self.forecast_price:
                self.test_price = min(self.forecast_price, self.limit_price)
                return True
        if self.cur_rank <= self.exp_rank_range[1]:
            self.head_test_price = self.cur_price - 1
            self.test_price = int(self.cur_price * 0.95)
            if self.test_price <= self.tail_test_price < self.head_test_price:
                self.test_price = (self.head_test_price +
                                   self.tail_test_price) // 2
        else:
            self.tail_test_price = self.cur_price + 1
            self.test_price = (self.head_test_price + self.tail_test_price +
                               1) // 2  # round up (ceiling)
        self.test_price = max(self.test_price, 5)
        self.test_price = min(self.test_price, self.limit_price)
        return True

    def save_log(self, desc):
        log.info(
            '[rob_rank] word=%s, %s, test_price=%s, try_times=%s, exp_rank=%s, limit_price=%s, old_price=%s, %s'
            % (self.word, self.platform, self.test_price, self.try_times,
               self.exp_rank_range, self.limit_price, self.old_price, desc))

    def save_process(self, desc):
        # log.info('%s, %s, %s, %s, %s, %s, %s' % (self.keyword_id, self.word, self.cur_price, self.limit_price, desc, self.cur_rank_dict['%s_desc' % self.platform], self.result_flag))
        msg_dict = {
            self.keyword_id: json.dumps({
                'msg': str(desc),
                'price': self.cur_price / 100.0,
                'word': self.word,
                'platform': self.platform,
                'exp_rank_range': self.exp_rank_range,
                'limit_price': self.limit_price / 100.0,
                'nearly_success': self.nearly_success,
                'result_flag': self.result_flag,
                'cur_rank_dict': self.cur_rank_dict,
                'rob_time': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            })
        }
        if self.is_auto_robrank:
            MessageChannel.save_msg(msg_dict)
        else:
            MessageChannel.publish_msg(msg_dict)
        return True

    def clac_next_bid(self):
        if self.result_flag != 'doing':
            return
        try:
            if self.try_times >= KeywordLocker.MOST_TRY_TIMES:
                self.result_flag = 'waiting'
                self.fail_reason = 'Too many attempts'
                return
            # elif self.cur_price == self.limit_price:
            #     self.result_flag = 'waiting'
            #     self.fail_reason = 'Already bid at the price limit'
            else:
                self.calc_test_price()
                if self.check_test_price():
                    # Snapshot on success so a later failure can restore
                    # this price directly
                    if self.check_is_ok(
                            self.cur_rank) or self.check_nearly_success(
                                self.cur_rank):
                        self.last_rank = self.cur_rank
                        self.last_price = self.cur_price
                        self.last_rank_dict = self.cur_rank_dict
                    self.try_times += 1
                else:
                    self.result_flag = 'waiting'
                    self.fail_reason = 'No room to adjust the price'
        except Exception as e:
            self.result_flag = 'waiting'
            self.save_log(desc='Bid calculation failed: e=%s' % e)
            self.fail_reason = 'Other reason'
            return
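
A hedged sketch of the driver loop implied by the state machine above (fetch_current_rank and change_bid are hypothetical engine helpers, not part of the model):

def rob_rank_once(locker, cur_price):
    """One hypothetical engine pass over a KeywordLocker task."""
    locker.add_alg_property(cur_price)
    while locker.result_flag == 'doing':
        locker.cur_rank = fetch_current_rank(locker)  # hypothetical helper
        if locker.check_is_ok(locker.cur_rank):
            locker.result_flag = 'ok'
            break
        locker.clac_next_bid()  # may flip result_flag to 'waiting'
        if locker.result_flag == 'doing':
            change_bid(locker, locker.test_price)  # hypothetical helper
            locker.cur_price = locker.test_price
        locker.save_process(desc='bisection step')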
Example 6
class PhoneNumber(Document):
    meta = {
        "collection": "noc.phonenumbers",
        "strict": False,
        "auto_create_index": False,
        "indexes": [
            "static_service_groups",
            "effective_service_groups",
            "static_client_groups",
            "effective_client_groups",
        ],
    }

    number = StringField()
    profile = PlainReferenceField(PhoneNumberProfile)
    state = PlainReferenceField(State)
    dialplan = PlainReferenceField(DialPlan)
    phone_range = PlainReferenceField(PhoneRange)
    category = PlainReferenceField(NumberCategory)
    description = StringField()
    service = PlainReferenceField(Service)
    project = ForeignKeyField(Project)
    protocol = StringField(
        default="SIP",
        choices=[
            ("SIP", "SIP"),
            ("H323", "H.323"),
            ("SS7", "SS7"),
            ("MGCP", "MGCP"),
            ("H247", "H.247"),
            ("ISDN", "ISDN"),
            ("Skinny", "Skinny"),
        ],
    )
    # Auto-change status to F after *allocated_till*
    allocated_till = DateTimeField()
    # Last state change
    changed = DateTimeField()
    #
    administrative_domain = ForeignKeyField(AdministrativeDomain)
    # Resource groups
    static_service_groups = ListField(ObjectIdField())
    effective_service_groups = ListField(ObjectIdField())
    static_client_groups = ListField(ObjectIdField())
    effective_client_groups = ListField(ObjectIdField())

    _id_cache = cachetools.TTLCache(100, ttl=60)

    def __str__(self):
        return self.number

    @classmethod
    @cachetools.cachedmethod(operator.attrgetter("_id_cache"),
                             lock=lambda _: id_lock)
    def get_by_id(cls, id):
        return PhoneNumber.objects.filter(id=id).first()

    def clean(self):
        super(PhoneNumber, self).clean()
        # Check number is valid integer
        self.number = clean_number(self.number or "")
        if not self.number:
            raise ValidationError("Empty phone number")
        # Change parent
        self.phone_range = PhoneRange.get_closest_range(
            dialplan=self.dialplan, from_number=self.number)
        # Set profile when necessary
        if not self.profile:
            if not self.phone_range:
                raise ValidationError("Either range or profile must be set")
            self.profile = self.phone_range.profile.default_number_profile

    @property
    def enum(self):
        return ".".join(reversed(self.number)) + ".e164.arpa"
Example 7
class Contributions(DynamicDocument):
    project = LazyReferenceField("Projects",
                                 required=True,
                                 passthrough=True,
                                 reverse_delete_rule=CASCADE)
    identifier = StringField(required=True,
                             help_text="material/composition identifier")
    formula = StringField(
        help_text="formula (set dynamically if not provided)")
    is_public = BooleanField(required=True,
                             default=True,
                             help_text="public/private contribution")
    last_modified = DateTimeField(required=True,
                                  default=datetime.utcnow,
                                  help_text="time of last modification")
    needs_build = BooleanField(default=True, help_text="needs notebook build?")
    data = DictField(
        default=dict,
        validation=valid_dict,
        pullout_key="display",
        help_text="simple free-form data",
    )
    structures = ListField(ReferenceField("Structures", null=True),
                           default=list,
                           max_length=10)
    tables = ListField(ReferenceField("Tables", null=True),
                       default=list,
                       max_length=10)
    attachments = ListField(ReferenceField("Attachments", null=True),
                            default=list,
                            max_length=10)
    notebook = ReferenceField("Notebooks")
    meta = {
        "collection": "contributions",
        "indexes": [
            "project",
            "identifier",
            "formula",
            "is_public",
            "last_modified",
            "needs_build",
            "notebook",
            {"fields": [(r"data.$**", 1)]},
            # can only use wildcardProjection option with wildcard index on all document fields
            {"fields": [(r"$**", 1)], "wildcardProjection": {"project": 1}},
        ] + list(COMPONENTS.keys()),
    }

    @queryset_manager
    def objects(doc_cls, queryset):
        return queryset.no_dereference().only("project", "identifier",
                                              "formula", "is_public",
                                              "last_modified", "needs_build")

    @classmethod
    def post_init(cls, sender, document, **kwargs):
        # replace existing components with according ObjectIds
        for component, fields in COMPONENTS.items():
            lst = document._data.get(component)
            if lst and lst[0].id is None:  # id is None for incoming POST
                resource = get_resource(component)
                for i, o in enumerate(lst):
                    digest = get_md5(resource, o, fields)
                    objs = resource.document.objects(md5=digest)
                    exclude = list(resource.document._fields.keys())
                    obj = objs.exclude(*exclude).only("id").first()
                    if obj:
                        lst[i] = obj.to_dbref()

    @classmethod
    def pre_save_post_validation(cls, sender, document, **kwargs):
        # set formula field
        if hasattr(document, "formula") and not document.formula:
            formulae = current_app.config["FORMULAE"]
            document.formula = formulae.get(document.identifier,
                                            document.identifier)

        # project is a LazyReferenceField; reload columns explicitly since the
        # custom queryset manager excludes them
        project = document.project.fetch().reload("columns")
        columns = {col.path: col for col in project.columns}

        # run data through Pint Quantities and save as dicts
        def make_quantities(path, key, value):
            key = key.strip()
            if key in quantity_keys or not isinstance(value,
                                                      (str, int, float)):
                return key, value

            # can't be a quantity if it contains 2+ spaces
            str_value = str(value).strip()
            if str_value.count(" ") > 1:
                return key, value

            # don't parse if column.unit indicates string type
            field = delimiter.join(["data"] + list(path) + [key])
            if field in columns:
                if columns[field].unit == "NaN":
                    return key, str_value

            # parse as quantity
            q = get_quantity(str_value)
            if not q:
                return key, value

            # silently ignore "nan"
            if isnan(q.nominal_value):
                return False

            ## try compact representation
            #qq = q.value.to_compact()
            #q = new_error_units(q, qq)

            ## reduce dimensionality if possible
            #if not q.check(0):
            #    qq = q.value.to_reduced_units()
            #    q = new_error_units(q, qq)

            # ensure that the same units are used across contributions
            if field in columns:
                column = columns[field]
                if column.unit != str(q.value.units):
                    try:
                        qq = q.value.to(column.unit)
                        q = new_error_units(q, qq)
                    except DimensionalityError:
                        raise ValueError(
                            f"Can't convert [{q.units}] to [{column.unit}] for {field}!"
                        )

            # significant digits
            q = truncate_digits(q)

            # return new value dict
            display = str(q.value) if isnan(q.std_dev) else str(q)
            value = {
                "display": display,
                "value": q.nominal_value,
                "error": q.std_dev,
                "unit": str(q.units),
            }
            return key, value

        document.data = remap(document.data,
                              visit=make_quantities,
                              enter=enter)
        document.last_modified = datetime.utcnow()
        document.needs_build = True

    @classmethod
    def pre_delete(cls, sender, document, **kwargs):
        args = list(COMPONENTS.keys())
        document.reload(*args)

        for component in COMPONENTS.keys():
            # check if other contributions exist before deletion!
            for idx, obj in enumerate(getattr(document, component)):
                q = {component: obj.id}
                if sender.objects(**q).count() < 2:
                    obj.delete()
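
These handlers are shaped like mongoengine signal receivers; a sketch of the wiring they imply (the connection site is an assumption, not shown in the source):

from mongoengine import signals

signals.post_init.connect(Contributions.post_init, sender=Contributions)
signals.pre_save_post_validation.connect(
    Contributions.pre_save_post_validation, sender=Contributions)
signals.pre_delete.connect(Contributions.pre_delete, sender=Contributions)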
Example 8
class GeocoderCache(Document):
    meta = {
        "collection": "noc.geocodercache",
        "indexes": [{"fields": ["expires"], "expireAfterSeconds": 0}],
    }

    # query hash
    hash = StringField(primary_key=True)
    # Raw query
    query = StringField()
    # Geocoding system
    geocoder = StringField()
    #
    path = ListField(StringField())
    # Geo coordinates
    lon = FloatField()
    lat = FloatField()
    #
    error = StringField()
    #
    expires = DateTimeField()

    NEGATIVE_TTL = config.geocoding.negative_ttl

    rx_slash = re.compile(r"\s+/")
    rx_dots = re.compile(r"\.\.+")
    rx_sep = re.compile(r"[ \t;:!]+")
    rx_comma = re.compile(r"(\s*,)+")
    rx_dotcomma = re.compile(r",\s*\.,")

    geocoders = []

    gcls = {
        "yandex": "noc.core.geocoding.yandex.YandexGeocoder",
        "google": "noc.core.geocoding.google.GoogleGeocoder"
    }

    @classmethod
    def iter_geocoders(cls):
        if not cls.geocoders:
            for gc in config.geocoding.order.split(","):
                gc = gc.strip()
                if gc in cls.gcls:
                    h = get_handler(cls.gcls[gc])
                    if h:
                        cls.geocoders += [h]
        for h in cls.geocoders:
            yield h

    @classmethod
    def clean_query(cls, query):
        if isinstance(query, bytes):
            query = query.decode("utf-8")
        query = query.upper()
        query = cls.rx_slash.sub("/", query)
        query = cls.rx_dots.sub(" ", query)
        query = cls.rx_comma.sub(", ", query)
        query = cls.rx_dotcomma.sub(",", query)
        query = cls.rx_sep.sub(" ", query)
        return query.strip()

    @classmethod
    def get_hash(cls, query):
        return base64.b64encode(
            hashlib.sha256(query.encode("utf-8")).digest()).decode()[:12]

    @classmethod
    def forward(cls, query, bounds=None):
        # Clean query
        query = cls.clean_query(query)
        if not query:
            logger.warning("Query is None")
            return None
        # Calculate hash
        hash = cls.get_hash(query)
        # Search data
        c = cls._get_collection()
        #
        r = c.find_one({"_id": hash})
        if r:
            # Found
            if r.get("error") and r.get("exact") is None:
                # If exact result - continue
                logger.warning("Error result and exact is NONE on query: %s",
                               query)
                return None
            return GeoCoderResult(exact=r.get("exact", True),
                                  query=query,
                                  path=r.get("path") or [],
                                  lon=r.get("lon"),
                                  lat=r.get("lat"))
        # Not found, resolve
        r = None
        error = "Not found"
        gsys = None
        lr = None
        for gcls in cls.iter_geocoders():
            g = gcls()
            gsys = g.name
            try:
                r = g.forward(query, bounds)
                if r and r.exact:
                    if r.lon is not None and r.lat is not None:
                        error = None
                        break
                    else:
                        r = None
                        error = "No coordinates"
                else:
                    if r and not lr and r.lon and r.lat:
                        lr = r  # Save first non-exact
                    r = None
            except GeoCoderError as e:
                error = str(e)
        sq = {"query": query, "system": gsys, "error": error}
        if not r and lr:
            r = lr  # Reuse first non-exact result
        if r:
            if r.path:
                sq["path"] = r.path
            if r.lon and r.lat:
                sq["lon"], sq["lat"] = r.lon, r.lat
            sq["exact"] = r.exact
        if not r or not r.exact:
            sq["expires"] = datetime.datetime.now() + datetime.timedelta(
                seconds=cls.NEGATIVE_TTL)
        # Write to database
        c.update_one({"_id": hash}, {"$set": sq}, upsert=True)
        return r
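
A hedged usage sketch (the query string is arbitrary):

r = GeocoderCache.forward("Moscow, Tverskaya 7")
if r:
    print(r.exact, r.lon, r.lat, r.path)
# Misses and non-exact hits are written with an 'expires' stamp, so the TTL
# index declared in meta evicts them after NEGATIVE_TTL seconds.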
Example 9
class MIB(Document):
    meta = {
        "collection": "noc.mibs",
        "strict": False,
        "auto_create_index": False
    }
    name = StringField(required=True, unique=True)
    description = StringField(required=False)
    last_updated = DateTimeField(required=True)
    depends_on = ListField(StringField())
    # TC definitions: name -> SYNTAX
    typedefs = DictField(required=False)
    # Compiled MIB version
    version = IntField(required=False, default=0)

    _id_cache = cachetools.TTLCache(maxsize=100, ttl=60)
    _name_cache = cachetools.TTLCache(maxsize=100, ttl=60)

    def __str__(self):
        return self.name

    @classmethod
    @cachetools.cachedmethod(operator.attrgetter("_id_cache"),
                             lock=lambda _: id_lock)
    def get_by_id(cls, id):
        return MIB.objects.filter(id=id).first()

    @classmethod
    @cachetools.cachedmethod(operator.attrgetter("_name_cache"),
                             lock=lambda _: id_lock)
    def get_by_name(cls, name):
        return MIB.objects.filter(name=name).first()

    @classmethod
    def parse_syntax(cls, syntax):
        """
        Process part of smidump output and convert to syntax structure
        """
        if "base_type" in syntax:
            # Already compiled
            return syntax
        s = {}
        if "basetype" in syntax:
            s["base_type"] = syntax["basetype"]
        elif "base_type" in syntax:
            s["base_type"] = syntax["base_type"]
        if "name" in syntax and "module" in syntax:
            if syntax["module"] == "":
                # Empty module -> builtin types
                s["base_type"] = syntax["name"]
            else:
                # Resolve references
                mib = MIB.objects.filter(name=syntax["module"]).first()
                if mib is None:
                    raise MIBNotFoundException(syntax["module"])
                if not mib.typedefs or syntax["name"] not in mib.typedefs:
                    return {}
                td = mib.typedefs[syntax["name"]]
                for k in ["base_type", "display_hint", "enum_map"]:
                    if k in td:
                        s[k] = td[k]
        if s["base_type"] in ("Enumeration", "Bits"):
            enum_map = s.get("enum_map", {})
            for k in syntax:
                sk = syntax[k]
                if not isinstance(sk, dict):
                    continue
                if "nodetype" in sk and sk["nodetype"] == "namednumber":
                    enum_map[sk["number"]] = k
            s["enum_map"] = enum_map
        if "format" in syntax:
            s["display_hint"] = syntax["format"]
        return s

    def load_data(self, data):
        """
        Load mib data from list of {oid:, name:, description:, syntax:}
        :param data:
        :return:
        """
        # Get MIB preference
        mp = MIBPreference.objects.filter(mib=self.name).first()
        mib_preference = mp.preference if mp else None
        prefs = {}  # MIB Preferences cache
        # Load data
        for v in data:
            oid = v["oid"]
            oid_name = v["name"]
            description = v.get("description", None)
            o = MIBData.objects.filter(oid=oid).first()
            if o is not None:
                if o.name == oid_name:
                    # Same oid, same name: duplicated declaration.
                    # Silently skip
                    continue
                # For same MIB - leave first entry
                if oid_name.split("::", 1)[0] == o.name.split("::", 1)[0]:
                    continue
                # Try to resolve collision
                if not mib_preference:
                    # No preference for target MIB
                    raise OIDCollision(oid, oid_name, o.name,
                                       "No preference for %s" % self.name)
                o_mib = o.name.split("::")[0]
                if o_mib not in prefs:
                    mp = MIBPreference.objects.filter(mib=o_mib).first()
                    if not mp:
                        # No preference for destination MIB
                        raise OIDCollision(oid, oid_name, o.name,
                                           "No preference for %s" % o_mib)
                    prefs[o_mib] = mp.preference  # Add to cache
                o_preference = prefs[o_mib]
                if mib_preference == o_preference:
                    # Equal preferences, collision
                    raise OIDCollision(oid, oid_name, o.name,
                                       "Equal preferences")
                if mib_preference < o_preference:
                    # Replace existing
                    o.aliases = sorted(o.aliases + [o.name])
                    o.name = oid_name
                    o.mib = self.id
                    if description:
                        o.description = description
                    syntax = v.get("syntax")
                    if syntax:
                        o.syntax = MIB.parse_syntax(syntax)
                    o.save()
                else:
                    # Append to aliases
                    if oid_name not in o.aliases:
                        o.aliases = sorted(o.aliases + [oid_name])
                        o.save()
            else:
                # No OID collision found, save
                syntax = v.get("syntax")
                if syntax:
                    syntax = MIB.parse_syntax(syntax)
                MIBData(mib=self.id,
                        oid=oid,
                        name=oid_name,
                        description=description,
                        syntax=syntax).save()

    @classmethod
    def get_oid(cls, name):
        """
        Get OID by name
        """
        tail = ""
        match = rx_tailing_numbers.match(name)
        if match:
            name, tail = match.groups()
        # Search by primary name
        d = MIBData.objects.filter(name=name).first()
        if not d:
            # Search by aliases
            d = MIBData.objects.filter(aliases=name).first()
        if d:
            return d.oid + tail
        return None

    @classmethod
    def get_name(cls, oid):
        """
        Get longest match name by OID
        """
        oid = OIDAlias.rewrite(oid)
        l_oid = oid.split(".")
        rest = []
        while l_oid:
            c_oid = ".".join(l_oid)
            d = MIBData.objects.filter(oid=c_oid).first()
            if d:
                return MIBAlias.rewrite(".".join([d.name] + rest))
            else:
                rest = [l_oid.pop()] + rest
        return oid

    @classmethod
    def get_name_and_syntax(cls, oid):
        """
        :return: (name, syntax)
        """
        oid = OIDAlias.rewrite(oid)
        l_oid = oid.split(".")
        rest = []
        while l_oid:
            c_oid = ".".join(l_oid)
            d = MIBData.objects.filter(oid=c_oid).first()
            if d:
                name = d.name
                if rest:
                    name += "." + ".".join(reversed(rest))
                return (MIBAlias.rewrite(name),
                        SyntaxAlias.rewrite(name, d.syntax))
            else:
                rest += [l_oid.pop()]
        return oid, None

    @classmethod
    def get_description(cls, name):
        """
        Get longest match description by name
        """
        match = rx_tailing_numbers.match(name)
        if match:
            name, _ = match.groups()
        # Search by primary name
        d = MIBData.objects.filter(name=name).first()
        if not d:
            # Search by aliases
            d = MIBData.objects.filter(aliases=name).first()
        if d:
            return d.description
        else:
            return None

    @property
    def depended_by(self):
        return MIB.objects.filter(depends_on=self.name)

    def clean(self):
        """
        Gracefully wipe out MIB data
        """
        # Delete data without aliases
        MIBData.objects.filter(mib=self.id, aliases=[]).delete()
        # Dereference aliases
        prefs = {}  # MIB -> Preference
        for o in MIBData.objects.filter(mib=self.id, aliases__ne=[]):
            if not o.aliases:  # NO aliases
                o.delete()
                continue
            if len(o.aliases) == 1:  # Only one alias
                ba = o.aliases[0]
            else:
                # Find preferable alias
                ba = None
                lp = None
                for a in o.aliases:
                    am = a.split("::")[0]
                    # Find MIB preference
                    if am not in prefs:
                        p = MIBPreference.objects.filter(mib=am).first()
                        if p is None:
                            raise Exception("No preference for %s" % am)
                        prefs[am] = p.preference
                    p = prefs[am]
                    if lp is None or p < lp:
                        # Better
                        ba = a
                        lp = p
            # Promote preferable alias
            o.name = ba
            o.aliases = [a for a in o.aliases if a != ba]
            o.save()

    @classmethod
    def resolve_vars(cls, vars):
        """
        Resolve FM key -> value dict according to MIBs

        :param cls:
        :param vars:
        :return:
        """
        r = {}
        for k in vars:
            if not is_oid(k):
                # Nothing to resolve
                continue
            v = fm_unescape(vars[k])
            rk, syntax = cls.get_name_and_syntax(k)
            rv = v
            if syntax:
                # Format value according to syntax
                if syntax["base_type"] == "Enumeration":
                    # Expand enumerated type
                    try:
                        rv = syntax["enum_map"][str(v)]
                    except KeyError:
                        pass
                elif syntax["base_type"] == "Bits":
                    # @todo: Fix ugly hack
                    if v.startswith("="):
                        xv = int(v[1:], 16)
                    else:
                        xv = 0
                        for c in v:
                            xv = (xv << 8) + ord(c)
                    # Decode
                    b_map = syntax.get("enum_map", {})
                    b = []
                    n = 0
                    while xv:
                        if xv & 1:
                            x = str(n)
                            if x in b_map:
                                b = [b_map[x]] + b
                            else:
                                b = ["%X" % (1 << n)]
                        n += 1
                        xv >>= 1
                    rv = "(%s)" % ",".join(b)
                else:
                    # Render according to TC
                    rv = render_tc(v, syntax["base_type"],
                                   syntax.get("display_hint", None))
                    try:
                        unicode(rv, "utf8")
                    except ValueError:
                        # Escape invalid UTF8
                        rv = fm_escape(rv)
            else:
                try:
                    unicode(rv, "utf8")
                except ValueError:
                    # escape invalid UTF8
                    rv = fm_escape(rv)
            if is_oid(v):
                # Resolve OID in value
                rv = MIB.get_name(v)
            if rk != k or rv != v:
                r[rk] = rv
        return r
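A standalone sketch of the decoding branches above (hypothetical enum_map and b_map values; plain Python, independent of the MIB classes):

# Enumeration: the raw value is expanded through enum_map
syntax = {"base_type": "Enumeration", "enum_map": {"1": "up", "2": "down"}}
assert syntax["enum_map"]["1"] == "up"

# Bits: each set bit is prepended as its mapped name, or as a hex flag
b_map = {"0": "fault", "2": "loopback"}
xv, b, n = 0b101, [], 0  # bits 0 and 2 are set
while xv:
    if xv & 1:
        b = [b_map.get(str(n), "%X" % (1 << n))] + b
    n += 1
    xv >>= 1
assert "(%s)" % ",".join(b) == "(loopback,fault)"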
Example n. 10
class Booking2(Document):
    start_time = DateTimeField(default=datetime.datetime.utcnow)
    end_time = DateTimeField(default=datetime.datetime.utcnow)
    booking_price = FloatField()
    meta = {'db_alias': 'db1'}
Example n. 11
class SensorValueModel(Document):
    meta = {'collection': 'sensor_values'}
    type = StringField(required=True)
    timestamp = DateTimeField(default=datetime.now)
    sensor = ReferenceField(SensorModel, required=True)
    value = FloatField(required=True)
Example n. 12
class Revision(Document):
    user_id = IntField(required=True)
    timestamp = DateTimeField(default=datetime.now, required=True)
    instance_data = DictField()
    instance_related_revisions = DictField()
    instance_type = ReferenceField(ContentType, dbref=False, required=True)
    instance_id = ObjectIdField(required=True)
    comment = StringField(required=False)

    def __init__(self, *args, **kwargs):
        super(Revision, self).__init__(*args, **kwargs)

        # create lookup of related field types
        related_field_types = {}
        instance_model = self.instance_type.document_model()
        for key, field in instance_model._fields.items():
            related_field_type = None
            if isinstance(field, ListField):
                if isinstance(field.field, ReferenceField):
                    related_field_type = field.field.document_type_obj
            elif isinstance(field, ReferenceField):
                related_field_type = field.document_type_obj
            if related_field_type:
                related_field_types[key] = related_field_type
        self.related_field_types = related_field_types

    def __unicode__(self):
        return '<Revision user=%s, time=%s, type=%s, comment=%s>' % (
            self.user_id,
            self.timestamp,
            self.instance_type,
            self.comment,
        )

    @property
    def instance(self):
        instance_model = self.instance_type.document_model()
        data = dict(self.instance_data)
        for key, value in data.items():
            if key in self.related_field_types:
                if key in self.instance_related_revisions:
                    revision_value = self.instance_related_revisions.get(key)
                    if isinstance(revision_value, (list, tuple)):
                        values = []
                        for i, rev_id in enumerate(revision_value):
                            if rev_id:
                                revision = Revision.objects.get(pk=rev_id)
                                values.append(revision.instance)
                            else:
                                obj_id = value[i]
                                values.append(
                                    self.related_field_types.get(
                                        key).objects.get(pk=obj_id))
                        data[key] = values
                    else:
                        if revision_value:
                            revision = Revision.objects.get(pk=revision_value)
                            data[key] = revision.instance
                        else:
                            data[key] = self.related_field_types.get(
                                key).objects.get(pk=value)
                else:
                    if isinstance(value, (list, tuple)):
                        data[key] = [
                            self.related_field_types.get(key).objects.get(pk=v)
                            for v in value
                        ]
                    else:
                        data[key] = self.related_field_types.get(
                            key).objects.get(pk=value)
        return instance_model(**data)

    @property
    def user(self):
        try:
            return User.objects.get(pk=self.user_id)
        except User.DoesNotExist:
            return None

    def diff(self, revision=None):
        """
            Returns the diff of the current revision with the given revision.
            If the given revision is empty, use the latest revision of the 
            document instance.
        """
        if not revision:
            revision = Revision.latest_revision(self.instance)
        if not revision:
            return self.instance_data
        diff_dict = {}
        for key, value in self.instance_data.items():
            if value != revision.instance_data.get(key):
                diff_dict[key] = value
        return diff_dict

    def revert(self):
        """
            Revert the associated document instance back to this revision.
            Return the document instance.
        """
        instance = self.instance
        instance.save()
        return instance

    @staticmethod
    def latest_revision(instance):
        try:
            instance_type = ContentType.objects.get(
                class_name=instance._class_name)
            revisions = Revision.objects.filter(
                instance_type=instance_type,
                instance_id=instance.pk).order_by('-timestamp')
            if revisions.count() > 0:
                return revisions[0]
        except ContentType.DoesNotExist:
            pass
        return None

    @staticmethod
    def save_revision(user, instance, comment=None):
        if not instance._meta.get('versioned', None):
            raise ValueError(
                'instance meta does not specify it to be versioned, set versioned=True to enable'
            )

        instance_type, is_new = ContentType.objects.get_or_create(
            class_name=instance._class_name)
        instance_data = dict(instance._data)
        instance_related_revisions = {}

        # ensure instance has been saved at least once
        if not instance.pk:
            instance.save()

        # Save instance ID in data dict
        instance_data['id'] = instance.pk

        # Remove None entry if it exists
        if None in instance_data:
            del instance_data[None]

        # create lookup of related field types
        related_field_types = {}
        for key, field in instance._fields.items():
            related_field_type = None
            if isinstance(field, ListField):
                if isinstance(field.field, ReferenceField):
                    related_field_type = field.field.document_type_obj
            elif isinstance(field, ReferenceField):
                related_field_type = field.document_type_obj
            # TODO: elif isinstance(field, DictField):

            if related_field_type:
                related_field_types[key] = related_field_type

        # process field data
        for key, value in instance_data.items():

            if key in related_field_types:

                # check if related field is versioned, store revision data
                if related_field_types.get(key)._meta.get('versioned', None):

                    # versioned, store revision ID(s)
                    if isinstance(value, (list, tuple)):
                        id_revisions = []
                        for v in value:
                            revision = Revision.latest_revision(v)
                            # TODO: if latest revision doesn't exist then maybe
                            # it should be created here
                            if revision:
                                id_revisions.append(revision.pk)
                            else:
                                # if no revision exists, explicitly store
                                # a None entry
                                id_revisions.append(None)
                        instance_related_revisions[key] = id_revisions
                    else:
                        revision = Revision.latest_revision(value)
                        # TODO: if latest revision doesn't exist then maybe it
                        # should be created here
                        if revision:
                            instance_related_revisions[key] = revision.pk
                        else:
                            # if no revision exists, explicitly store a
                            # None entry
                            instance_related_revisions[key] = None

                # store object ID(s) in instance_data
                if isinstance(value, (list, tuple)):
                    instance_data[key] = [v.pk for v in value]
                else:
                    instance_data[key] = value.pk

            else:
                # store data as is
                instance_data[key] = value

        # create the revision, but do not save it yet
        revision = Revision(
            user_id=user.pk,
            timestamp=datetime.now(),
            instance_type=instance_type,
            instance_data=instance_data,
            instance_related_revisions=instance_related_revisions,
            instance_id=instance.pk,
            comment=comment)

        # check for any differences in data from the latest revision;
        # return the latest revision if there is no difference
        latest_revision = Revision.latest_revision(instance)
        if latest_revision:
            diff = revision.diff(latest_revision)
            if not diff:
                return latest_revision, False

        # save revision and return
        revision.save()
        return revision, True
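A minimal usage sketch of the revision workflow (the Article document, user object and field values are hypothetical; the ContentType registry used above must already know the class):

class Article(Document):
    meta = {'versioned': True}
    title = StringField()

article = Article(title='draft').save()
rev1, _ = Revision.save_revision(user, article, comment='initial')
article.title = 'final'
article.save()
rev2, _ = Revision.save_revision(user, article, comment='retitled')
rev2.diff(rev1)   # -> {'title': 'final'}
rev1.revert()     # writes the old field values back and returns the instance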
Example n. 13
class Captcha(EmbeddedDocument):
    captcha = StringField()
    expireTime = DateTimeField()
Example n. 14
class Done(Document):
    meta = {"todo": "done"}
    name = StringField()
    done_on = DateTimeField(default=datetime.now)
Example n. 15
class RemoteSystem(Document):
    meta = {
        "collection": "noc.remotesystem",
        "strict": False,
        "auto_create_index": False
    }

    name = StringField(unique=True)
    description = StringField()
    handler = StringField()
    # Environment variables
    environment = ListField(EmbeddedDocumentField(EnvItem))
    # Enable extractors/loaders
    enable_admdiv = BooleanField()
    enable_administrativedomain = BooleanField()
    enable_authprofile = BooleanField()
    enable_container = BooleanField()
    enable_link = BooleanField()
    enable_managedobject = BooleanField()
    enable_managedobjectprofile = BooleanField()
    enable_networksegment = BooleanField()
    enable_networksegmentprofile = BooleanField()
    enable_object = BooleanField()
    enable_service = BooleanField()
    enable_serviceprofile = BooleanField()
    enable_subscriber = BooleanField()
    enable_subscriberprofile = BooleanField()
    enable_resourcegroup = BooleanField()
    enable_ttsystem = BooleanField()
    enable_project = BooleanField()
    # Usage statistics
    last_extract = DateTimeField()
    last_successful_extract = DateTimeField()
    extract_error = StringField()
    last_load = DateTimeField()
    last_successful_load = DateTimeField()
    load_error = StringField()

    _id_cache = cachetools.TTLCache(maxsize=100, ttl=60)
    _name_cache = cachetools.TTLCache(maxsize=100, ttl=60)

    def __str__(self):
        return self.name

    @classmethod
    @cachetools.cachedmethod(operator.attrgetter("_id_cache"),
                             lock=lambda _: id_lock)
    def get_by_id(cls, id) -> Optional["RemoteSystem"]:
        return RemoteSystem.objects.filter(id=id).first()

    @classmethod
    @cachetools.cachedmethod(operator.attrgetter("_name_cache"),
                             lock=lambda _: id_lock)
    def get_by_name(cls, name: str) -> Optional["RemoteSystem"]:
        return RemoteSystem.objects.filter(name=name).first()

    @property
    def config(self):
        if not hasattr(self, "_config"):
            self._config = {e.key: e.value for e in self.environment}
        return self._config

    def get_handler(self):
        """
        Return BaseTTSystem instance
        """
        h = get_handler(str(self.handler))
        if not h:
            raise ValueError
        return h(self)

    def get_extractors(self):
        extractors = []
        for k in self._fields:
            if k.startswith("enable_") and getattr(self, k):
                extractors += [k[7:]]
        return extractors

    def extract(self, extractors=None, quiet=False):
        extractors = extractors or self.get_extractors()
        error = None
        try:
            self.get_handler().extract(extractors)
        except Exception as e:
            if not quiet:
                raise e
            error_report()
            error = str(e)
        self.last_extract = datetime.datetime.now()
        if error:
            self.extract_error = error
        else:
            self.last_successful_extract = self.last_extract
        self.save()

    def load(self, extractors=None, quiet=False):
        extractors = extractors or self.get_extractors()
        error = None
        try:
            self.get_handler().load(extractors)
        except Exception as e:
            if not quiet:
                raise e
            error_report()
            error = str(e)
        self.last_load = datetime.datetime.now()
        if error:
            self.load_error = error
        else:
            self.last_successful_load = self.last_load
        self.save()

    def check(self, extractors=None):
        extractors = extractors or self.get_extractors()
        try:
            return self.get_handler().check(extractors)
        except Exception:
            error_report()

    def get_loader_chain(self):
        return self.get_handler().get_loader_chain()
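A hypothetical ETL round-trip (the handler path and field values are illustrative; the handler string must resolve to an extractor/loader implementation):

rs = RemoteSystem(name="crm",
                  handler="myproject.etl.crm.CRMRemoteSystem",
                  enable_managedobject=True).save()
rs.get_extractors()     # -> ['managedobject']
rs.extract(quiet=True)  # pull data from the remote system, record timestamps
rs.load(quiet=True)     # load the extracted data, record timestamps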
Example n. 16
class BioSegTrial(Document):
    meta = {
        "collection": "biosegtrials",
        "strict": False,
        "auto_create_index": False,
        "indexes": [{
            "fields": ["expires"],
            "expireAfterSeconds": 0
        }],
    }

    # Reason of the trial
    reason = StringField()
    # Attacker segment
    attacker_id = ObjectIdField()
    # Target segment
    target_id = ObjectIdField()
    # Optional attacker object id
    attacker_object_id = IntField()
    # Optional target object id
    target_object_id = IntField()
    # Trial is processed
    processed = BooleanField(default=False)
    # Trial outcome, i.e. keep, eat, feed, calcify
    outcome = StringField()
    # Error report
    error = StringField()
    # Schedule for expiration, only when processed is True
    expires = DateTimeField()

    def __str__(self):
        return str(self.id)

    @classmethod
    def schedule_trial(
        cls,
        attacker: NetworkSegment,
        target: NetworkSegment,
        attacker_object: Optional[ManagedObject] = None,
        target_object: Optional[ManagedObject] = None,
        reason="manual",
    ) -> Optional["BioSegTrial"]:
        if target.id == attacker.id:
            # Do not trial a segment against itself
            return None
        elif attacker.profile.is_persistent and attacker.parent != target.parent:
            # A persistent segment can be trialed only when it shares
            # a parent with the target (the ring case)
            return None
        trial = BioSegTrial(reason=reason,
                            attacker_id=attacker.id,
                            target_id=target.id,
                            processed=False)
        if attacker_object and target_object:
            trial.attacker_object_id = attacker_object.id
            trial.target_object_id = target_object.id
        trial.save()
        return trial

    def set_outcome(self, outcome: str) -> None:
        self.outcome = outcome
        self.processed = True
        self.error = None
        self._set_expires()
        self.save()

    def set_error(self, error: str, fatal: bool = False) -> None:
        self.error = error
        if fatal:
            self.processed = True
            self._set_expires()
        self.save()

    def _set_expires(self) -> None:
        """
        Set expires when necessary

        :return:
        """
        if config.biosegmentation.processed_trials_ttl:
            self.expires = datetime.datetime.now() + datetime.timedelta(
                seconds=config.biosegmentation.processed_trials_ttl)

    def retry(self) -> None:
        """
        Restart trial

        :return:
        """
        self.processed = None
        self.error = None
        self.outcome = None
        self.save()
Example n. 17
class BaseReport(Document, BaseRptProperty):
    SOURCE_TYPE_CHOICES = ((-1, '汇总'), (1, '站内'), (2, '站外'), (4, '无线站内'),
                           (5, '无线站外'), (6, '未知'), (11, '计算机'), (12, '移动端'))
    SEARCH_TYPE_CHOICES = (
        (-1, '汇总'), (0, '搜索'), (2, '定向'), (3, '店铺推广'), (4, '未知'))  # 1 ("category") has been discontinued

    date = DateTimeField(verbose_name="报表日期", required=True)
    search_type = IntField(verbose_name="报表类型",
                           default=3,
                           choices=SEARCH_TYPE_CHOICES)
    source = IntField(verbose_name="数据来源",
                      default=3,
                      choices=SOURCE_TYPE_CHOICES)
    # Basic metrics
    impressions = IntField(verbose_name="展现量", default=0)
    click = IntField(verbose_name="点击量", default=0)
    cost = IntField(verbose_name="总花费", default=0)
    avgpos = IntField(verbose_name="平均展现排名", default=0)  # not always present
    aclick = IntField(verbose_name="点击量", default=0)
    # Effect (conversion) metrics
    directpay = IntField(verbose_name="直接成交金额", default=0)
    indirectpay = IntField(verbose_name="间接成交金额", default=0)
    directpaycount = IntField(verbose_name="直接成交笔数", default=0)
    indirectpaycount = IntField(verbose_name="间接成交笔数", default=0)
    favitemcount = IntField(verbose_name="宝贝收藏数", default=0)
    favshopcount = IntField(verbose_name="店铺收藏数", default=0)
    carttotal = IntField(verbose_name="购物车总数", default=0)

    meta = {'abstract': True, 'indexes': ['date']}

    base_keys = ('impressions', 'click', 'cost', 'avgpos', 'aclick')  # basic report attributes
    effect_keys = ('directpay', 'indirectpay', 'directpaycount',
                   'indirectpaycount', 'favitemcount', 'favshopcount',
                   'carttotal')  # report effect attributes

    REPORT_CFG = (('SUMMARY', 'SUMMARY'),
                  )  # download configuration for reports, format [('search_type', 'source'), ...]
    IDENTITY = ''  # grouping key used when fetching reports; subclasses must set it
    RESERVED_DAYS = 30  # number of days reports are retained
    INIT_DAYS = 90  # number of days of reports downloaded on initialization

    @staticmethod
    def parse_source(top_obj):
        source = getattr(top_obj, 'source', 'SUMMARY')
        if source == 'SUMMARY':
            return -1
        elif source == 'SEARCH':
            return 0
        elif isinstance(source, int):
            return source
        else:
            try:  # asynchronous downloads return a string; try to coerce it
                return int(source)
            except ValueError:
                return 6

    @staticmethod
    def parse_search_type(top_obj):
        search_type = getattr(top_obj, 'search_type', 'SUMMARY')
        if search_type == 'SUMMARY':
            return -1
        elif search_type == 'SEARCH':
            return 0
        elif isinstance(search_type, int):
            return search_type
        else:
            return 4

    @classmethod
    def parse_rpt(cls, top_obj, rpt_type="base"):
        """
        effect-sample --> 20150916
        {
            'favitemcount': u'77',
            'favshopcount': u'7',
            'directpay': u'24200',
            'source': u'SUMMARY',
            'indirectpay': u'111485',
            'nick': u'\u53ed\u53ed\u54d2\u54d2',
            'indirectcarttotal': u'82',
            'indirectpaycount': u'14',
            'date': u'2015-09-09',
            'carttotal': u'144',
            'directpaycount': u'4',
            'directcarttotal': u'62'
        }

        base-sample --> 20150916
        {
            'aclick': u'982',
            'cpm': u'662.24',
            'ctr': u'0.69',
            'nick': u'\u53ed\u53ed\u54d2\u54d2',
            'cpc': u'96.10',
            'source': u'SUMMARY',
            'cost': u'94368',
            'date': u'2015-09-15',
            'impressions': u'142499',
            'click': u'982'
        }
        """
        rpt_dict = {
            key: int(float(getattr(top_obj, key, 0))) or 0
            for key in getattr(cls, "%s_keys" % rpt_type)
        }
        rpt_dict.update({
            'date': top_obj.date,
            'source': cls.parse_source(top_obj),
            'search_type': cls.parse_search_type(top_obj)
        })
        return {
            '%s_%s_%s' % (top_obj.date, rpt_dict['search_type'], rpt_dict['source']):
            rpt_dict
        }

    @classmethod
    def merge_rpt_dict(cls, base_rpt_dict, effect_rpt_dict, extra_dict):
        default_effect = {k: getattr(cls, k).default for k in cls.effect_keys}
        result_list = []
        for key, base in base_rpt_dict.items():
            tmp_dict = extra_dict.copy()
            tmp_dict.update(base)
            tmp_dict.update(effect_rpt_dict.get(key, default_effect))
            tmp_dict.update(
                {'date': datetime.datetime.strptime(base['date'], '%Y-%m-%d')})
            result_list.append(tmp_dict)
            del tmp_dict
        # result_list.sort(cmp = lambda x, y:cmp(x['date'], y['date']))
        return result_list

    @classmethod
    def get_snap_list(cls,
                      query_dict,
                      rpt_days=None,
                      start_date=None,
                      end_date=None,
                      source=-1,
                      search_type=-1):
        """
        :argument
            query_dict 查询条件
            rpt_days 报表天数 与下面的只用选择其一即可
            start_date 起始日期 字符串,形如'2015-10-12'
            end_date 结束日期 字符串,形如'2015-10-12'
            search_type 报表分类,目前可选(-1, 0, 2) 分别代表(汇总、搜索、定向),这里也可以写query格式,如 {'$in': [0,2]}
            source 报表来源,可选(-1,1,2,4,5) 分别代表(汇总、PC站内、PC站外、移动站内、移动站外),格式也同上
        :return
            {4258150:{'date':'2015-09-30', 'impressions': 300}}
        """
        result_dict = collections.defaultdict(list)
        base_query = {'source': source, 'search_type': search_type}
        if rpt_days:
            date_query = {
                'date': {
                    '$gte':
                    date_2datetime(datetime.date.today() -
                                   datetime.timedelta(days=rpt_days))
                }
            }
        elif start_date:
            date_query = {
                'date': {
                    '$gte': string_2datetime(start_date, fmt='%Y-%m-%d')
                }
            }
            if end_date:
                date_query['date'].update(
                    {'$lte': string_2datetime(end_date, fmt='%Y-%m-%d')})
        else:
            date_query = {}

        base_query.update(date_query)
        base_query.update(query_dict)
        cursor = cls._get_collection().find(base_query).sort([('date', 1)])
        for i in cursor:
            result_dict[i[cls.IDENTITY]].append(ReportDictWrapper(i))
        return result_dict

    @classmethod
    def aggregate_rpt(cls,
                      query_dict,
                      group_keys,
                      rpt_days=None,
                      start_date=None,
                      end_date=None,
                      source=-1,
                      search_type=-1):
        """
        :argument
            query_dict 查询条件
            group_keys 类似mysql的group by字段,即分组的键,形如'adgroup_id,source,search_type'
            rpt_days 报表天数 与下面的只用选择其一即可
            start_date 起始日期 字符串,形如'2015-10-12'
            end_date 结束日期 字符串,形如'2015-10-12'
            search_type 报表分类,目前可选(-1, 0, 2) 分别代表(汇总、搜索、定向),这里也可以写query格式,如 {'$in': [0,2]}
            source 报表来源,可选(-1,1,2,4,5) 分别代表(汇总、PC站内、PC站外、移动站内、移动站外),格式也同上
        :return
            [{'_id':{group_key1: <value>, group_key2: <value>}, 'click': 200}]
        """
        base_query = {'source': source, 'search_type': search_type}
        if rpt_days:
            date_query = {
                'date': {
                    '$gte':
                    date_2datetime(datetime.date.today() -
                                   datetime.timedelta(days=rpt_days))
                }
            }
        elif start_date:
            date_query = {
                'date': {
                    '$gte': string_2datetime(start_date, fmt='%Y-%m-%d')
                }
            }
            if end_date:
                date_query['date'].update(
                    {'$lte': string_2datetime(end_date, fmt='%Y-%m-%d')})
        else:
            date_query = {}
        base_query.update(date_query)
        base_query.update(query_dict)
        group_key_dict = {
            key.strip(): '$%s' % key.strip()
            for key in group_keys.split(",")
        }
        pipeline = [
            {
                '$match': base_query
            },
            {
                '$group': {
                    '_id': group_key_dict,
                    'impressions': {
                        '$sum': '$impressions'
                    },
                    'click': {
                        '$sum': '$click'
                    },
                    'cost': {
                        '$sum': '$cost'
                    },
                    'aclick': {
                        '$sum': '$aclick'
                    },
                    'avgpos': {
                        '$last': '$avgpos'
                    },
                    'directpay': {
                        '$sum': '$directpay'
                    },
                    'indirectpay': {
                        '$sum': '$indirectpay'
                    },
                    'directpaycount': {
                        '$sum': '$directpaycount'
                    },
                    'indirectpaycount': {
                        '$sum': '$indirectpaycount'
                    },
                    'favitemcount': {
                        '$sum': '$favitemcount'
                    },
                    'favshopcount': {
                        '$sum': '$favshopcount'
                    },
                    'carttotal': {
                        '$sum': '$carttotal'
                    },
                    'rpt_days': {
                        '$sum': 1
                    }
                }
            },
        ]
        result = cls._get_collection().aggregate(pipeline)
        if result['ok']:
            return result['result']
        else:
            log.error("aggregate_rpt error, result=%s" % result)
        return []

    @classmethod
    def get_summed_rpt(cls,
                       query_dict,
                       rpt_days=None,
                       start_date=None,
                       end_date=None,
                       source=-1,
                       search_type=-1):
        """aggregate_rpt的常用版
        :argument
            query_dict 查询条件
            rpt_days 报表天数 与下面的只用选择其一即可
            start_date 起始日期 字符串,形如'2015-10-12'
            end_date 结束日期 字符串,形如'2015-10-12'
            source 报表来源,默认给汇总
            search_type 报表类型,默认给汇总
        :return
            {<IDENTITY>: {'impressions':200, 'click': 20}} IDENTITY即定义的每个报表的结构主键
        """
        result_dict = {}
        result_list = cls.aggregate_rpt(query_dict, cls.IDENTITY, rpt_days,
                                        start_date, end_date, source,
                                        search_type)
        for result in result_list:
            temp_key = result.pop('_id')
            result_dict.update(
                {temp_key[cls.IDENTITY]: ReportDictWrapper(result)})
        return result_dict

    @classmethod
    def get_platform_summed_rpt(cls,
                                query_dict,
                                rpt_days=None,
                                start_date=None,
                                end_date=None,
                                platform=-1):
        platform_source_map = {-1: (-1, -1), 0: (12, 0), 1: (11, 0)}
        source, search_type = platform_source_map[platform]
        return cls.get_summed_rpt(query_dict=query_dict,
                                  rpt_days=rpt_days,
                                  start_date=start_date,
                                  end_date=end_date,
                                  source=source,
                                  search_type=search_type)

    @classmethod
    def get_split_rpt(cls,
                      query_dict,
                      group_keys,
                      rpt_days=None,
                      start_date=None,
                      end_date=None,
                      source=-1,
                      search_type=-1):
        """aggregate_rpt的常用版
        :argument
            query_dict 查询条件
            rpt_days 报表天数 与下面的只用选择其一即可
            start_date 起始日期 字符串,形如'2015-10-12'
            end_date 结束日期 字符串,形如'2015-10-12'
            source 报表来源,默认给汇总
            search_type 报表类型,默认给汇总
        :return
            {<IDENTITY>: {'impressions':200, 'click': 20}} IDENTITY即定义的每个报表的结构主键
        """
        result_list = []
        temp_list = cls.aggregate_rpt(query_dict, group_keys, rpt_days,
                                      start_date, end_date, source,
                                      search_type)
        for result in temp_list:
            # temp_key = result['_id'][cls.IDENTITY]
            # source = result['_id']['source']
            # result.pop('_id')
            _id = result.pop('_id')
            temp_result = ReportDictWrapper(result)
            # temp_result['source'] = source
            # temp_result[cls.IDENTITY] = temp_key
            temp_result.update(_id)
            result_list.append(temp_result)
        return result_list

    @classmethod
    def update_rpt_list(cls, remove_query, insert_list):
        """更新报表,更新前先删除当前条件下的,以免重复"""
        coll = cls._get_collection()
        coll.remove(remove_query)
        return coll.insert(insert_list)

    @classmethod
    def clean_outdated(cls):
        """清除过期数据"""
        cls._get_collection().remove({
            'date': {
                '$lte':
                datetime.datetime.now() -
                datetime.timedelta(days=cls.RESERVED_DAYS)
            }
        })
        return True
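A sketch of a concrete report subclass (hypothetical fields): IDENTITY names the grouping field that get_snap_list and get_summed_rpt key their results on.

class AdgroupReport(BaseReport):
    campaign_id = IntField(required=True)
    adgroup_id = IntField(required=True)
    IDENTITY = 'adgroup_id'
    meta = {'collection': 'adgroup_report'}

# Sum the last 7 days of summary rows for one campaign:
summed = AdgroupReport.get_summed_rpt({'campaign_id': 123}, rpt_days=7)
# -> {<adgroup_id>: ReportDictWrapper({'impressions': ..., 'click': ...})}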
Example n. 18
class CatStatic(Document):
    cat_id = IntField(verbose_name='类目ID', required=True)
    rpt_date = DateTimeField(verbose_name="报表日期", required=True)
    impression = IntField(verbose_name='单个类目下所有的impression', default=0)
    click = IntField(verbose_name='单个类目下所有的click', default=0)
    cost = IntField(verbose_name='单个类目下所有的cost单位(分)', default=0)
    directtransaction = IntField(verbose_name='直接成交金额', default=0)
    indirecttransaction = IntField(verbose_name='间接成交金额', default=0)
    directtransactionshipping = IntField(verbose_name='直接成交笔数', default=0)
    indirecttransactionshipping = IntField(verbose_name='间接成交笔数', default=0)
    favitemtotal = IntField(verbose_name='宝贝收藏数', default=0)
    favshoptotal = IntField(verbose_name='店铺收藏数', default=0)
    transactionshippingtotal = IntField(verbose_name='总的成交笔数', default=0)
    transactiontotal = IntField(verbose_name='成交总金额', default=0)
    favtotal = IntField(verbose_name='总的收藏数,包括宝贝收藏和店铺收藏', default=0)
    competition = IntField(verbose_name='竞争度', default=0)
    ctr = FloatField(verbose_name='点击率', default=0)
    cpc = FloatField(verbose_name='平均点击花费(分)', default=0)
    roi = FloatField(verbose_name='投入产出比', default=0)
    coverage = FloatField(verbose_name='点击转化率', default=0)

    meta = {
        'collection': "kwlib_catstatic",
        "db_alias": "kwlib-db",
        'indexes': ['cat_id', 'rpt_date'],
        "shard_key": ('rpt_date', )
    }

    rpt_field_dict = {
        'impression': int,
        'click': int,
        'cost': int,
        'directtransaction': int,
        'indirecttransaction': int,
        'directtransactionshipping': int,
        'indirecttransactionshipping': int,
        'favitemtotal': int,
        'favshoptotal': int,
        'transactionshippingtotal': int,
        'transactiontotal': int,
        'favtotal': int,
        'competition': int,
        'ctr': float,
        'cpc': float,
        'roi': float,
        'coverage': float
    }

    @classmethod
    def update_cat_market_data(cls, cat_id_list=None, rpt_date=None):
        """
        Update market-wide data for the given categories
        """
        log.info('skip update cat market_data')
        return True  # currently disabled; the code below is kept for reference
        if not rpt_date:
            rpt_date = datetime.date.today() - datetime.timedelta(days=1)
        if not cat_id_list:
            cat_cur = cat_coll.find({}, {'_id'})
            cat_id_list = [cat['_id'] for cat in cat_cur]

        rpt_date_str = datetime.datetime.strftime(rpt_date, '%Y-%m-%d')
        new_rpt_date = datetime.datetime.strptime(
            rpt_date_str, '%Y-%m-%d')  # the database cannot store a bare date
        result_dict = cat_data_list(cat_id_list, rpt_date_str, rpt_date_str)
        insert_list = []

        for cat_id, cat_data in result_dict.iteritems():
            cat_dict = {'cat_id': cat_id, 'rpt_date': new_rpt_date}
            for rpt_field, func in cls.rpt_field_dict.iteritems():
                cat_dict[rpt_field] = func(getattr(cat_data, rpt_field,
                                                   0))  # mind the unit conversion
            insert_list.append(cat_dict)

        cat_static_coll.remove({
            'rpt_date': new_rpt_date,
            'cat_id': {
                '$in': cat_id_list
            }
        })
        cat_static_coll.insert(insert_list)
        log.info('update cat market, cat_count=%s' % len(insert_list))
        return

    @classmethod
    def __get_rpt_list_8id(cls, cat_id):
        cachekey = CacheKey.KWLIB_CAT_STATIC_RPT % cat_id
        cat_data_list = CacheAdpter.get(cachekey, 'web', None)
        if cat_data_list is None:  # cannot use "if not ...": the cache may hold a valid empty list
            rpt_day = 15
            now = datetime.datetime.now()

            # The Taobao API is offline for maintenance; temporarily use old data
            rpt_day = 7
            now = datetime.datetime(2017, 3, 12)

            start_date = date_2datetime(now - datetime.timedelta(days=rpt_day))
            objs = cls.objects.filter(
                cat_id=cat_id, rpt_date__gte=start_date).order_by('rpt_date')
            cat_data_list = [obj for obj in objs]
            # The cache expires at 07:00 the next day, once yesterday's data has been refreshed in the database
            timeout = (date_2datetime(now) +
                       datetime.timedelta(days=1, hours=7) - now).seconds
            CacheAdpter.set(cachekey, cat_data_list, 'web', timeout)
        return cat_data_list

    @classmethod
    def get_rpt_list_8id(cls, cat_id, rpt_days=15):
        if rpt_days > 15:  # frequently used interface; cap at 15 days
            rpt_days = 15

        rpt_list = cls.__get_rpt_list_8id(cat_id)
        return rpt_list  # the Taobao API is offline for maintenance; temporarily use old data
        # start_date = date_2datetime(datetime.datetime.now() - datetime.timedelta(days = rpt_days))
        # result_list = [rpt for rpt in rpt_list if rpt.rpt_date >= start_date]
        # return result_list

    @classmethod
    def get_market_data_8id(cls, cat_id, rpt_days=7):
        result_dict = {rf: 0 for rf in cls.rpt_field_dict.iterkeys()}
        rpt_list = cls.get_rpt_list_8id(cat_id, rpt_days)
        for rpt in rpt_list:
            for k in result_dict.iterkeys():
                result_dict[k] += getattr(rpt, k, 0)
        if rpt_list:
            result_dict = {
                k: round(v / len(rpt_list), 2)
                for k, v in result_dict.iteritems()
            }  # take the average
        return result_dict

    @classmethod
    def get_market_data(cls, cat_id_list, tj_day=7):
        result_dict = {}
        for cat_id in cat_id_list:
            result_dict[cat_id] = cls.get_market_data_8id(cat_id, tj_day)
        return result_dict
Example n. 19
class DataSourceCache(Document):
    meta = {
        "collection": "datasource_cache",
        "strict": False,
        "auto_create_index": False,
        "indexes": [{"fields": ["expires"], "expireAfterSeconds": 0}],
    }

    name = StringField(primary_key=True)
    data = BinaryField()
    expires = DateTimeField()
    chunks = IntField(min_value=0, max_value=5)
    version = IntField()
    # Next chunk name
    next_name = StringField()

    @classmethod
    def get_data(cls, name):
        """
        Load cached data
        :param name:
        :return:
        """
        data = []
        coll = DataSourceCache._get_collection()
        while name:
            d = coll.find_one({"_id": name})
            if not d:
                # Not found or broken chain
                return None
            if d["version"] != CURRENT_VERSION:
                # Version bump, rebuild cache
                return None
            data += [d["data"]]
            # Proceed to next chunk when necessary
            name = d.get("next_name", None)
        # Finally, decode the result
        # (avoid concatenation when there is only a single chunk)
        return cls.decode(b"".join(data) if len(data) > 1 else data[0])

    @classmethod
    def set_data(cls, name, data, ttl):
        """
        Write data to cache
        :param name:
        :param data:
        :param ttl:
        :return:
        """
        data = cls.encode(data)
        coll = DataSourceCache._get_collection()
        n_chunk = 0
        fmt_chunk_name = "%s.%%d" % name
        expires = datetime.datetime.now() + datetime.timedelta(seconds=ttl)
        while data:
            # Split chunk and rest of data
            chunk, data = data[:MAX_DATA_SIZE], data[MAX_DATA_SIZE:]
            # Generate next chunk name when data left
            if data:
                n_chunk += 1
                next_name = fmt_chunk_name % n_chunk
            else:
                next_name = None
            logger.info("Writing chunk %s", name)
            # Update chunk
            coll.update_one(
                {"_id": name},
                {
                    "$set": {
                        "data": bson.Binary(chunk),
                        "version": CURRENT_VERSION,
                        "expires": expires,
                        "next_name": next_name,
                    },
                    "$setOnInsert": {"name": name},
                },
                upsert=True,
            )
            # Name for next chunk
            name = next_name

    @classmethod
    def encode(cls, data):
        """
        v2 encoding: bz2.compress
        :param data:
        :return:
        """
        return bz2.compress(data, 9)

    @classmethod
    def decode(cls, data):
        """
        v2 decoding: bz2
        :param data:
        :return:
        """
        return bz2.decompress(data)
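A round-trip sketch: set_data compresses the payload, then splits it into chained chunks ("name", "name.1", ...) of at most MAX_DATA_SIZE bytes, and get_data follows next_name to reassemble them (incompressible data is used here to force multiple chunks):

import os

payload = os.urandom(2 * MAX_DATA_SIZE)  # incompressible, spans several chunks
DataSourceCache.set_data("report:all", payload, ttl=3600)
assert DataSourceCache.get_data("report:all") == payload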
Example n. 20
class Booking(Document):
    meta = {"collection": "booking"}
    meta = {'db_alias': 'db1'}
    start_time = DateTimeField(default=datetime.utcnow)
    end_time = DateTimeField(default=datetime.utcnow)
    booking_price = FloatField()
Example n. 21
class Interface(Document):
    """
    Interfaces
    """

    meta = {
        "collection": "noc.interfaces",
        "strict": False,
        "auto_create_index": False,
        "indexes": [
            ("managed_object", "name"),
            "mac",
            ("managed_object", "ifindex"),
            "service",
            "aggregated_interface",
        ],
    }
    managed_object = ForeignKeyField(ManagedObject)
    name = StringField()  # Normalized via Profile.convert_interface_name
    # Optional default interface name in case the `name` can be reconfigured
    default_name = StringField()
    type = StringField(choices=[(x, x) for x in INTERFACE_TYPES])
    description = StringField(required=False)
    ifindex = IntField(required=False)
    mac = StringField(required=False)
    aggregated_interface = PlainReferenceField("self", required=False)
    enabled_protocols = ListField(
        StringField(choices=[(x, x) for x in INTERFACE_PROTOCOLS]), default=[])
    profile = PlainReferenceField(InterfaceProfile,
                                  default=InterfaceProfile.get_default_profile)
    # profile locked on manual user change
    profile_locked = BooleanField(required=False, default=False)
    #
    project = ForeignKeyField(Project)
    state = ForeignKeyField(ResourceState)
    vc_domain = ForeignKeyField(VCDomain)
    # Current status
    admin_status = BooleanField(required=False)
    oper_status = BooleanField(required=False)
    oper_status_change = DateTimeField(required=False,
                                       default=datetime.datetime.now)
    full_duplex = BooleanField(required=False)
    in_speed = IntField(required=False)  # Input speed, kbit/s
    out_speed = IntField(required=False)  # Output speed, kbit/s
    bandwidth = IntField(required=False)  # Configured bandwidth, kbit/s
    # Interface hints: uplink, uni, nni
    hints = ListField(StringField(required=False))
    # Coverage
    coverage = PlainReferenceField(Coverage)
    technologies = ListField(StringField())
    # External NRI interface name
    nri_name = StringField()
    #
    service = ReferenceField(Service)

    PROFILE_LINK = "profile"

    def __str__(self):
        return "%s: %s" % (self.managed_object.name, self.name)

    def iter_changed_datastream(self, changed_fields=None):
        if config.datastream.enable_managedobject:
            yield "managedobject", self.managed_object.id

    def save(self, *args, **kwargs):
        if not hasattr(self,
                       "_changed_fields") or "name" in self._changed_fields:
            self.name = self.managed_object.get_profile(
            ).convert_interface_name(self.name)
        if (not hasattr(self, "_changed_fields")
                or "mac" in self._changed_fields) and self.mac:
            self.mac = MACAddressParameter().clean(self.mac)
        try:
            super().save(*args, **kwargs)
        except Exception as e:
            raise ValueError("%s: %s" % (e.__doc__, e.message))
        if not hasattr(self,
                       "_changed_fields") or "service" in self._changed_fields:
            ServiceSummary.refresh_object(self.managed_object)

    def on_delete(self):
        from .macdb import MACDB

        # Remove all subinterfaces
        for si in self.subinterface_set.all():
            si.delete()
        # Unlink
        link = self.link
        if link:
            self.unlink()
        # Flush MACDB
        MACDB.objects.filter(interface=self.id).delete()

    @property
    def link(self):
        """
        Return Link instance or None
        :return:
        """
        if self.type == "aggregated":
            q = {
                "interfaces__in": [self.id] + [i.id for i in self.lag_members]
            }
        else:
            q = {"interfaces": self.id}
        return Link.objects.filter(**q).first()

    @property
    def is_linked(self):
        """
        Check interface is linked
        :returns: True if interface is linked, False otherwise
        """
        if self.type == "aggregated":
            q = {
                "interfaces": {
                    "$in": [self.id] + [i.id for i in self.lag_members]
                }
            }
        else:
            q = {"interfaces": self.id}
        return bool(Link._get_collection().with_options(
            read_preference=ReadPreference.SECONDARY_PREFERRED).find_one(q))

    def unlink(self):
        """
        Remove existing link.
        Raise ValueError if interface is not linked
        """
        link = self.link
        if link is None:
            raise ValueError("Interface is not linked")
        if link.is_ptp or link.is_lag:
            link.delete()
        elif len(link.interfaces) == 2:
            # Depleted cloud
            link.delete()
        else:
            link.interfaces = [i for i in link.interfaces if i.id != self.id]
            link.save()

    def link_ptp(self, other, method=""):
        """
        Create p-t-p link with other interface
        Raise ValueError if either of interface already connected.
        :param other: Other Iface for link
        :param method: Linking method
        :type other: Interface
        :returns: Link instance
        """
        def link_mismatched_lag(agg, phy):
            """
            Try to link LAG to physical interface
            :param agg:
            :param phy:
            :return:
            """
            l_members = [i for i in agg.lag_members if i.oper_status]
            if len(l_members) > 1:
                raise ValueError("More then one active interface in LAG")
            link = Link(interfaces=l_members + [phy], discovery_method=method)
            link.save()
            return link

        # Try to check existing LAG
        el = Link.objects.filter(interfaces=self.id).first()
        if el and other not in el.interfaces:
            el = None
        if (self.is_linked or other.is_linked) and not el:
            raise ValueError("Already linked")
        if self.id == other.id:
            raise ValueError("Cannot link with self")
        if self.type in ("physical", "management"):
            if other.type in ("physical", "management"):
                # Refine LAG
                if el:
                    left_ifaces = [
                        i for i in el.interfaces if i not in (self, other)
                    ]
                    if left_ifaces:
                        el.interfaces = left_ifaces
                        el.save()
                    else:
                        el.delete()
                #
                link = Link(interfaces=[self, other], discovery_method=method)
                link.save()
                return link
            elif other.type == "aggregated" and other.profile.allow_lag_mismatch:
                return link_mismatched_lag(other, self)
            else:
                raise ValueError("Cannot connect %s interface to %s" %
                                 (self.type, other.type))
        elif self.type == "aggregated":
            # LAG
            if other.type == "aggregated":
                # Check LAG size match
                # Skip already linked members
                l_members = [i for i in self.lag_members if not i.is_linked]
                r_members = [i for i in other.lag_members if not i.is_linked]
                if len(l_members) != len(r_members):
                    raise ValueError("LAG size mismatch")
                # Create link
                if l_members:
                    link = Link(interfaces=l_members + r_members,
                                discovery_method=method)
                    link.save()
                    return link
                else:
                    return
            elif self.profile.allow_lag_mismatch:
                return link_mismatched_lag(self, other)
            else:
                raise ValueError("Cannot connect %s interface to %s" %
                                 (self.type, other.type))
        raise ValueError("Cannot link")

    @classmethod
    def get_interface(cls, s):
        """
        Parse <managed object>@<interface> string
        and return interface instance or None
        """
        if "@" not in s:
            raise ValueError("Invalid interface: %s" % s)
        o, i = s.rsplit("@", 1)
        # Get managed object
        try:
            mo = ManagedObject.objects.get(name=o)
        except ManagedObject.DoesNotExist:
            raise ValueError("Invalid manged object: %s" % o)
        # Normalize interface name
        i = mo.get_profile().convert_interface_name(i)
        # Look for interface
        iface = Interface.objects.filter(managed_object=mo.id, name=i).first()
        return iface

    @property
    def subinterface_set(self):
        from .subinterface import SubInterface

        return SubInterface.objects.filter(interface=self.id)

    @property
    def lag_members(self):
        if self.type != "aggregated":
            raise ValueError(
                "Cannot get LAG members for non-aggregated interface")
        return Interface.objects.filter(aggregated_interface=self.id)

    @property
    def effective_vc_domain(self):
        if self.type in ("null", "tunnel", "other", "unknown"):
            return None
        if self.vc_domain:
            return self.vc_domain
        if self.managed_object.vc_domain:
            return self.managed_object.vc_domain
        return VCDomain.get_default()

    @property
    def status(self):
        """
        Returns interface status in form of
        Up/100/Full
        """
        def humanize_speed(speed):
            if not speed:
                return "-"
            for t, n in [(1000000, "G"), (1000, "M"), (1, "k")]:
                if speed >= t:
                    if speed // t * t == speed:
                        return "%d%s" % (speed // t, n)
                    else:
                        return "%.2f%s" % (float(speed) / t, n)
            return str(speed)

        s = [{True: "Up", False: "Down", None: "-"}[self.oper_status]]
        # Speed
        if self.oper_status:
            if self.in_speed and self.in_speed == self.out_speed:
                s += [humanize_speed(self.in_speed)]
            else:
                s += [
                    humanize_speed(self.in_speed),
                    humanize_speed(self.out_speed)
                ]
            s += [{True: "Full", False: "Half", None: "-"}[self.full_duplex]]
        else:
            s += ["-", "-"]
        return "/".join(s)

    def set_oper_status(self, status):
        """
        Set current oper status
        """
        if self.oper_status == status:
            return
        now = datetime.datetime.now()
        if self.oper_status != status and (not self.oper_status_change
                                           or self.oper_status_change < now):
            self.update(oper_status=status, oper_status_change=now)
            if self.profile.status_change_notification:
                logger.debug(
                    "Sending status change notification to %s",
                    self.profile.status_change_notification.name,
                )
                self.profile.status_change_notification.notify(
                    subject="[%s] Interface %s(%s) is %s" % (
                        self.managed_object.name,
                        self.name,
                        self.description or "",
                        "up" if status else "down",
                    ),
                    body="\nInterface %s (%s) is %s \nOn equipment IP - %s \n%s"
                    % (self.name, self.description or "",
                       "up" if status else "down", self.managed_object.address,
                       self.managed_object.description or ""),
                )

    @property
    def parent(self) -> "Interface":
        """
        Returns aggregated interface for LAG or
        self for non-aggregated interface
        """
        if self.aggregated_interface:
            return self.aggregated_interface
        else:
            return self

    def get_profile(self) -> InterfaceProfile:
        if self.profile:
            return self.profile
        return InterfaceProfile.get_default_profile()
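The speed part of the status string can be checked standalone; the helper below mirrors the nested humanize_speed above (speeds are in kbit/s):

def humanize_speed(speed):
    if not speed:
        return "-"
    for t, n in [(1000000, "G"), (1000, "M"), (1, "k")]:
        if speed >= t:
            if speed // t * t == speed:
                return "%d%s" % (speed // t, n)
            return "%.2f%s" % (float(speed) / t, n)
    return str(speed)

assert humanize_speed(100000) == "100M"  # a 100 Mbit/s interface
assert humanize_speed(2500) == "2.50M"
assert humanize_speed(None) == "-"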
Example n. 22
class Term(Document):
    code = IntField(required=True)
    name = StringField(required=True)
    start = DateTimeField()
    end = DateTimeField()
Example n. 23
class VLAN(Document):
    meta = {
        "collection": "vlans",
        "strict": False,
        "auto_create_index": False,
        "indexes": [{
            "fields": ["segment", "vlan"],
            "unique": True
        }, "expired"],
    }

    name = StringField()
    profile = PlainReferenceField(VLANProfile)
    vlan = IntField(min_value=1, max_value=4095)
    segment = PlainReferenceField(NetworkSegment)
    description = StringField()
    state = PlainReferenceField(State)
    project = ForeignKeyField(Project)
    # Link to gathering VPN
    vpn = PlainReferenceField(VPN)
    # VxLAN VNI
    vni = IntField()
    # Translation rules when passing border
    translation_rule = StringField(choices=[
        # Rewrite tag to parent vlan's
        ("map", "map"),
        # Append parent tag as S-VLAN
        ("push", "push"),
    ])
    #
    parent = PlainReferenceField("self")
    # Automatically apply segment translation rule
    apply_translation = BooleanField(default=True)
    # Labels
    labels = ListField(StringField())
    effective_labels = ListField(StringField())
    # Integration with external NRI and TT systems
    # Reference to remote system object has been imported from
    remote_system = PlainReferenceField(RemoteSystem)
    # Object id in remote system
    remote_id = StringField()
    # Object id in BI
    bi_id = LongField(unique=True)
    # Discovery integration
    # Timestamp when object first discovered
    first_discovered = DateTimeField()
    # Timestamp when object last seen by discovery
    last_seen = DateTimeField()
    # Timestamp when send "expired" event
    expired = DateTimeField()

    _id_cache = cachetools.TTLCache(maxsize=100, ttl=60)
    _bi_id_cache = cachetools.TTLCache(maxsize=100, ttl=60)

    def __str__(self):
        return self.name

    @classmethod
    @cachetools.cachedmethod(operator.attrgetter("_id_cache"),
                             lock=lambda _: id_lock)
    def get_by_id(cls, id):
        return VLAN.objects.filter(id=id).first()

    @classmethod
    @cachetools.cachedmethod(operator.attrgetter("_bi_id_cache"),
                             lock=lambda _: id_lock)
    def get_by_bi_id(cls, id):
        return VLAN.objects.filter(bi_id=id).first()

    def clean(self):
        super().clean()
        self.segment = NetworkSegment.get_border_segment(self.segment)
        if self.translation_rule and not self.parent:
            self.translation_rule = None

    def refresh_translation(self):
        """
        Set VLAN translation according to segment settings
        :return:
        """
        if not self.apply_translation:
            return
        # Find matching rule
        for vt in self.segment.vlan_translation:
            if vt.filter.check(self.vlan):
                logger.debug(
                    "[%s|%s|%s] Matching translation rule <%s|%s|%s>",
                    self.segment.name,
                    self.name,
                    self.vlan,
                    vt.filter.expression,
                    vt.rule,
                    vt.parent_vlan.vlan,
                )
                if self.parent != vt.parent_vlan or self.translation_rule != vt.rule:
                    self.modify(parent=vt.parent_vlan,
                                translation_rule=vt.rule)
                return
        # No matching rule
        if self.parent or self.translation_rule:
            logger.debug("[%s|%s|%s] No matching translation rule, resetting",
                         self.segment.name, self.name, self.vlan)
            self.modify(parent=None, translation_rule=None)

    def on_save(self):
        self.refresh_translation()

    @classmethod
    def can_set_label(cls, label):
        if label.enable_vlan:
            return True
        return False
Example n. 24
class Uptime(Document):
    meta = {
        "collection": "noc.fm.uptimes",
        "strict": False,
        "auto_create_index": False,
        "indexes": [("object", "stop")],
    }

    object = IntField()
    start = DateTimeField()
    stop = DateTimeField()  # None for active uptime
    last = DateTimeField()  # Last update
    last_value = FloatField()  # Last registered value

    SEC = datetime.timedelta(seconds=1)
    FWRAP = float((1 << 32) - 1) / 100.0
    WRAP = datetime.timedelta(seconds=FWRAP)
    WPREC = 0.1  # Wrap precision

    def __str__(self):
        return "%d" % self.object

    @classmethod
    def register(cls, managed_object: ManagedObject, uptime: int) -> Optional[datetime.datetime]:
        """
        Register uptime
        :param managed_object: Managed object reference
        :param uptime: Registered uptime in seconds
        :returns: Reboot timestamp if detected, None otherwise
        """
        if not uptime:
            return None
        oid = managed_object.id
        now = datetime.datetime.now()
        delta = datetime.timedelta(seconds=uptime)
        logger.debug("[%s] Register uptime %s", managed_object.name, delta)
        # Update data
        c = cls._get_collection()
        d = c.find_one({"object": oid, "stop": None})
        is_rebooted = False
        ts: Optional[datetime.datetime] = None
        if d:
            # Check for reboot
            if d["last_value"] > uptime:
                # Check for counter wrapping
                # Get wrapped delta
                dl = cls.FWRAP - d["last_value"] + uptime
                # Get timestamp delta
                tsd = (now - d["last"]).total_seconds()
                if abs(dl - tsd) > tsd * cls.WPREC:
                    is_rebooted = True
                else:
                    logger.debug("Counter wrap detected")
            if is_rebooted:
                # Reboot registered
                # Closing existing uptime
                ts = now - delta
                logger.debug(
                    "[%s] Closing uptime (%s - %s, delta %s)",
                    managed_object.name,
                    d["start"],
                    ts - cls.SEC,
                    delta,
                )
                c.update_one({"_id": d["_id"]}, {"$set": {"stop": ts - cls.SEC}})
                # Start new uptime
                logger.debug("[%s] Starting new uptime from %s", managed_object.name, ts)
                c.insert_one(
                    {"object": oid, "start": ts, "stop": None, "last": now, "last_value": uptime}
                )
                # Record the reboot event
                Reboot.register(managed_object, ts, d["last"])
            else:
                logger.debug(
                    "[%s] Refreshing existing uptime (%s - %s)",
                    managed_object.name,
                    d["start"],
                    now,
                )
                c.update_one({"_id": d["_id"]}, {"$set": {"last": now, "last_value": uptime}})
        else:
            # First uptime
            logger.debug("[%s] First uptime from %s", managed_object.name, now)
            c.insert_one(
                {
                    "object": oid,
                    "start": now - delta,
                    "stop": None,
                    "last": now,
                    "last_value": uptime,
                }
            )
        return ts
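`FWRAP` is the period of a 32-bit SNMP sysUpTime counter (the counter ticks in hundredths of a second), roughly 497 days. The register() method above tells a counter wrap apart from a real reboot by checking whether the wrapped counter delta agrees with the wall-clock delta. A self-contained sketch of the same arithmetic:

import datetime

FWRAP = float((1 << 32) - 1) / 100.0  # ~497 days, in seconds
WPREC = 0.1

def looks_rebooted(last_value, uptime, last, now):
    """Replicates the wrap check from Uptime.register (sketch)."""
    if last_value <= uptime:
        return False  # counter still increasing: no reboot
    dl = FWRAP - last_value + uptime       # seconds elapsed if the counter wrapped
    tsd = (now - last).total_seconds()     # seconds actually elapsed
    return abs(dl - tsd) > tsd * WPREC     # large mismatch => real reboot

now = datetime.datetime.now()
# Counter near its maximum one hour ago, small value now: a wrap, not a reboot
assert not looks_rebooted(FWRAP - 1800, 1800, now - datetime.timedelta(hours=1), now)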
Example n. 25
class Movie(Document):
    meta = {"collection": "movie"}
    title = StringField(required=True)
    director = ReferenceField(Director, required=True)
    actors = ListField(ReferenceField(Actor))
    release_date = DateTimeField()
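A hedged usage sketch: it assumes `Director` and `Actor` are Documents with a `name` field and that a local MongoDB instance is available; none of that is part of the example above.

from datetime import datetime

from mongoengine import connect

connect("moviedb")  # assumption: a local MongoDB instance

ridley = Director(name="Ridley Scott").save()     # assumes Director has a name field
weaver = Actor(name="Sigourney Weaver").save()    # assumes Actor has a name field
movie = Movie(
    title="Alien",
    director=ridley,          # persisted as a reference, not an embedded copy
    actors=[weaver],
    release_date=datetime(1979, 5, 25),
).save()
assert movie.director.name == "Ridley Scott"  # dereferenced on access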
Example n. 26
class Task(EmbeddedDocument):

    name = StringField()
    deadline = DateTimeField(default=datetime.now)
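Note that `default=datetime.now` passes the callable itself, so the deadline is computed when each Task instance is created; writing `default=datetime.now()` would bake in a single timestamp evaluated once at class-definition time. A minimal contrast:

from datetime import datetime

from mongoengine import DateTimeField, EmbeddedDocument

class FreshTask(EmbeddedDocument):
    deadline = DateTimeField(default=datetime.now)    # evaluated per instance

class StaleTask(EmbeddedDocument):
    deadline = DateTimeField(default=datetime.now())  # evaluated once, at import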
Example n. 27
class IPLease(EmbeddedDocument):
    address = StringField()
    start_time = DateTimeField()
    end_time = DateTimeField()
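An EmbeddedDocument is stored inline in its parent document rather than in its own collection. A hedged sketch of a hypothetical parent (the `Host` class and its fields are illustrative, not part of the example above):

import datetime

from mongoengine import Document, EmbeddedDocumentListField, StringField

class Host(Document):
    hostname = StringField(required=True)
    leases = EmbeddedDocumentListField(IPLease)  # stored inline, no separate collection

now = datetime.datetime.now()
host = Host(
    hostname="core-sw-01",
    leases=[IPLease(address="10.0.0.5",
                    start_time=now,
                    end_time=now + datetime.timedelta(hours=8))],
)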
Example n. 28
class Contributions(DynamicDocument):
    project = LazyReferenceField(
        "Projects", required=True, passthrough=True, reverse_delete_rule=CASCADE
    )
    identifier = StringField(required=True, help_text="material/composition identifier")
    formula = StringField(help_text="formula (set dynamically)")
    is_public = BooleanField(
        required=True, default=False, help_text="public/private contribution"
    )
    last_modified = DateTimeField(
        required=True, default=datetime.utcnow, help_text="time of last modification"
    )
    data = DictField(
        default={}, validation=valid_dict, help_text="simple free-form data"
    )
    # TODO in flask-mongorest: also get all ReferenceFields when download requested
    structures = ListField(ReferenceField("Structures"), default=list, max_length=10)
    tables = ListField(ReferenceField("Tables"), default=list, max_length=10)
    notebook = ReferenceField("Notebooks")
    meta = {
        "collection": "contributions",
        "indexes": [
            "project",
            "identifier",
            "formula",
            "is_public",
            "last_modified",
            {"fields": [(r"data.$**", 1)]},
        ],
    }

    @classmethod
    def pre_save_post_validation(cls, sender, document, **kwargs):
        if kwargs.get("skip"):
            return

        # set formula field
        if hasattr(document, "formula"):
            formulae = current_app.config["FORMULAE"]
            document.formula = formulae.get(document.identifier, document.identifier)

        # project is LazyReferenceField
        project = document.project.fetch()

        # run data through Pint Quantities and save as dicts
        def make_quantities(path, key, value):
            if key not in quantity_keys and isinstance(value, (str, int, float)):
                str_value = str(value)
                words = str_value.split()
                try_quantity = bool(len(words) == 2 and is_float(words[0]))

                if try_quantity or is_float(value):
                    field = delimiter.join(["data"] + list(path) + [key])
                    q = Q_(str_value).to_compact()

                    if not q.check(0):
                        q.ito_reduced_units()

                    # ensure that the same units are used across contributions
                    try:
                        column = project.columns.get(path=field)
                        if column.unit != str(q.units):
                            q.ito(column.unit)
                    except DoesNotExist:
                        pass  # column doesn't exist yet (generated in post_save)
                    except DimensionalityError:
                        raise ValueError(
                            f"Can't convert [{q.units}] to [{column.unit}]!"
                        )

                    v = Decimal(str(q.magnitude))
                    vt = v.as_tuple()

                    if vt.exponent < 0:
                        dgts = len(vt.digits)
                        dgts = max_dgts if dgts > max_dgts else dgts
                        v = f"{v:.{dgts}g}"

                        if try_quantity:
                            q = Q_(f"{v} {q.units}")

                    value = {
                        "display": str(q),
                        "value": q.magnitude,
                        "unit": str(q.units),
                    }

            return key, value

        document.data = remap(document.data, visit=make_quantities, enter=enter)

    @classmethod
    def post_save(cls, sender, document, **kwargs):
        if kwargs.get("skip"):
            return

        # avoid circular imports
        from mpcontribs.api.projects.document import Column
        from mpcontribs.api.notebooks.document import Notebooks

        # project is LazyReferenceField
        project = document.project.fetch()

        # set columns field for project
        def update_columns(path, key, value):
            path = delimiter.join(["data"] + list(path) + [key])
            is_quantity = isinstance(value, dict) and quantity_keys == set(value.keys())
            is_text = bool(
                not is_quantity and isinstance(value, str) and key not in quantity_keys
            )
            if is_quantity or is_text:
                try:
                    column = project.columns.get(path=path)
                    if is_quantity:
                        v = value["value"]
                        if v > column.max:
                            column.max = v
                            project.save().reload("columns")
                        elif v < column.min:
                            column.min = v
                            project.save().reload("columns")

                except DoesNotExist:
                    column = Column(path=path)
                    if is_quantity:
                        column.unit = value["unit"]
                        column.min = column.max = value["value"]

                    project.modify(push__columns=column)

                ncolumns = len(project.columns)
                if ncolumns > 50:
                    raise ValueError("Reached maximum number of columns (50)!")

            return True

        # run update_columns over document data
        remap(document.data, visit=update_columns, enter=enter)

        # add/remove columns for other components
        for path in ["structures", "tables"]:
            try:
                project.columns.get(path=path)
            except DoesNotExist:
                if getattr(document, path):
                    project.update(push__columns=Column(path=path))

        # generate notebook for this contribution
        if document.notebook is not None:
            document.notebook.delete()

        cells = [
            nbf.new_code_cell(
                "client = Client(\n"
                '\theaders={"X-Consumer-Groups": "admin"},\n'
                f'\thost="{MPCONTRIBS_API_HOST}"\n'
                ")"
            ),
            nbf.new_code_cell(f'client.get_contribution("{document.id}").pretty()'),
        ]

        if document.tables:
            cells.append(nbf.new_markdown_cell("## Tables"))
            for table in document.tables:
                cells.append(
                    nbf.new_code_cell(f'client.get_table("{table.id}").plot()')
                )

        if document.structures:
            cells.append(nbf.new_markdown_cell("## Structures"))
            for structure in document.structures:
                cells.append(
                    nbf.new_code_cell(f'client.get_structure("{structure.id}")')
                )

        ws = connect_kernel()
        for cell in cells:
            if cell["cell_type"] == "code":
                cell["outputs"] = execute(ws, str(document.id), cell["source"])

        ws.close()
        cells[0] = nbf.new_code_cell("client = Client('<your-api-key-here>')")
        doc = deepcopy(seed_nb)
        doc["cells"] += cells
        document.notebook = Notebooks(**doc).save()
        document.last_modified = datetime.utcnow()
        document.save(signal_kwargs={"skip": True})

    @classmethod
    def pre_delete(cls, sender, document, **kwargs):
        document.reload("notebook", "structures", "tables")

        # remove reference documents
        if document.notebook is not None:
            document.notebook.delete()

        for structure in document.structures:
            structure.delete()

        for table in document.tables:
            table.delete()

    @classmethod
    def post_delete(cls, sender, document, **kwargs):
        # reset columns field for project
        project = document.project.fetch()

        for column in list(project.columns):
            if not isnan(column.min) and not isnan(column.max):
                column.min, column.max = get_min_max(sender, column.path)
                if isnan(column.min) and isnan(column.max):
                    # just deleted last contribution with this column
                    project.update(pull__columns__path=column.path)
            else:
                # use wildcard index if available -> single field query
                field = column.path.replace(delimiter, "__") + "__type"
                qs = sender.objects(**{field: "string"}).only(column.path)

                if qs.count() < 1 or qs.filter(project__name=project.name).count() < 1:
                    project.update(pull__columns__path=column.path)
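The four classmethods above are inert until wired to mongoengine's signals; the `skip` flag they check is what `document.save(signal_kwargs={"skip": True})` uses to break the save/post_save recursion. A sketch of the registration typically done once at import time:

from mongoengine import signals

signals.pre_save_post_validation.connect(
    Contributions.pre_save_post_validation, sender=Contributions
)
signals.post_save.connect(Contributions.post_save, sender=Contributions)
signals.pre_delete.connect(Contributions.pre_delete, sender=Contributions)
signals.post_delete.connect(Contributions.post_delete, sender=Contributions)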
Example n. 29
class Sensor(Document):
    meta = {
        "collection": "sensors",
        "strict": False,
        "auto_create_index": False
    }

    profile = PlainReferenceField(SensorProfile,
                                  default=SensorProfile.get_default_profile)
    object = PlainReferenceField(Object)
    managed_object = ForeignKeyField(ManagedObject)
    local_id = StringField()
    state = PlainReferenceField(State)
    units = PlainReferenceField(MeasurementUnits)
    label = StringField()
    dashboard_label = StringField()
    # Sources that discovered the sensor
    sources = ListField(StringField(choices=list(SOURCES)))
    # Timestamp when the sensor was last seen
    last_seen = DateTimeField()
    # Timestamp when the sensor expired
    expired = DateTimeField()
    # Timestamp of first discovery
    first_discovered = DateTimeField(default=datetime.datetime.now)
    protocol = StringField(
        choices=["modbus_rtu", "modbus_ascii", "modbus_tcp", "snmp", "ipmi"])
    modbus_register = IntField()
    snmp_oid = StringField()
    ipmi_id = StringField()
    bi_id = LongField(unique=True)
    # Labels
    labels = ListField(StringField())
    effective_labels = ListField(StringField())

    _id_cache = cachetools.TTLCache(maxsize=100, ttl=60)
    _bi_id_cache = cachetools.TTLCache(maxsize=100, ttl=60)

    def __str__(self):
        if self.object:
            return f"{self.object}: {self.local_id}"
        elif self.managed_object:
            return f"{self.managed_object}: {self.local_id}"
        return f"{self.units}: {self.local_id}"

    @classmethod
    @cachetools.cachedmethod(operator.attrgetter("_id_cache"),
                             lock=lambda _: id_lock)
    def get_by_id(cls, id):
        return Sensor.objects.filter(id=id).first()

    @classmethod
    @cachetools.cachedmethod(operator.attrgetter("_bi_id_cache"),
                             lock=lambda _: id_lock)
    def get_by_bi_id(cls, id):
        return Sensor.objects.filter(bi_id=id).first()

    def seen(self, source: Optional[str] = None):
        """
        Seen sensor
        """
        if source and source in SOURCES:
            self.sources = list(set(self.sources or []).union(set([source])))
            self._get_collection().update_one(
                {"_id": self.id}, {"$addToSet": {
                    "sources": source
                }})
        self.fire_event("seen")
        self.touch()  # Workflow: refresh expiration

    def unseen(self, source: Optional[str] = None):
        """
        Unseen sensor
        """
        logger.info(
            "[%s|%s] Sensor is missed '%s'",
            self.object.name if self.object else "-",
            "-",
            self.local_id,
        )
        if source and source in SOURCES:
            self.sources = list(set(self.sources or []) - set([source]))
            self._get_collection().update_one({"_id": self.id},
                                              {"$pull": {
                                                  "sources": source
                                              }})
        elif not source:
            # No source given: clear all sources
            self.sources = []
            self._get_collection().update_one({"_id": self.id},
                                              {"$set": {
                                                  "sources": []
                                              }})
        if not self.sources:
            # No sources left: mark the sensor as missed
            self.fire_event("missed")
            self.touch()

    @classmethod
    def sync_object(cls, obj: Object) -> None:
        """
        Synchronize sensors with object model
        :param obj:
        :return:
        """
        # Get existing sensors
        obj_sensors: Dict[str, Sensor] = {
            s.local_id: s
            for s in Sensor.objects.filter(object=obj.id)
        }
        m_proto = [
            d.value for d in obj.get_effective_data()
            if d.interface == "modbus" and d.attr == "type"
        ] or ["rtu"]
        # Create new sensors
        for sensor in obj.model.sensors:
            if sensor.name in obj_sensors:
                obj_sensors[sensor.name].seen("objectmodel")
                del obj_sensors[sensor.name]
                continue
            # Not seen yet: create a new sensor
            logger.info("[%s|%s] Creating new sensor '%s'",
                        obj.name if obj else "-", "-", sensor.name)
            s = Sensor(
                profile=SensorProfile.get_default_profile(),
                object=obj,
                local_id=sensor.name,
                units=sensor.units,
                label=sensor.description,
            )
            # Get sensor protocol
            if sensor.modbus_register:
                if not m_proto:
                    continue
                s.protocol = "modbus_%s" % m_proto[0].lower()
                s.modbus_register = sensor.modbus_register
            elif sensor.snmp_oid:
                s.protocol = "snmp"
                s.snmp_oid = sensor.snmp_oid
            else:
                logger.info(
                    "[%s|%s] Unknown sensor protocol '%s'",
                    obj.name if obj else "-",
                    "-",
                    sensor.name,
                )
            s.save()
            s.seen("objectmodel")
        # Notify missed sensors
        for s in sorted(obj_sensors):
            sensor = obj_sensors[s]
            sensor.unseen(source="objectmodel")

    @classmethod
    def can_set_label(cls, label):
        return Label.get_effective_setting(label, setting="enable_sensor")

    @classmethod
    def iter_effective_labels(cls, instance: "Sensor") -> Iterable[List[str]]:
        yield (instance.labels or []) + (instance.profile.labels or [])
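The seen/unseen pair maintains a set-valued `sources` field: a sensor stays alive while at least one source still reports it, and firing "missed" happens only when the set empties. A minimal sketch of that invariant, free of the NOC dependencies (source names are illustrative):

def apply_seen(sources, source):
    # Mirrors the $addToSet in Sensor.seen()
    return set(sources) | {source}

def apply_unseen(sources, source=None):
    # Mirrors the $pull / $set in Sensor.unseen(); an empty result means "missed"
    sources = set() if source is None else set(sources) - {source}
    return sources, not sources

sources = apply_seen(set(), "objectmodel")
sources = apply_seen(sources, "etl")
sources, missed = apply_unseen(sources, "etl")
assert not missed                       # still seen by "objectmodel"
sources, missed = apply_unseen(sources, "objectmodel")
assert missed                           # no sources left: fire "missed"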
Example n. 30
class Maintenance(Document):
    meta = {
        "collection": "noc.maintenance",
        "strict": False,
        "auto_create_index": False,
        "indexes": [("start", "is_completed"), "administrative_domain"],
        "legacy_collections": ["noc.maintainance"],
    }

    type = ReferenceField(MaintenanceType)
    subject = StringField(required=True)
    description = StringField()
    start = DateTimeField()
    stop = DateTimeField()
    is_completed = BooleanField(default=False)
    auto_confirm = BooleanField(default=True)
    template = ForeignKeyField(Template)
    contacts = StringField()
    suppress_alarms = BooleanField()
    # Escalate TT during maintenance
    escalate_managed_object = ForeignKeyField(ManagedObject)
    # Time pattern when maintenance is active
    # None - active all the time
    time_pattern = ForeignKeyField(TimePattern)
    # Objects declared to be affected by maintenance
    direct_objects = ListField(EmbeddedDocumentField(MaintenanceObject))
    # Segments declared to be affected by maintenance
    direct_segments = ListField(EmbeddedDocumentField(MaintenanceSegment))
    # All Administrative Domain for all affected objects
    administrative_domain = ListField(ForeignKeyField(AdministrativeDomain))
    # Escalated TT ID in form
    # <external system name>:<external tt id>
    escalation_tt = StringField(required=False)
    # @todo: Attachments

    _id_cache = cachetools.TTLCache(maxsize=100, ttl=60)

    @classmethod
    @cachetools.cachedmethod(operator.attrgetter("_id_cache"),
                             lock=lambda _: id_lock)
    def get_by_id(cls, id):
        return Maintenance.objects.filter(id=id).first()

    def update_affected_objects_maintenance(self):
        call_later(
            "noc.maintenance.models.maintenance.update_affected_objects",
            60,
            maintenance_id=self.id,
        )

    def auto_confirm_maintenance(self):
        stop = datetime.datetime.strptime(self.stop, "%Y-%m-%dT%H:%M:%S")
        now = datetime.datetime.now()
        if stop > now:
            delay = (stop - now).total_seconds()
            call_later("noc.maintenance.models.maintenance.stop",
                       delay,
                       maintenance_id=self.id)

    def save(self, *args, **kwargs):
        created = bool(self._created)
        if self.direct_objects:
            if any(o_elem.object is None for o_elem in self.direct_objects):
                raise ValidationError("Object line is empty")
        if self.direct_segments:
            for elem in self.direct_segments:
                try:
                    # Force dereference to validate the segment reference
                    elem.segment = elem.segment
                except Exception:
                    raise ValidationError("Segment line is empty")
        super().save(*args, **kwargs)
        if created:
            self.update_affected_objects_maintenance()
            self.auto_confirm_maintenance()

    def on_save(self):
        changed_fields = set(getattr(self, "_changed_fields", None) or [])
        if "direct_objects" in changed_fields or "direct_segments" in changed_fields:
            self.update_affected_objects_maintenance()

        if "stop" in changed_fields:
            if not self.is_completed and self.auto_confirm:
                self.auto_confirm_maintenance()

        if "is_completed" in changed_fields:
            AffectedObjects._get_collection().delete_many({"maintenance": self.id})

        if self.escalate_managed_object:
            if not self.is_completed and self.auto_confirm:
                call_later(
                    "noc.services.escalator.maintenance.start_maintenance",
                    delay=max(
                        (dateutil.parser.parse(self.start) -
                         datetime.datetime.now()).total_seconds(),
                        60,
                    ),
                    scheduler="escalator",
                    pool=self.escalate_managed_object.escalator_shard,
                    maintenance_id=self.id,
                )
                # auto_confirm is already known to hold here, so schedule the close as well
                call_later(
                    "noc.services.escalator.maintenance.close_maintenance",
                    delay=max(
                        (dateutil.parser.parse(self.stop) -
                         datetime.datetime.now()).total_seconds(),
                        60,
                    ),
                    scheduler="escalator",
                    pool=self.escalate_managed_object.escalator_shard,
                    maintenance_id=self.id,
                )
            if self.is_completed and not self.auto_confirm:
                call_later(
                    "noc.services.escalator.maintenance.close_maintenance",
                    scheduler="escalator",
                    pool=self.escalate_managed_object.escalator_shard,
                    maintenance_id=self.id,
                )

    @classmethod
    def currently_affected(cls):
        """
        Returns a list of currently affected object ids
        """
        affected = set()
        now = datetime.datetime.now()
        for d in cls._get_collection().find(
            {"start": {"$lte": now}, "stop": {"$gte": now}, "is_completed": False},
            {"_id": 1, "time_pattern": 1},
        ):
            if d.get("time_pattern"):
                # Restrict to time pattern
                tp = TimePattern.get_by_id(d["time_pattern"])
                if tp and not tp.match(now):
                    continue
            data = [
                {"$match": {"maintenance": d["_id"]}},
                {"$project": {"_id": 0, "objects": "$affected_objects.object"}},
            ]
            for x in AffectedObjects._get_collection().aggregate(data):
                affected.update(x["objects"])
        return list(affected)

    @classmethod
    def get_object_maintenance(cls, mo):
        """
        Returns a list of active maintenances for the object
        :param mo: Managed Object instance
        :return: List of Maintenance instances or empty list
        """
        r = []
        now = datetime.datetime.now()
        for m in Maintenance.objects.filter(
                start__lte=now, is_completed=False).order_by("start"):
            if m.time_pattern and not m.time_pattern.match(now):
                continue
            if AffectedObjects.objects.filter(maintenance=m,
                                              affected_objects__object=mo.id).first():
                r += [m]
        return r
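A hedged usage sketch of the two queries above (it assumes a connected NOC environment and an existing ManagedObject bound to `mo`; neither is set up by the example):

# Skip alarm escalation for objects inside an active maintenance window
if mo.id in set(Maintenance.currently_affected()):
    pass  # suppress or annotate instead of escalating

# Per-object view, honoring time patterns
for m in Maintenance.get_object_maintenance(mo):
    print(m.subject, m.start, m.stop, m.escalation_tt)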