Example #1
    def create_or_replace(cls, **kwargs):
        ip = kwargs.get('ip', '')
        obj = cls.get_by_key(ip, key='ip')
        try:

            if obj is None or obj.status == cls.STATUS_DELETE:
                obj = cls()
                obj.ip = ip
                obj.status = cls.STATUS_NEW
                obj.created_time = timezone.now()
                obj.weight = cls.WEIGHT_LOW

            obj.last_discove_time = timezone.now()
            obj.name = kwargs.get('name', obj.name)
            obj.os = kwargs.get('os', obj.os)
            obj.mac = kwargs.get('mac', obj.mac)

            db.session.add(obj)
            db.session.commit()

            for app in kwargs.get('apps', []):
                Application.create_or_replace(app.get('name', ''),
                    app.get('protocol', ''),
                    app.get('port', 0),
                    app.get('state', ''),
                    app.get('product', ''),
                    app.get('version', ''),
                    obj,
                )
        except Exception as e:
            # logger.exception records the error together with its traceback
            logger.exception(e)
            db.session.rollback()

        return obj
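A hedged usage sketch (not from the source): Example #11 below calls this method as SysAsset.create_or_replace(**asset), so a discovery result could be persisted like this; the concrete values are illustrative.

# Illustrative only: the payload keys mirror the kwargs read above.
asset = SysAsset.create_or_replace(
    ip='10.0.0.5',
    name='web-01',
    os='Linux',
    mac='00:11:22:33:44:55',
    apps=[{'name': 'http', 'protocol': 'tcp', 'port': 80,
           'state': 'open', 'product': 'nginx', 'version': '1.18'}],
)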
Example #2
    def create_or_replace(cls, key, plugin_ident, payloads):
        obj = cls.query.filter_by(ip=key, plugin_ident=plugin_ident).first()
        if obj is None:
            obj = cls()
            obj.ip = key
            obj.plugin_ident = plugin_ident
            obj.created_time = timezone.now()

        obj.last_discover_time = timezone.now()
        obj.payloads = json.dumps(payloads) if isinstance(
            payloads, (list, dict)) else payloads
        db.session.add(obj)
        db.session.commit()
Example #3
 async def create_price_history(self, db_connection, stock):
     query = price_history.insert().values(product_stock_id=stock.id,
                                           price_base=stock.price_base,
                                           price_sale=stock.price_sale,
                                           price_card=stock.price_card,
                                           dt=now())
     await db_connection.execute(query)
Example #4
    def consume(self, callback, isjson=True):
        """
        Return True if some resource has been consumed; otherwise return False
        """
        status = self.status
        if status[0]:
            # the latest resource has already been consumed
            return False

        resource_client = AzureBlob(status[1][0], connection_string, container_name)
        if isjson:
            callback(resource_client.json)
        else:
            res_file = resource_client.download()
            try:
                with open(res_file,'rb') as f:
                    callback(f)
            finally:
                # after processing, remove the downloaded local resource file
                os.remove(res_file)
        # update the client consume data
        client_metadata = {
            "resource_id": status[1][0],
            "publish_date": status[1][1],
            "consume_date": timezone.now()
        }

        self.update(client_metadata)

        return True
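A minimal call sketch, assuming (hypothetically) that `client` is an instance of the class defining consume() above; the callback receives either the parsed JSON payload or an open binary file handle, depending on isjson.

# Illustrative only: `client` and `process` are hypothetical names.
def process(payload):
    print(payload)

if client.consume(process, isjson=True):
    print('consumed the latest resource')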
Example #5
def archive_by_date(d,
                    delete_after_archive=False,
                    check=False,
                    overwrite=False,
                    backup_to_archive_table=True,
                    rearchive=False):
    """
    Archive the logged point within the specified date
    delete_after_archive: delete the archived data from table tracking_loggedpoint
    check: check whether archiving is succeed or not
    overwrite: if true, overwrite the existing archived file;if false, throw exception if already archived 
    rearchive: if true, rearchive the existing archived file;if false, throw exception if already archived 
    return a tuple (archived or not, archive type(archive,overwrite,rearchive),archived_metadata)
    """
    now = timezone.now()
    today = now.date()
    if d >= today:
        raise Exception(
            "Can only archive the logged points that happened before today")
    archive_group = get_archive_group(d)
    archive_id = get_archive_id(d)
    start_date = timezone.datetime(d.year, d.month, d.day)
    end_date = start_date + timedelta(days=1)
    backup_table = get_backup_table(d) if backup_to_archive_table else None
    return archive(archive_group,
                   archive_id,
                   start_date,
                   end_date,
                   delete_after_archive=delete_after_archive,
                   check=check,
                   overwrite=overwrite,
                   rearchive=rearchive,
                   backup_table=backup_table)
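A minimal call sketch, assuming d is a datetime.date (as implied by the `d >= today` comparison) and unpacking the return tuple described in the docstring.

from datetime import date, timedelta

# Illustrative only: archive yesterday's logged points and verify the result.
yesterday = date.today() - timedelta(days=1)
archived, archive_type, metadata = archive_by_date(yesterday, check=True)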
Example #6
    def get(self, request, *args, **kwargs):
        """
        Download: /?download
        HTML preview: /?html
        PDF preview: /

        Return an HttpResponse containing either a PDF file or HTML.

        :rtype: HttpResponse
        """
        self.get_options_filename()
        if 'html' in request.GET:
            # Output HTML
            content = self.render_html(*args, **kwargs)
            return HttpResponse(content)

        else:
            # Output PDF
            content = self.render_pdf(*args, **kwargs)

            response = HttpResponse(content, content_type='application/pdf')

            if (not self.inline or 'download' in request.GET) and 'inline' not in request.GET:
                response['Content-Disposition'] = 'attachment; filename="%s"' % self.get_filename().format(
                    name='{}'.format(str(timezone.now()).split('.')[0])
                )

            response['Content-Length'] = len(content)

            return response
Example #7
 def save_with_log(self, user_id):
     if not self.create_user_id:
         self.create_user_id = user_id
     self.update_user_id = user_id
     if not self.create_time:
         self.create_time = timezone.now()
     self.save()
Example #8
    def get_periodic_sorted_tasks(self, schedule):
        dt_now = localtime(now())
        tasks = get_tasks(schedule)

        tasks_periodic = []
        count_every = 0
        for task in tasks:
            type_task = task.type
            if type_task == 'every':
                time_task = task.time
                offset_minutes = count_every + OFFSET_FOR_EVERY
                dt_task = dt_now + datetime.timedelta(minutes=offset_minutes)
                dt_limit = dt_task + datetime.timedelta(days=1)
                while dt_task < dt_limit:
                    struct_time = dt_task.timetuple()
                    time = {
                        'hour': struct_time.tm_hour, 'minute': struct_time.tm_min,
                        'second': struct_time.tm_sec, 'microsecond': 0,
                        }
                    tasks_periodic.append(TaskType(task.name, 'periodic', task.executor, time))
                    time_task_kw = {
                        'hours': time_task.get('hour', 0),
                        'minutes': time_task.get('minute', 0),
                        'seconds': time_task.get('second', 0),
                        }
                    dt_task = dt_task + datetime.timedelta(**time_task_kw)
                count_every += 1
            else:
                tasks_periodic.append(task)

        return sorted(tasks_periodic, key=lambda item: dt_now.replace(**item.time))
Example #9
 def __init__(self,
              user=None,
              type=None,
              name='',
              job_params=None,
              status=None,
              explain='',
              created_time=None,
              start_time=None,
              finish_time=None,
              progress=0,
              id=None,
              user_id=None,
              from_cache=False,
              *args,
              **kwargs):
     self.id = id
     self.name = name
     self.user_id = user.id if user else user_id
     self.type = type
     job_params = {} if job_params is None else job_params
     self.job_params = json.dumps(job_params) if isinstance(
         job_params, (dict, )) else job_params
     self.progress = progress
     self.status = self.STATUS_WATING if status is None else status
     self.explain = explain
     self.created_time = timezone.now(
     ) if created_time is None else created_time
     self.start_time = start_time
     self.finish_time = finish_time
     self.from_cache = from_cache
Example #10
    async def notice_msg_for_send_iter(self):
        bulk_size = 10
        send_ids = []
        while True:
            dt_send = now()
            async with db.engine.acquire() as db_connection:
                # .where(notice_msg.c.dt_send.is_(None))\
                query = sa.select((notice_msg.c.id, user.c.user_id_orig, user.c.first_name, notice_msg.c.text))\
                    .select_from(notice_msg.join(user, user.c.id == notice_msg.c.user_id))\
                    .where(notice_msg.c.status == StatusNoticeEnum.not_send.value)\
                    .order_by(user.c.id)\
                    .limit(bulk_size)
                res = await db_connection.execute(query)
                recs = await res.fetchall()

            if len(recs) == 0:
                break

            send_ids.clear()
            for item in recs:
                yield item.user_id_orig, item.first_name, item.text
                send_ids.append(item.id)

            async with db.engine.acquire() as db_connection:
                query = notice_msg.update().values(
                    dt_send=dt_send, status=StatusNoticeEnum.in_progress.value
                    )\
                    .where(notice_msg.c.id.in_(send_ids))
                await db_connection.execute(query)
Example #11
    def result(self, sub_job, type, result):
        data = result.get('data', [])

        if sub_job.type == JobType.SYS_ASSET_DISCOVER:
            job = sub_job.job
            for asset in data:
                # store the asset
                obj = SysAsset.create_or_replace(**asset)
                job_params = {
                    'ip': obj.ip,
                    'plugins': job.job_params_object.get('plugins', []),
                }
                # dispatch the vulnerability-check sub-job
                SubJob.create(job, JobType.SYS_VUL_CHECK, job_params)
        elif sub_job.type == JobType.SYS_VUL_CHECK:
            now = timezone.now()
            for asset in data:
                key = asset.get('key', '')
                for k, v in asset.items():
                    if k == 'key':
                        continue
                    AssetSysVul.create_or_replace(key=key,
                                                  plugin_ident=k,
                                                  payloads=v.get(
                                                      'payloads', {}))

                AssetSysVul.delete_not_found(key, now)

        sub_job.finish(status=result.get('status'),
                       explain=result.get('explain', ''))
        SubJobResult.create(sub_job, data)
Example #12
    def __get__(self, instance, owner):
        """
            Get the value, or generate if needed
            """
        if instance is None:
            return self
        try:
            return getattr(instance, self.propname)
        except AttributeError:
            pass

        key = self._make_key(instance)

        client = create_client()
        document = client.collection("cache").document(key).get()
        if not document.exists:
            value = None
        else:
            data = document.to_dict()
            if data["expires"] <= timezone.now():
                value = None
            else:
                value = data["value"]

        if value is None:
            value = self.getter(instance)
            self.validate_and_set(document.reference, value)
        self.set_value(instance, value)
        return value
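For readers unfamiliar with the descriptor protocol used above, here is a self-contained in-memory analogue; a plain dict stands in for the Firestore "cache" collection, and every name in it is illustrative rather than the source API.

import time

class CachedProperty:
    """Illustrative analogue of the descriptor above, backed by a dict."""
    _store = {}  # stands in for the Firestore "cache" collection

    def __init__(self, getter, ttl=60):
        self.getter = getter
        self.ttl = ttl
        self.propname = '_cached_' + getter.__name__

    def __get__(self, instance, owner):
        if instance is None:
            return self
        try:
            # per-instance memo, like the getattr() fast path above
            return getattr(instance, self.propname)
        except AttributeError:
            pass
        entry = self._store.get(self.propname)
        value = None
        if entry is not None and entry['expires'] > time.time():
            value = entry['value']
        if value is None:
            # cache miss or expired entry: regenerate and store a new expiry
            value = self.getter(instance)
            self._store[self.propname] = {'value': value,
                                          'expires': time.time() + self.ttl}
        setattr(instance, self.propname, value)
        return value

class Report:
    @CachedProperty
    def summary(self):
        return 'expensive result'

Report().summary  # first access computes; later accesses hit the memo/cache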
Example #13
    def create_or_replace(cls, name, protocol, port, state, product, version, asset, *args, **kwargs):
        obj = db.session.query(cls).filter_by(asset_id=asset.id, port=port).first()
        if obj is None or obj.status == cls.STATUS_DELETE:
            obj = cls()
            obj.port = port
            obj.created_time = timezone.now()
            obj.status = cls.STATUS_NEW
            obj.asset_id = asset.id

        obj.name = name
        obj.protocol = protocol
        obj.state = state
        obj.product = product
        obj.version = version
        obj.last_discove_time = timezone.now()

        db.session.add(obj)
        db.session.commit()
Example #14
 def stats(cls):
     return {
         'total':
         cls.query.count(),
         '24_hour':
         cls.query.filter(cls.created_time >= timezone.now() -
                          timedelta(days=1)).count(),
     }
Example #15
    def _archive(self):
        """
        Archive the az logs
        return True if some az logs are archived; False if nothing is archived
        """
        query_start, query_end = self.get_query_interval()
        if not query_end:
            logger.info(
                "All az logs have been archived. The end time of the last archiving is {}"
                .format(query_start))
            return False

        logger.info("Archive az logs between {} and {}".format(
            query_start, query_end))
        resource_id = self.get_resource_id(query_start)
        metadata = {
            "start_archive": timezone.now(),
            "resource_id": resource_id,
            self.ARCHIVE_STARTTIME: query_start,
            self.ARCHIVE_ENDTIME: query_end
        }
        self.set_metadata(metadata)

        try:
            dump_file = None
            with tempfile.NamedTemporaryFile(
                    suffix=".json",
                    prefix=self.settings.RESOURCE_NAME,
                    delete=False) as f:
                dump_file = f.name

            if self.settings.TENANT:
                login_cmd = "az login --service-principal -u {} -p {} --tenant {}".format(
                    self.settings.USER, self.settings.PASSWORD,
                    self.settings.TENANT)
            else:
                login_cmd = "az login -u {} -p {}".format(
                    self.settings.USER, self.settings.PASSWORD)

            cmd = "{}&&az monitor log-analytics query -w {} --analytics-query '{}' -t {}/{} > {}".format(
                login_cmd, self.settings.WORKSPACE, self.settings.QUERY,
                timezone.utctime(query_start).strftime("%Y-%m-%dT%H:%M:%SZ"),
                timezone.utctime(query_end).strftime("%Y-%m-%dT%H:%M:%SZ"),
                dump_file)
            subprocess.check_output(cmd, shell=True)

            with open(dump_file, 'r') as f:
                data = json.loads(f.read())
                metadata["log_records"] = len(data)
            resourcemetadata = self.resource_repository.push_file(
                dump_file,
                metadata,
                f_post_push=_set_end_datetime("end_archive"))

            return True
        finally:
            remove_file(dump_file)
Example #16
 def stats(cls):
     return {
         'total':
         db.session.query(cls).filter(cls.status == cls.STATUS_OK).count(),
         'online':
         db.session.query(cls).filter(
             cls.status == cls.STATUS_OK, cls.heartbeat_time >=
             timezone.now() - timedelta(minutes=3)).count()
     }
Example #17
 def __init__(self, sub_job, result, created_time=None, status=None):
     self.sub_job_id = sub_job.id
     self.result = json.dumps(result) if isinstance(result, (
         dict,
         list,
         tuple,
     )) else result
     self.created_time = timezone.now(
     ) if created_time is None else created_time
     self.status = self.STATUS_OK if status is None else status
Example #18
 def start(self, executor=None):
     self.status = self.STATUS_DOING
     self.executor = executor if executor else self.executor
     self.start_time = timezone.now()
     redis.zadd(
         REDIS_KEYS['JOB_QUEUE'].format(type=JobType.KEYS.get(self.type),
                                        id=self.job.id), -1, self.id)
     self.job.start()
     db.session.add(self)
     db.session.commit()
Example #19
 def __init__(self, name, email, is_super=False, created_time=None, last_login_time=None, status=None, id=None, password=None, role_id=None):
     self.id = id
     self.name = name
     self.password = password
     self.email = email
     self.role_id = role_id
     self.created_time = created_time if created_time else timezone.now()
     self.last_login_time = last_login_time
     self.is_super = is_super
     self.status = self.STATUS_REGISTED if status is None else status
Example #20
def _archive_file(storage,
                  f,
                  resource_id,
                  checking_policy,
                  check_md5,
                  metadata=None):
    # avoid a shared mutable default argument; metadata is mutated below
    metadata = {} if metadata is None else metadata
    # push the updated or new files into storage
    file_status = os.stat(f)
    file_modify_date = file_status.st_mtime_ns
    file_size = file_status.st_size
    if check_md5:
        file_md5 = utils.file_md5(f)
    else:
        file_md5 = None

    try:
        res_metadata = storage.get_resource_metadata(resource_id)
    except ResourceNotFound as ex:
        res_metadata = None

    is_changed = False
    for policy in checking_policy:
        if policy == FILE_MD5:
            if not res_metadata or res_metadata.get("file_md5") != file_md5:
                is_changed = True
                break
        elif policy == FILE_MODIFY_DATE:
            if not res_metadata or res_metadata.get(
                    "file_modify_date") != file_modify_date:
                is_changed = True
                break
        elif policy == FILE_SIZE:
            if not res_metadata or res_metadata.get("file_size") != file_size:
                is_changed = True
                break
        else:
            raise Exception("Checking policy({}) Not Support".format(policy))

    if not is_changed:
        logger.debug(
            "File({},{}) is not changed, no need to archive again".format(
                f, resource_id))
        return False

    metadata["archive_time"] = timezone.now()
    metadata["resource_id"] = resource_id
    metadata["file_modify_date"] = file_modify_date
    metadata["file_size"] = file_size
    if check_md5:
        metadata["file_md5"] = file_md5

    storage.push_file(f, metadata=metadata)
    logger.debug("File({},{}) was archived successfully.".format(
        f, resource_id))
    return True
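A hedged call sketch, assuming FILE_MD5 and FILE_SIZE are the module-level policy constants referenced above and that `storage` provides the get_resource_metadata()/push_file() interface the function relies on.

# Illustrative only: `storage` is a hypothetical repository object.
changed = _archive_file(storage,
                        '/var/log/app/app.log',   # local file to push
                        'logs/app.log',           # resource_id in storage
                        checking_policy=[FILE_MD5, FILE_SIZE],
                        check_md5=True)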
Example #21
    def authenticate(cls, name, password):
        user = cls.query.filter_by(name=name).first()
        if not user:
            user = cls.query.filter_by(email=name).first()

        if user and check_password_hash(user.password, password):
            user.last_login_time = timezone.now()
            db.session.add(user)
            db.session.commit()
            return user

        return None
Example #22
class User(Model):
    uid = CharField(verbose_name='用户id',
                    max_length=32,
                    index=True,
                    unique=True,
                    primary_key=True)
    username = CharField(verbose_name='用户名', max_length=24)
    password = CharField(128, verbose_name='密码')
    last_login = DateTimeField(verbose_name='last_login',
                               default=timezone.now)
    phone = CharField(verbose_name='手机号', null=True, max_length=13)
    nickname = CharField(verbose_name='昵称', max_length=24, null=True)
    is_active = BooleanField(default=True)
    is_admin = BooleanField(default=False)
    register_time = DateTimeField(null=True, default=timezone.now)
    version = IntegerField(verbose_name='密码版本', default=1)
    email = CharField(20, verbose_name='邮箱', null=True)
    # gender: 0 = female, 1 = male
    gender = CharField(1, verbose_name='性别', default='1')
    avatar = CharField(256, verbose_name='头像', default='')
    # role: 0 = regular user, 1 = admin
    role = CharField(1, verbose_name='角色', default='0')
    deleted = CharField(1, verbose_name='是否删除', default='0')

    def __str__(self):
        return self.get_username()

    def set_password(self, raw_password):
        self.password = generate_password_hash(raw_password)
        return self.password

    def check_password(self, raw_password):
        return check_password_hash(self.password, raw_password)

    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)

    class Meta:
        database = BaseModel.db
        table_name = 'system_user'
Example #23
 def __init__(self,
              key,
              value=None,
              status=None,
              created_time=None,
              from_cache=False,
              id=None):
     self.id = id
     self.key = key
     self.value = value
     self.status = self.STATUS_OK if status is None else status
     self.created_time = timezone.now(
     ) if created_time is None else created_time
     self.from_cache = from_cache
Example #24
def need_delete(meta):
    resourceid = meta["resource_id"]
    file_folder, file_name = os.path.split(resourceid)
    if file_name.startswith("."):
        return True
    elif not settings.FILE_RE.search(file_name):
        return True

    if meta.get(ResourceConstant.DELETED_KEY, False) and meta.get(
            ResourceConstant.DELETE_TIME_KEY) and timezone.now() > meta.get(
                ResourceConstant.DELETE_TIME_KEY) + DELETED_RESROURCE_EXPIRED:
        return True

    return False
Example #25
    def finish(self, status=None, explain=''):
        if self.status not in [
                self.STATUS_SUCCESS, self.STATUS_CANCEL, self.STATUS_FAILURE,
                self.STATUS_DELETE
        ]:
            Executor.running_decr_by_ident(self.executor, self.type)

        self.status = self.STATUS_SUCCESS if status is None else status
        self.finish_time = timezone.now()
        self.explain = explain
        db.session.add(self)
        db.session.commit()
        redis.zadd(
            REDIS_KEYS['JOB_QUEUE'].format(type=JobType.KEYS.get(self.type),
                                           id=self.job.id), 1, self.id)
Example #26
    def finish(self, status=None, explain=''):
        job = self
        if getattr(self, 'from_cache', False):
            job = self.get_by_key(self.id)

        job.finish_time = timezone.now()
        job.status = self.STATUS_SUCCESS if status is None else self.STATUS_FAILURE
        job.progress = self.PROGRESS_COMPLATE
        job.explain = explain
        db.session.add(job)
        db.session.commit()
        redis.delete(REDIS_KEYS['JOB_CONTENT'].format(id=self.id))
        redis.lrem(REDIS_KEYS['JOB_DOING'], 0, self.id)
        for _, key in JobType.KEYS.items():
            redis.delete(REDIS_KEYS['JOB_QUEUE'].format(type=key, id=self.id))

        return True
Example #27
 def get_next(self, call_in_start=False):
     while True:
         try:
             next_item = self.queue.popleft()
             wait_seconds = self.get_wait_seconds(next_item.time)
             if wait_seconds < 0 and call_in_start:
                 continue
             break
         except IndexError:  # the queue is exhausted
             self.queue = deque(self.tasks)
             next_item = self.queue.popleft()
             wait_seconds = self.get_wait_seconds(next_item.time)
             if wait_seconds < 0:
                 dt_reference = localtime(now()) + datetime.timedelta(days=1)
                 wait_seconds = self.get_wait_seconds(next_item.time, dt_reference)
             break
     return next_item.executor, wait_seconds
Example #28
 def start(self):
     job = self
     if getattr(self, 'from_cache', False):
         job = self.get_by_key(self.id)
     if job.start_time is None:
         job.start_time = timezone.now()
     job.status = self.STATUS_DOING
     redis_content = {'status': self.STATUS_DOING}
     if job.progress < self.PROGRESS_DOING:
         job.progress = self.PROGRESS_DOING
         redis_content['progress'] = self.PROGRESS_DOING
     db.session.add(job)
     db.session.commit()
     redis.hmset(REDIS_KEYS['JOB_CONTENT'].format(id=self.id), redis_content)
Example #29
    def get_query_interval(self):
        """
        Return the current query interval(start,end) based on the last archiving;
        return (start,None) if all logs are archived
    
        """
        last_resource = self.resource_repository.last_resource
        if last_resource:
            query_start = last_resource[1][self.ARCHIVE_ENDTIME]
        else:
            query_start = self.settings.QUERY_START

        query_end = query_start + self.settings.QUERY_DURATION

        if query_end < timezone.now() - self.settings.LOG_DELAY_TIME:
            return (query_start, query_end)
        else:
            return (query_start, None)
Example #30
    def create(cls, ident, hostname, pid, type, total, busy, idle):
        executor = cls.query.filter_by(ident=ident, type=type).first()
        if executor is None:
            executor = cls(ident=ident,
                           hostname=hostname,
                           pid=pid,
                           type=type,
                           total=total,
                           busy=busy,
                           idle=idle)

        executor.total = total
        executor.busy = busy
        executor.pid = pid
        executor.heartbeat_time = timezone.now()
        db.session.add(executor)
        db.session.commit()
        return executor
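A hedged call sketch for the heartbeat upsert above; Executor and JobType appear elsewhere in this listing (Examples #11 and #25), but the concrete values are illustrative.

# Illustrative only: register or refresh a worker's heartbeat record.
executor = Executor.create(ident='worker-1@host-a',
                           hostname='host-a',
                           pid=4242,
                           type=JobType.SYS_ASSET_DISCOVER,
                           total=8, busy=3, idle=5)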