Example #1
def test_perform_indexing_api_request_failure_index(initialized_db,
                                                    set_secscan_config):
    secscan = V4SecurityScanner(app, instance_keys, storage)
    secscan._secscan_api = mock.Mock()
    secscan._secscan_api.state.return_value = {"state": "abc"}
    secscan._secscan_api.index.side_effect = APIRequestFailure()

    next_token = secscan.perform_indexing()

    assert next_token is None
    assert ManifestSecurityStatus.select().count() == 0

    # Set security scanner to return good results and attempt indexing again
    secscan._secscan_api.index.side_effect = None
    secscan._secscan_api.index.return_value = (
        {
            "err": None,
            "state": IndexReportState.Index_Finished
        },
        "abc",
    )

    next_token = secscan.perform_indexing()

    assert next_token.min_id == Manifest.select(fn.Max(
        Manifest.id)).scalar() + 1
    assert ManifestSecurityStatus.select().count() == Manifest.select(
        fn.Max(Manifest.id)).count()
Example #2
    def get_transaction_list_details(transaction_list):
        """Return complete data set on a specified transaction."""
        query = (Files().select(
            Files.transaction.alias('upload_id'),
            fn.Max(Transactions.updated).alias('upload_date'),
            fn.Min(Files.mtime).alias('file_date_start'),
            fn.Max(Files.mtime).alias('file_date_end'),
            fn.Min(Transactions.submitter).alias('uploaded_by_id'),
            fn.Sum(Files.size).alias('bundle_size'),
            fn.Count(Files.id).alias('file_count'),
            fn.Min(Transactions.updated).alias('upload_datetime'),
            fn.Min(Transactions.proposal).alias('proposal_id'),
            fn.Min(Transactions.instrument).alias('instrument_id')).join(
                Transactions).where(
                    Files.transaction << transaction_list).group_by(
                        Files.transaction))

        return {
            str(r['upload_id']): {
                'upload_id': str(r['upload_id']),
                'upload_date': r['upload_date'].date().strftime('%Y-%m-%d'),
                'file_date_start':
                r['file_date_start'].date().strftime('%Y-%m-%d'),
                'file_date_end':
                r['file_date_end'].date().strftime('%Y-%m-%d'),
                'uploaded_by_id': int(r['uploaded_by_id']),
                'bundle_size': int(r['bundle_size']),
                'file_count': int(r['file_count']),
                'upload_datetime':
                r['upload_date'].strftime('%Y-%m-%d %H:%M:%S'),
                'proposal_id': r['proposal_id'],
                'instrument_id': r['instrument_id']
            }
            for r in query.dicts()
        }
Example #3
    def _get_earliest_latest(item_type, item_list, time_basis):
        accepted_item_types = list(
            set(
                list(QueryBase.object_type_mappings.keys()) +
                list(QueryBase.object_type_mappings.values())))
        accepted_time_basis_types = [
            'submitted',
            'modified',
            'created',
            'submit',
            'modify',
            'create',
            'submit_time',
            'modified_time',
            'create_time',
            'submitted_date',
            'modified_date',
            'created_date',
        ]
        item_type = QueryBase.object_type_mappings.get(item_type)
        time_basis = time_basis.lower()
        if item_type not in accepted_item_types or time_basis not in accepted_time_basis_types:
            raise HTTPError('400 Invalid Query')

        short_time_basis = time_basis[:5]

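        # Map the accepted spellings onto a canonical basis using their first five characters.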
        time_basis = {
            'submi': lambda x: 'submitted',
            'modif': lambda x: 'modified',
            'creat': lambda x: 'created'
        }[short_time_basis](short_time_basis)
        search_field = getattr(TransSIP, '{0}'.format(item_type))
        if time_basis == 'submitted':
            query = TransSIP().select(
                fn.Min(TransSIP.updated).alias('earliest'),
                fn.Max(TransSIP.updated).alias('latest'),
            )
        if time_basis in ['modified', 'created']:
            time_basis_field = getattr(Files, '{0}time'.format(time_basis[:1]))
            query = Files().select(
                fn.Min(time_basis_field).alias('earliest'),
                fn.Max(time_basis_field).alias('latest'),
            ).join(TransSIP, on=(TransSIP.id == Files.transaction))

        query = query.where(search_field << item_list)
        row = query.get()
        if row.earliest is None or row.latest is None:
            message = ''
            raise HTTPError('404 Not Found', message)

        return {
            'earliest': row.earliest.strftime('%Y-%m-%d %H:%M:%S'),
            'latest': row.latest.strftime('%Y-%m-%d %H:%M:%S')
        }
Example #4
def test_perform_indexing_api_request_index_error_response(initialized_db, set_secscan_config):
    secscan = V4SecurityScanner(app, instance_keys, storage)
    secscan._secscan_api = mock.Mock()
    secscan._secscan_api.state.return_value = {"state": "xyz"}
    secscan._secscan_api.index.return_value = (
        {"err": "something", "state": IndexReportState.Index_Error},
        "xyz",
    )

    next_token = secscan.perform_indexing()
    assert next_token.min_id == Manifest.select(fn.Max(Manifest.id)).scalar() + 1
    assert ManifestSecurityStatus.select().count() == Manifest.select(fn.Max(Manifest.id)).count()
    for mss in ManifestSecurityStatus.select():
        assert mss.index_status == IndexStatus.FAILED
Example #5
    def get_transaction_list_details(transaction_list):
        """Return complete data set on a specified transaction."""
        # pylint: disable=no-member
        query = (Files().select(
            Files.transaction.alias('upload_id'),
            fn.Max(TransSIP.updated).alias('upload_date'),
            fn.Min(Files.mtime).alias('file_date_start'),
            fn.Max(Files.mtime).alias('file_date_end'),
            fn.Min(TransSIP.submitter).alias('uploaded_by_id'),
            fn.Sum(Files.size).alias('bundle_size'),
            fn.Count(Files.id).alias('file_count'),
            fn.Min(TransSIP.updated).alias('upload_datetime'),
            fn.Min(TransSIP.project).alias('project_id'),
            fn.Min(TransSIP.instrument).alias('instrument_id')).join(
                TransSIP,
                on=(TransSIP.id == Files.transaction
                    )).where(Files.transaction << transaction_list).group_by(
                        Files.transaction))
        # pylint: enable=no-member

        return {
            str(r['upload_id']): {
                'upload_id':
                str(r['upload_id']),
                'upload_date':
                r['upload_date'].date().strftime('%Y-%m-%d'),
                'file_date_start':
                SummarizeByDate.utc_to_local(
                    r['file_date_start']).date().strftime('%Y-%m-%d'),
                'file_date_end':
                SummarizeByDate.utc_to_local(
                    r['file_date_end']).date().strftime('%Y-%m-%d'),
                'uploaded_by_id':
                int(r['uploaded_by_id']),
                'bundle_size':
                int(r['bundle_size']),
                'file_count':
                int(r['file_count']),
                'upload_datetime':
                SummarizeByDate.utc_to_local(
                    r['upload_date']).strftime('%Y-%m-%d %H:%M:%S'),
                'project_id':
                r['project_id'],
                'instrument_id':
                r['instrument_id']
            }
            for r in query.dicts()
        }
Example #6
    def get_last1h_devices(self):
        if scanner.last_scan is None:
            self.chat.reply("⚠️ Scanner is not started yet")

        now = datetime.now()

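        # Latest scan result for each MAC address seen in the last hour, newest first,
        # with devices that have a named owner listed before unknown ones.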
        results = ScanResult\
            .select()\
            .where(ScanResult.time > now - timedelta(hours=1))\
            .join(Device, JOIN.LEFT_OUTER)\
            .join(Person, JOIN.LEFT_OUTER)\
            .group_by(ScanResult.mac_addr)\
            .having(fn.Max(ScanResult.time) == ScanResult.time)\
            .order_by(-ScanResult.time, NodeList((Person.name, SQL('IS NULL'))), Person.name)

        msg_text = "Active in the last hour devices list\nAs of %s\n" % now.strftime(
            "%Y.%m.%d %X")

        for k, g in groupby(results, lambda x: x.time):
            age = int((now - k).seconds / 60)
            msg_text += "\n<b>%s min ago</b>\n" % str(
                age) if age > 0 else "\n<b>Now</b>\n"
            for r in g:
                if r.device:
                    d = r.device
                    msg_text += "• %s (%s) \n" % (d.owner.name if d.owner else
                                                  "N/A", d.name or "N/A")
                else:
                    msg_text += "• <code>%s</code>\n" % r.mac_addr

        self.chat.reply(
            msg_text,
            parse_mode='HTML',
        )
Example #7
def get_max_trend_date_by_category(category: list = None):
    result = PTrends.select(fn.Max(PTrends.valid_date)).join(PTorrent)
    if category:
        if not isinstance(category, list):
            category = [category]
        result = result.where((PTorrent.category.in_(category)))
    return result.scalar()
Example #8
def _get_last_known_transaction():
    txn_id = (TransSIP
              .select(fn.Max(TransSIP.id).alias('id'))
              .where(TransSIP.deleted >> None)
              .dicts()
              .get())
    return {'latest_transaction_id': txn_id['id']}
Example #9
    def get_last_scheduled_datetime(cls) -> datetime:
        last_scheduled_post = Story.select(fn.Max(Story.scheduled_datetime)).scalar()

        if last_scheduled_post:
            return last_scheduled_post
        else:
            return datetime.now()
Example #10
def test_perform_indexing_api_request_failure_index(initialized_db):
    app.config["SECURITY_SCANNER_V4_NAMESPACE_WHITELIST"] = ["devtable"]
    expected_manifests = (
        Manifest.select(fn.Max(Manifest.id))
        .join(Repository)
        .join(User)
        .where(User.username == "devtable")
    )

    secscan = V4SecurityScanner(app, instance_keys, storage)
    secscan._secscan_api = mock.Mock()
    secscan._secscan_api.state.return_value = "abc"
    secscan._secscan_api.index.side_effect = APIRequestFailure()

    next_token = secscan.perform_indexing()

    assert next_token is None
    assert ManifestSecurityStatus.select().count() == 0

    # Set security scanner to return good results and attempt indexing again
    secscan._secscan_api.index.side_effect = None
    secscan._secscan_api.index.return_value = (
        {"err": None, "state": IndexReportState.Index_Finished},
        "abc",
    )

    next_token = secscan.perform_indexing()

    assert next_token.min_id == expected_manifests.scalar() + 1
    assert ManifestSecurityStatus.select().count() == expected_manifests.count()
Example #11
    def perform_indexing_recent_manifests(self, batch_size=None):
        try:
            indexer_state = self._secscan_api.state()
        except APIRequestFailure:
            return None

        if not batch_size:
            batch_size = self.app.config.get("SECURITY_SCANNER_V4_BATCH_SIZE",
                                             0)

        reindex_threshold = datetime.utcnow() - timedelta(
            seconds=self.app.config.get(
                "SECURITY_SCANNER_V4_REINDEX_THRESHOLD", 86400))

        end_index = Manifest.select(fn.Max(Manifest.id)).scalar()
        start_index = max(end_index - batch_size, 1)

        iterator = self._get_manifest_iterator(
            indexer_state,
            start_index,
            end_index,
            batch_size=max(batch_size // 20, 1),
            reindex_threshold=reindex_threshold,
        )

        self._index(iterator, reindex_threshold)
Example #12
def test_manifest_iterator(initialized_db, set_secscan_config, index_status,
                           indexer_state, seconds, expect_zero):
    secscan = V4SecurityScanner(app, instance_keys, storage)

    for manifest in Manifest.select():
        with db_transaction():
            ManifestSecurityStatus.delete().where(
                ManifestSecurityStatus.manifest == manifest,
                ManifestSecurityStatus.repository == manifest.repository,
            ).execute()
            ManifestSecurityStatus.create(
                manifest=manifest,
                repository=manifest.repository,
                error_json={},
                index_status=index_status,
                indexer_hash="old hash",
                indexer_version=IndexerVersion.V4,
                last_indexed=datetime.utcnow() - timedelta(seconds=seconds),
                metadata_json={},
            )

    iterator = secscan._get_manifest_iterator(
        indexer_state,
        Manifest.select(fn.Min(Manifest.id)).scalar(),
        Manifest.select(fn.Max(Manifest.id)).scalar(),
    )

    count = 0
    for candidate, abt, num_remaining in iterator:
        count = count + 1

    if expect_zero:
        assert count == 0
    else:
        assert count != 0
Example #13
    def perform_indexing(self, start_token=None, batch_size=None):
        try:
            indexer_state = self._secscan_api.state()
        except APIRequestFailure:
            return None

        if not batch_size:
            batch_size = self.app.config.get("SECURITY_SCANNER_V4_BATCH_SIZE",
                                             0)

        reindex_threshold = datetime.utcnow() - timedelta(
            seconds=self.app.config.get(
                "SECURITY_SCANNER_V4_REINDEX_THRESHOLD", 86400))

        max_id = Manifest.select(fn.Max(Manifest.id)).scalar()

        start_index = (start_token.min_id if start_token is not None else
                       Manifest.select(fn.Min(Manifest.id)).scalar())

        if max_id is None or start_index is None or start_index > max_id:
            return None

        iterator = self._get_manifest_iterator(
            indexer_state,
            start_index,
            max_id,
            batch_size=batch_size,
            reindex_threshold=reindex_threshold,
        )

        self._index(iterator, reindex_threshold)

        return ScanToken(max_id + 1)
Example #14
def runner():
    last_repo = PuppetFiles.select(fn.Max(PuppetFiles.repositoryName)).scalar()
    repositories = os.listdir(Constants.PUPPET_REPOSITORIES_PATH)
    for repository in repositories:
        if last_repo is not None and repository <= last_repo:
            print("Ignoring " + repository)
        else:
            print("Adding " + repository)
            entries = []
            for path in glob.glob(Constants.PUPPET_REPOSITORIES_PATH + "/" +
                                  repository + "/**/*.pp",
                                  recursive=True):
                entries.append((
                    path,
                    repository,
                ))
                print(path, repository)

            with db.atomic():
                PuppetFiles.insert_many(
                    entries,
                    fields=[PuppetFiles.path,
                            PuppetFiles.repositoryName]).execute()

    files = (PuppetFiles.select().where((PuppetFiles.isAnalyzed == False)))

    return files
Example #15
    def updateVideo(self):
        # fill in general part of Video Info
        self.task_name = self.tasks[self.video.task_id]
        self.video_name = self.video.file_name
        self.video_length = (model.database.Kinematic
                .select()
                .where(model.database.Kinematic.video_id == self.video.id)
                .select(fn.Max(model.database.Kinematic.frame))
                .scalar()
            )

        # physical location of the video file
        file_name = os.path.join(
            self.dir_config,
            'tasks',
            self.task_name,
            'video',
            self.video_name + '_capture2.avi')
        self.load(file_name)

        # update the Gestures tab
        self.gesture_store.clear()
        self.gesture_spans = list()
        for x in model.database.Transcript.select().where(model.database.Transcript.video_id == self.video.id).order_by(model.database.Transcript.start) :
            gesture_store_item = [x.gesture_id, self.gestures[x.gesture_id], x.start, x.end]
            self.gesture_store.append(gesture_store_item)
            self.gesture_spans.append(gesture_store_item)

        # update subject info
        self.label_subject.set_markup('\n'.join([
                          '<b>Subject Code</b>: ' + self.video.file_name.rpartition('_')[2][0],
                          '<b>Trial</b>: ' + self.video.file_name[-1],
                          '<b>Skill Level</b>: ' + model.database.SKILL_LEVELS_DICT[self.video.skill_level].replace('<', '&lt;').replace('>', '&gt;'),
                          '<b>Global Rating Score</b>: {}/30 ({:.2f}%)'.format(self.video.grs_total, int(self.video.grs_total) / 0.30 ),
                          '    <b>Respect for tissue</b>: ' + self.getGrs(self.video.grs_tissue, model.database.GRS_TISSUE),
                          '    <b>Suture/needle handling</b>: ' + self.getGrs(self.video.grs_suture, model.database.GRS_SUTURE),
                          '    <b>Time and motion</b>: ' + self.getGrs(self.video.grs_time, model.database.GRS_TIME),
                          '    <b>Flow of operation</b>: ' + self.getGrs(self.video.grs_flow, model.database.GRS_FLOW),
                          '    <b>Overall performance</b>: ' + self.getGrs(self.video.grs_performance, None),
                          '    <b>Quality of final product</b>: ' + self.getGrs(self.video.grs_quality, None),
                      ]))

        # download kinematics
        self.kinematics = dict()
        self.kinematics_range = dict()
        for x in model.database.Kinematic.select().where(model.database.Kinematic.video_id == self.video.id).order_by(model.database.Kinematic.frame) :
            self.kinematics[x.frame] = x

        colname_i = 0
        for colname in model.kinematics.columns :
            self.kinematics_range[colname] = max(
                -min(getattr(self.kinematics[x],colname) for x in self.kinematics),
                max (getattr(self.kinematics[x],colname) for x in self.kinematics))
            #colname_i += 1
            #print(colname_i)

        self.updateGesturePlot(self.arm_type)

        self.onKspToggled()
Example #16
def get_true_goals():
    """
    获取每个小组净胜球最大的队伍
    """
    teams = Points.select(Points.group, Points.team, fn.Max(Points.true_goal)).where(
        Points.team == Points.team).group_by(Points.group)
    data = [g.get_dict() for g in teams]
    return json_data(data)
Example #17
def get_high_and_average_scores():
    high_score, average_score = (ScoreEntry
                                 .select(fn.Max(ScoreEntry.final_score),
                                         fn.Avg(ScoreEntry.final_score))
                                 .scalar(as_tuple=True))
    if (high_score, average_score) == (None, None):
        high_score, average_score = 0, 0
    return high_score, average_score
Example #18
    def get_max_batch(self, source):
        max_id = webapp_job_batch.select(
            fn.Max(webapp_job_batch.id).alias('max')).where(
                webapp_job_batch.crawler_source == source)
        if max_id[0].max is None:
            return 1
        else:
            return max_id[0].max + 1
Example #19
    def max_id(cls):
        """
        Get the max id on the table.

        Returns: int
        """

        return cls.select(fn.Max(cls.id)).scalar()
Example #20
def compute_location_ratings(labels=HAND_LABELED_EVENTS,
                             task_compute_index=None):

    # Create a new index for this computation
    last_compute_index = LocationRating.select(
        fn.Max(LocationRating.compute_index)).scalar() or 0
    compute_index = last_compute_index + 1

    # Determine what will be the compute index of the task periods that ratings are matched to.
    # This will become the latest compute index if it hasn't been specified.
    if task_compute_index is None:
        task_compute_index = TaskPeriod.select(fn.Max(
            TaskPeriod.compute_index)).scalar()

    # Create a list to hold all ratings that couldn't be matched to a task period.
    # At the end, we want to return these, in case it's important for the caller to know
    # which events we couldn't create rating records for.
    unmatched_ratings = []

    for event in LocationEvent.select():

        # Check to see whether this is a rating event
        rating_match = re.match(r"^Rating: (\d+)$", event.event_type)
        if rating_match:

            # If this is a rating event, extract the rating
            rating = int(rating_match.group(1))
            rating_created = create_location_rating(
                compute_index=compute_index,
                task_compute_index=task_compute_index,
                event=event,
                rating=rating,
                labels=labels,
            )

            # If a rating wasn't created, this probably couldn't be matched to a task.
            # Save a record of which event failed to be matched to a task and which user
            # this event happened for.
            if not rating_created:
                unmatched_ratings.append({
                    'user_id': event.user_id,
                    'event_id': event.id,
                })

    return unmatched_ratings
Example #21
def fetch_boxes(arxiv_id: ArxivId, schema: str, version: Optional[int],
                types: List[str]) -> Optional[RegionsByPageAndType]:
    # Discover the most recent version of data in the database for the paper.

    setup_database_connections(schema)
    if version is None:
        version_number = (Version.select(fn.Max(
            Version.index)).join(Paper).where(
                Paper.arxiv_id == arxiv_id).scalar())
        if version_number is None:
            logging.warning(  # pylint: disable=logging-not-lazy
                "There are no entities for paper %s in database schema %s",
                arxiv_id,
                schema,
            )
            return None
        version = int(version_number)

    # Load bounding boxes from rows in the tables.
    rows = (EntityModel.select(
        EntityModel.id,
        EntityModel.type,
        BoundingBoxModel.left,
        BoundingBoxModel.top,
        BoundingBoxModel.width,
        BoundingBoxModel.height,
        BoundingBoxModel.page,
    ).join(Paper).switch(EntityModel).join(BoundingBoxModel).where(
        EntityModel.version == version,
        Paper.arxiv_id == arxiv_id,
        EntityModel.type << types,
    ).dicts())
    boxes_by_entity_db_id: Dict[str, List[BoundingBox]] = defaultdict(list)
    types_by_entity_db_id: Dict[str, str] = {}
    for row in rows:
        boxes_by_entity_db_id[row["id"]].append(
            BoundingBox(
                row["left"],
                row["top"],
                row["width"],
                row["height"],
                row["page"],
            ))
        types_by_entity_db_id[row["id"]] = row["type"]

    regions: RegionsByPageAndType = defaultdict(list)
    for db_id, bounding_boxes in boxes_by_entity_db_id.items():
        by_page = group_by_page(bounding_boxes)
        for page, page_boxes in by_page.items():
            key = (page, types_by_entity_db_id[db_id])
            rectangles = frozenset([
                FloatRectangle(b.left, b.top, b.width, b.height)
                for b in page_boxes
            ])
            regions[key].append(rectangles)

    return regions
Example #22
def getNextOrderID():
    id = Order.select(fn.Max(Order.id)).scalar()

    if id is None:
        id = 1
    else:
        id += 1

    return id
Example #23
    def _backfill_manifests(self):
        try:
            Manifest.select().where(
                Manifest.layers_compressed_size >> None).get()
        except Manifest.DoesNotExist:
            logger.debug("Manifest backfill worker has completed; skipping")
            return False

        iterator = yield_random_entries(
            lambda: Manifest.select().where(Manifest.layers_compressed_size >>
                                            None),
            Manifest.id,
            250,
            Manifest.select(fn.Max(Manifest.id)).scalar(),
            1,
        )

        for manifest_row, abt, _ in iterator:
            if manifest_row.layers_compressed_size is not None:
                logger.debug("Another worker preempted this worker")
                abt.set()
                continue

            logger.debug("Setting layers compressed size for manifest %s",
                         manifest_row.id)
            layers_compressed_size = -1
            config_media_type = None
            manifest_bytes = Bytes.for_string_or_unicode(
                manifest_row.manifest_bytes)

            try:
                parsed = parse_manifest_from_bytes(
                    manifest_bytes,
                    manifest_row.media_type.name,
                    validate=False)
                layers_compressed_size = parsed.layers_compressed_size
                if layers_compressed_size is None:
                    layers_compressed_size = 0

                config_media_type = parsed.config_media_type or None
            except ManifestException as me:
                logger.warning(
                    "Got exception when trying to parse manifest %s: %s",
                    manifest_row.id, me)

            assert layers_compressed_size is not None
            updated = (Manifest.update(
                layers_compressed_size=layers_compressed_size,
                config_media_type=config_media_type,
            ).where(Manifest.id == manifest_row.id,
                    Manifest.layers_compressed_size >> None).execute())
            if updated != 1:
                logger.debug("Another worker preempted this worker")
                abt.set()
                continue

        return True
Example #24
async def ws_index(request, websocket):
    subscribe(websocket, "jobs")

    # avoid fetch "log" field from the db to reduce memory usage
    selected_fields = (
        Job.id,
        Job.name,
        Job.url_or_path,
        Job.state,
        Job.created_time,
        Job.started_time,
        Job.end_time,
    )

    JobAlias = Job.alias()
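    # id of the most recent finished job for each url_or_path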
    subquery = JobAlias.select(*selected_fields)\
                       .where(JobAlias.state << ("done", "failure", "canceled", "error"))\
                       .group_by(JobAlias.url_or_path)\
                       .select(fn.Max(JobAlias.id).alias("max_id"))

    latest_done_jobs = Job.select(*selected_fields)\
                          .join(subquery, on=(Job.id == subquery.c.max_id))\
                          .order_by(-Job.id)

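    # id of the oldest still-scheduled job for each url_or_path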
    subquery = JobAlias.select(*selected_fields)\
                       .where(JobAlias.state == "scheduled")\
                       .group_by(JobAlias.url_or_path)\
                       .select(fn.Min(JobAlias.id).alias("min_id"))

    next_scheduled_jobs = Job.select(*selected_fields)\
                             .join(subquery, on=(Job.id == subquery.c.min_id))\
                             .order_by(-Job.id)

    # chunk the initial data in batches of 30 to avoid killing Firefox
    data = chunks(
        itertools.chain(
            map(model_to_dict, next_scheduled_jobs.iterator()),
            map(model_to_dict,
                Job.select().where(Job.state == "running").iterator()),
            map(model_to_dict, latest_done_jobs.iterator())), 30)

    first_chunk = next(data)

    await websocket.send(
        ujson.dumps({
            "action": "init_jobs",
            "data": first_chunck,  # send first chunk
        }))

    for chunk in data:
        await websocket.send(
            ujson.dumps({
                "action": "init_jobs_stream",
                "data": chunk,
            }))

    await websocket.wait_closed()
Example #25
    def last_change_date(cls):
        """Find the last changed date for the object."""
        # pylint: disable=no-value-for-parameter
        last_change_date = cls.select(fn.Max(cls.updated)).scalar()
        # pylint: enable=no-value-for-parameter
        last_change_string = last_change_date \
            if last_change_date is not None else '1970-01-01 00:00:00'
        last_change_string = last_change_date.isoformat(' ') \
            if isinstance(last_change_date, datetime.datetime) else parser.parse(last_change_string).isoformat(' ')
        return text_type(last_change_string)
Example #26
def get_last_refreshed_on_time():
    """
    Time of the most recently *refreshed* feed
    """
    last_checked_on = Feed.select().aggregate(fn.Max(Feed.last_checked_on))
    if last_checked_on:
        return datetime_as_epoch(last_checked_on)

    # Return a fallback value
    return datetime_as_epoch(datetime.utcnow())
Example #27
def index():
    max_timestamp = ImageInfo.select(fn.Max(ImageInfo.timestamp)).scalar()
    # truncate the datetime obj
    start_time = max_timestamp.replace(minute=0,
                                       hour=0,
                                       second=0,
                                       microsecond=0)
    end_time = start_time + timedelta(days=1)
    result_tuples = generate_bar_chart_tuples(start_time, end_time)
    return render_template('bar_chart.html', result_tuples=result_tuples)
Example #28
def get_citation_count_for_queries(queries, api_key):

    # Create a new fetch index.
    last_fetch_index = Publication.select(
        fn.Max(Publication.fetch_index)).scalar() or 0
    fetch_index = last_fetch_index + 1

    for query in queries:

        # Fetch the citation count!
        get_citation_count(query, fetch_index, api_key)
Example #29
def get_timepaths_for_dataset(dataset, limit=10):
    '''Identify all time paths in a dataset. Returns all of them if there are 10 or less. Returns min/max/count if more than 10.'''
    all_paths = DataSet.select().where(DataSet.name==dataset.name, DataSet.project==dataset.project, DataSet.metaarg_guid==dataset.metaarg_guid)
    total_records = all_paths.count()
    if total_records < limit:
        timepaths = list(all_paths.order_by(DataSet.timepath).select(DataSet.timepath).tuples())
        timepaths = [t[0] for t in timepaths]
        return {'allpaths': timepaths}
    else:
        min_value, max_value = all_paths.select(fn.Min(DataSet.timepath), fn.Max(DataSet.timepath)).scalar(as_tuple=True)
        return {'cnt': total_records, 'min_value': min_value, 'max_value': max_value}
Example #30
def main(tags, *args, **kwargs):

    # Create a new fetch index.
    last_fetch_index = QuestionSnapshot.select(
        fn.Max(QuestionSnapshot.fetch_index)).scalar() or 0
    fetch_index = last_fetch_index + 1

    with open(tags) as tag_file:
        tag_list = [t.strip() for t in tag_file.readlines()]

    for tag in tag_list:
        fetch_questions_for_tag(tag, fetch_index)