Example #1
    def get_transaction_list_details(transaction_list):
        """Return complete data set on a specified transaction."""
        query = (Files().select(
            Files.transaction.alias('upload_id'),
            fn.Max(Transactions.updated).alias('upload_date'),
            fn.Min(Files.mtime).alias('file_date_start'),
            fn.Max(Files.mtime).alias('file_date_end'),
            fn.Min(Transactions.submitter).alias('uploaded_by_id'),
            fn.Sum(Files.size).alias('bundle_size'),
            fn.Count(Files.id).alias('file_count'),
            fn.Min(Transactions.updated).alias('upload_datetime'),
            fn.Min(Transactions.proposal).alias('proposal_id'),
            fn.Min(Transactions.instrument).alias('instrument_id')).join(
                Transactions).where(
                    Files.transaction << transaction_list).group_by(
                        Files.transaction))

        return {
            str(r['upload_id']): {
                'upload_id': str(r['upload_id']),
                'upload_date': r['upload_date'].date().strftime('%Y-%m-%d'),
                'file_date_start':
                r['file_date_start'].date().strftime('%Y-%m-%d'),
                'file_date_end':
                r['file_date_end'].date().strftime('%Y-%m-%d'),
                'uploaded_by_id': int(r['uploaded_by_id']),
                'bundle_size': int(r['bundle_size']),
                'file_count': int(r['file_count']),
                'upload_datetime':
                r['upload_date'].strftime('%Y-%m-%d %H:%M:%S'),
                'proposal_id': r['proposal_id'],
                'instrument_id': r['instrument_id']
            }
            for r in query.dicts()
        }
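The example above leans on three peewee features at once: aggregate functions via fn, per-group rollups via group_by(), and dictionary rows via dicts(). The following is a self-contained sketch of that pattern against an in-memory SQLite database and hypothetical Upload/File models (not the project's real schema):

# Sketch only: hypothetical models, illustrating fn.Min/fn.Max/fn.Sum with group_by().
from datetime import datetime
from peewee import (SqliteDatabase, Model, AutoField, CharField,
                    ForeignKeyField, IntegerField, DateTimeField, fn)

db = SqliteDatabase(':memory:')

class Upload(Model):
    id = AutoField()
    name = CharField()

    class Meta:
        database = db

class File(Model):
    id = AutoField()
    upload = ForeignKeyField(Upload, backref='files')
    size = IntegerField()
    mtime = DateTimeField()

    class Meta:
        database = db

db.create_tables([Upload, File])
up = Upload.create(name='run-1')
File.create(upload=up, size=100, mtime=datetime(2020, 1, 1))
File.create(upload=up, size=250, mtime=datetime(2020, 1, 5))

query = (File
         .select(File.upload.alias('upload_id'),
                 fn.Min(File.mtime).alias('file_date_start'),
                 fn.Max(File.mtime).alias('file_date_end'),
                 fn.Sum(File.size).alias('bundle_size'),
                 fn.Count(File.id).alias('file_count'))
         .group_by(File.upload))

for row in query.dicts():
    print(row['upload_id'], row['file_count'], row['bundle_size'])  # 1 2 350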
Example #2
    def _get_earliest_latest(item_type, item_list, time_basis):
        accepted_item_types = list(
            set(
                list(QueryBase.object_type_mappings.keys()) +
                list(QueryBase.object_type_mappings.values())))
        accepted_time_basis_types = [
            'submitted',
            'modified',
            'created',
            'submit',
            'modified',
            'create',
            'submit_time',
            'modified_time',
            'create_time',
            'submitted_date',
            'modified_date',
            'created_date',
        ]
        item_type = QueryBase.object_type_mappings.get(item_type)
        time_basis = time_basis.lower()
        if item_type not in accepted_item_types or time_basis not in accepted_time_basis_types:
            raise HTTPError('400 Invalid Query')

        short_time_basis = time_basis[:5]

        time_basis = {
            'submi': lambda x: 'submitted',
            'modif': lambda x: 'modified',
            'creat': lambda x: 'created'
        }[short_time_basis](short_time_basis)
        search_field = getattr(TransSIP, '{0}'.format(item_type))
        if time_basis == 'submitted':
            query = TransSIP().select(
                fn.Min(TransSIP.updated).alias('earliest'),
                fn.Max(TransSIP.updated).alias('latest'),
            )
        if time_basis in ['modified', 'created']:
            time_basis_field = getattr(Files, '{0}time'.format(time_basis[:1]))
            query = Files().select(
                fn.Min(time_basis_field).alias('earliest'),
                fn.Max(time_basis_field).alias('latest'),
            ).join(TransSIP, on=(TransSIP.id == Files.transaction))

        query = query.where(search_field << item_list)
        row = query.get()
        if row.earliest is None or row.latest is None:
            message = ''
            raise HTTPError('404 Not Found', message)

        return {
            'earliest': row.earliest.strftime('%Y-%m-%d %H:%M:%S'),
            'latest': row.latest.strftime('%Y-%m-%d %H:%M:%S')
        }
Example #3
    def get_transaction_list_details(transaction_list):
        """Return complete data set on a specified transaction."""
        # pylint: disable=no-member
        query = (Files().select(
            Files.transaction.alias('upload_id'),
            fn.Max(TransSIP.updated).alias('upload_date'),
            fn.Min(Files.mtime).alias('file_date_start'),
            fn.Max(Files.mtime).alias('file_date_end'),
            fn.Min(TransSIP.submitter).alias('uploaded_by_id'),
            fn.Sum(Files.size).alias('bundle_size'),
            fn.Count(Files.id).alias('file_count'),
            fn.Min(TransSIP.updated).alias('upload_datetime'),
            fn.Min(TransSIP.project).alias('project_id'),
            fn.Min(TransSIP.instrument).alias('instrument_id')).join(
                TransSIP,
                on=(TransSIP.id == Files.transaction
                    )).where(Files.transaction << transaction_list).group_by(
                        Files.transaction))
        # pylint: enable=no-member

        return {
            str(r['upload_id']): {
                'upload_id':
                str(r['upload_id']),
                'upload_date':
                r['upload_date'].date().strftime('%Y-%m-%d'),
                'file_date_start':
                SummarizeByDate.utc_to_local(
                    r['file_date_start']).date().strftime('%Y-%m-%d'),
                'file_date_end':
                SummarizeByDate.utc_to_local(
                    r['file_date_end']).date().strftime('%Y-%m-%d'),
                'uploaded_by_id':
                int(r['uploaded_by_id']),
                'bundle_size':
                int(r['bundle_size']),
                'file_count':
                int(r['file_count']),
                'upload_datetime':
                SummarizeByDate.utc_to_local(
                    r['upload_date']).strftime('%Y-%m-%d %H:%M:%S'),
                'project_id':
                r['project_id'],
                'instrument_id':
                r['instrument_id']
            }
            for r in query.dicts()
        }
Example #4
def test_manifest_iterator(initialized_db, set_secscan_config, index_status,
                           indexer_state, seconds, expect_zero):
    secscan = V4SecurityScanner(app, instance_keys, storage)

    for manifest in Manifest.select():
        with db_transaction():
            ManifestSecurityStatus.delete().where(
                ManifestSecurityStatus.manifest == manifest,
                ManifestSecurityStatus.repository == manifest.repository,
            ).execute()
            ManifestSecurityStatus.create(
                manifest=manifest,
                repository=manifest.repository,
                error_json={},
                index_status=index_status,
                indexer_hash="old hash",
                indexer_version=IndexerVersion.V4,
                last_indexed=datetime.utcnow() - timedelta(seconds=seconds),
                metadata_json={},
            )

    iterator = secscan._get_manifest_iterator(
        indexer_state,
        Manifest.select(fn.Min(Manifest.id)).scalar(),
        Manifest.select(fn.Max(Manifest.id)).scalar(),
    )

    count = 0
    for candidate, abt, num_remaining in iterator:
        count = count + 1

    if expect_zero:
        assert count == 0
    else:
        assert count != 0
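The bare index_status, indexer_state, seconds and expect_zero arguments imply the test is driven by a pytest parametrization that is not reproduced on this page. Purely to illustrate the shape such a decorator takes (these cases are invented placeholders, not the project's real fixtures), it would look something like:

# Hypothetical parametrization -- the real case matrix is not shown in the source.
@pytest.mark.parametrize(
    "index_status, indexer_state, seconds, expect_zero",
    [
        (IndexStatus.COMPLETED, {"state": "old hash"}, 60, True),
        (IndexStatus.FAILED, {"state": "new hash"}, 172800, False),
    ],
)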
Example #5
def get_min_id_for_sec_scan(version):
    """
    Gets the minimum id for a security scanning.
    """
    return _tag_alive(
        RepositoryTag.select(fn.Min(RepositoryTag.id)).join(Image).where(
            Image.security_indexed_engine < version)).scalar()
Example #6
    def perform_indexing(self, start_token=None, batch_size=None):
        try:
            indexer_state = self._secscan_api.state()
        except APIRequestFailure:
            return None

        if not batch_size:
            batch_size = self.app.config.get("SECURITY_SCANNER_V4_BATCH_SIZE",
                                             0)

        reindex_threshold = datetime.utcnow() - timedelta(
            seconds=self.app.config.get(
                "SECURITY_SCANNER_V4_REINDEX_THRESHOLD", 86400))

        max_id = Manifest.select(fn.Max(Manifest.id)).scalar()

        start_index = (start_token.min_id if start_token is not None else
                       Manifest.select(fn.Min(Manifest.id)).scalar())

        if max_id is None or start_index is None or start_index > max_id:
            return None

        iterator = self._get_manifest_iterator(
            indexer_state,
            start_index,
            max_id,
            batch_size=batch_size,
            reindex_threshold=reindex_threshold,
        )

        self._index(iterator, reindex_threshold)

        return ScanToken(max_id + 1)
Example #7
def cheapest_dish() -> models.Dish:
    """You want to get food on a budget
    Query the database to retrieve the cheapest dish available
    """
    # Compare against a MIN() subquery so an actual Dish row is returned,
    # not just the minimum price.
    min_price = models.Dish.select(fn.Min(models.Dish.price_in_cents))
    query = (models.Dish
             .select()
             .where(models.Dish.price_in_cents == min_price))
    return query.get()
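An equivalent query, assuming the same hypothetical models.Dish table, sorts by price and takes the first row; it avoids the subquery at the cost of no longer using fn.Min:

# Sketch only: same result as the MIN() subquery for a non-empty table.
cheapest = (models.Dish
            .select()
            .order_by(models.Dish.price_in_cents)
            .get())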
Example #8
def get_stale_logs_start_id(model):
    """
    Gets the oldest log entry.
    """
    try:
        return (model.select(fn.Min(model.id)).tuples())[0][0]
    except IndexError:
        return None
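For a single aggregate like this, peewee's scalar() is a terser way to read the value: it returns the first column of the first row, and since SQL MIN() over an empty table still yields one row containing NULL, it comes back as None in that case as well. Assuming the same model argument, the lookup could be expressed as:

# Equivalent sketch: scalar() is None when MIN() has nothing to aggregate.
oldest_id = model.select(fn.Min(model.id)).scalar()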
Example #9
def project(name, region=None):
    project = Project.get(Project.name == name)
    desc = project.description.replace('\n', '<br>')
    cnt = Feature.select(Feature.id).where(Feature.project == project)
    val1 = Feature.select(Feature.id).where(Feature.project == project,
                                            Feature.validates_count > 0)
    val2 = Feature.select(Feature.id).where(Feature.project == project,
                                            Feature.validates_count >= 2)
    corrected = Feature.select(Feature.id).where(Feature.project == project,
                                                 Feature.audit.is_null(False),
                                                 Feature.audit != '')
    skipped = Feature.select(Feature.id).where(
        Feature.project == project, Feature.audit.contains('"skip": true'))

    if region is not None:
        val1 = val1.where(Feature.region == region)
        val2 = val2.where(Feature.region == region)
        cnt = cnt.where(Feature.region == region)
        corrected = corrected.where(Feature.region == region)
        skipped = skipped.where(Feature.region == region)
    if project.validate_modified:
        val1 = val1.where(Feature.action == 'm')
        val2 = val2.where(Feature.action == 'm')
        cnt = cnt.where(Feature.action == 'm')

    regions = []
    if project.regional:
        regions = Feature.select(
            Feature.region,
            fn.Count(),
            # fn.Sum(Case(None, [(Feature.validates_count >= 1, 1)], 0))).where(
            fn.Sum(fn.Min(Feature.validates_count, 1)
                   )).where(Feature.project == project).group_by(
                       Feature.region).order_by(Feature.region).tuples()
        if len(regions) == 1:
            regions = []
        else:
            regions = [(None, cnt.count(), val1.count())] + list(regions)

    user = get_user()
    if user:
        has_skipped = Task.select().join(Feature).where(
            Task.user == user, Task.skipped == True, Feature.project
            == project).count() > 0
    else:
        has_skipped = False
    return render_template('project.html',
                           project=project,
                           admin=is_admin(user, project),
                           count=cnt.count(),
                           desc=desc,
                           val1=val1.count(),
                           val2=val2.count(),
                           corrected=corrected.count(),
                           skipped=skipped.count(),
                           has_skipped=has_skipped,
                           region=region,
                           regions=regions)
Example #10
async def ws_index(request, websocket):
    subscribe(websocket, "jobs")

    # avoid fetch "log" field from the db to reduce memory usage
    selected_fields = (
        Job.id,
        Job.name,
        Job.url_or_path,
        Job.state,
        Job.created_time,
        Job.started_time,
        Job.end_time,
    )

    JobAlias = Job.alias()
    subquery = JobAlias.select(*selected_fields)\
                       .where(JobAlias.state << ("done", "failure", "canceled", "error"))\
                       .group_by(JobAlias.url_or_path)\
                       .select(fn.Max(JobAlias.id).alias("max_id"))

    latest_done_jobs = Job.select(*selected_fields)\
                          .join(subquery, on=(Job.id == subquery.c.max_id))\
                          .order_by(-Job.id)

    subquery = JobAlias.select(*selected_fields)\
                       .where(JobAlias.state == "scheduled")\
                       .group_by(JobAlias.url_or_path)\
                       .select(fn.Min(JobAlias.id).alias("min_id"))

    next_scheduled_jobs = Job.select(*selected_fields)\
                             .join(subquery, on=(Job.id == subquery.c.min_id))\
                             .order_by(-Job.id)

    # chunks initial data by batch of 30 to avoid killing firefox
    data = chunks(
        itertools.chain(
            map(model_to_dict, next_scheduled_jobs.iterator()),
            map(model_to_dict,
                Job.select().where(Job.state == "running").iterator()),
            map(model_to_dict, latest_done_jobs.iterator())), 30)

    first_chunk = next(data)

    await websocket.send(
        ujson.dumps({
            "action": "init_jobs",
            "data": first_chunck,  # send first chunk
        }))

    for chunk in data:
        await websocket.send(
            ujson.dumps({
                "action": "init_jobs_stream",
                "data": chunk,
            }))

    await websocket.wait_closed()
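The chunks helper used above is not shown on this page; a minimal generator with the behaviour the call sites rely on (lazily yielding lists of at most 30 dictionaries) might look roughly like the following, which is an assumption rather than the project's actual implementation:

import itertools

def chunks(iterable, size):
    """Yield successive lists of at most `size` items from `iterable`."""
    iterator = iter(iterable)
    while True:
        batch = list(itertools.islice(iterator, size))
        if not batch:
            return
        yield batch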
Example #11
def get_summary():
    """
    Fetch top level categories from the database.
    """
    # pylint: disable=no-value-for-parameter
    depth = Category.select(fn.Min(Category.category_level)).scalar()
    return CategoryCollectionDto(categories=[category_to_dto(category) \
        for category in Category.select().where(Category.category_level == depth) \
            if not category.expired])
Example #12
def get_timepaths_for_dataset(dataset, limit=10):
    '''Identify all time paths in a dataset. Returns all of them if there are 10 or less. Returns min/max/count if more than 10.'''
    all_paths = DataSet.select().where(DataSet.name==dataset.name, DataSet.project==dataset.project, DataSet.metaarg_guid==dataset.metaarg_guid)
    total_records = all_paths.count()
    if total_records < limit:
        timepaths = list(all_paths.order_by(DataSet.timepath).select(DataSet.timepath).tuples())
        timepaths = [t[0] for t in timepaths]
        return {'allpaths': timepaths}
    else:
        min_value, max_value = all_paths.select(fn.Min(DataSet.timepath), fn.Max(DataSet.timepath)).scalar(as_tuple=True)
        return {'cnt': total_records, 'min_value': min_value, 'max_value': max_value}
Example #13
def filter_form(model, as_json=False):
    '''
    Constructs a form for filtering from a model.
    Setting `json` to true will output compact JSON.
    '''

    form = {}

    if not hasattr(model, '_filter'):
        raise Exception('Model does not have _filter property.')

    for fname in model._filter:
        field = model._meta.fields[fname]
        leg = legend(fname)

        if isinstance(field, ForeignKeyField):
            rel_model = field.rel_model
            rows = rel_model.select(rel_model.orderfield(), rel_model.keyfield()).order_by(rel_model.orderfield())
            options = {r.get_name() : r.get_key() for r in rows}
            form[fname] = {
                    'type' : 'multiple',
                    'legend' : leg,
                    'exclusive' : False,
                    'options' : options
                    }

        elif isinstance(field, IntegerField):
            # Get min and max values
            min, max = model.select(
                    fn.Min(field), fn.Max(field)
                    ).scalar(as_tuple=True)

            if max-min > 5:
                form[fname] = {
                        'type' : 'int_range',
                        'legend' : leg,
                        'min' : min,
                        'max' : max,
                        'step' : 1,
                        }
            else:
                options = {str(i):i for i in range(min, max+1)}
                form[fname] = {
                        'type' : 'multiple',
                        'legend' : leg,
                        'exclusive' : False,
                        'options' : options
                        }

    return json.dumps(form, sort_keys=True, separators=(',',':')) if as_json else form
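For a hypothetical model whose _filter lists a country ForeignKeyField and a year IntegerField spanning 1998-2020, the function above would produce a structure along these lines (field names and values purely illustrative):

# Illustrative output only -- not taken from any real model.
{
    'country': {
        'type': 'multiple',
        'legend': 'Country',
        'exclusive': False,
        'options': {'Iceland': 3, 'Norway': 7},
    },
    'year': {
        'type': 'int_range',
        'legend': 'Year',
        'min': 1998,
        'max': 2020,
        'step': 1,
    },
}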
Example #14
def analytics(args, ticker):
    start, end = StockPricesModel.select(fn.Min(
        StockPricesModel.date), fn.Max(StockPricesModel.date)).where(
            StockPricesModel.date >= args['date_from'],
            StockPricesModel.date <= args['date_to']).scalar(as_tuple=True)

    self = StockPricesModel.alias()
    res = StockPricesModel.select(
        self.open - StockPricesModel.open, self.high - StockPricesModel.high,
        self.low - StockPricesModel.low,
        self.close - StockPricesModel.close).join(self, on=True).where(
            StockPricesModel.company == ticker, StockPricesModel.date == start,
            self.company == ticker, self.date == end).scalar(as_tuple=True)

    return res
Example #15
    def _candidates_to_backfill(self):
        def missing_tmt_query():
            return (
                self._filter(RepositoryTag.select())
                .join(TagToRepositoryTag, JOIN.LEFT_OUTER)
                .where(TagToRepositoryTag.id >> None, RepositoryTag.hidden == False)
            )

        min_id = self._filter(RepositoryTag.select(fn.Min(RepositoryTag.id))).scalar()
        max_id = self._filter(RepositoryTag.select(fn.Max(RepositoryTag.id))).scalar()

        logger.info("Found candidate range %s-%s", min_id, max_id)

        iterator = yield_random_entries(missing_tmt_query, RepositoryTag.id, 1000, max_id, min_id,)

        return iterator
Example #16
File: gc.py  Project: kleesc/quay
def _chunk_delete_all(repo, model, force=False, chunk_size=500):
    """ Deletes all rows referencing the given repository in the given model. """
    assert repo.state == RepositoryState.MARKED_FOR_DELETION or force

    while True:
        min_id = model.select(fn.Min(
            model.id)).where(model.repository == repo).scalar()
        if min_id is None:
            return

        max_id = (model.select(fn.Max(model.id)).where(
            model.repository == repo, model.id <=
            (min_id + chunk_size)).scalar())
        if min_id is None or max_id is None or min_id > max_id:
            return

        model.delete().where(model.repository == repo, model.id >= min_id,
                             model.id <= max_id).execute()
Example #17
    def get_transaction_date_range_details(start_date, end_date):
        """Return a transaction set grouped on instrument and project for a given time span."""
        # pylint: disable=no-member

        transsip_alias = TransSIP.alias()

        subquery = (transsip_alias.select(
            transsip_alias.id, transsip_alias.project,
            transsip_alias.instrument, transsip_alias.updated).where(
                transsip_alias.updated >= start_date).where(
                    transsip_alias.updated <= end_date).alias('data_subselect')
                    )

        transaction_query = (TransSIP().select(
            fn.Count(TransSIP.id).alias('transaction_count'),
            fn.Min(TransSIP.updated).alias('earliest_upload_date'),
            fn.Max(TransSIP.updated).alias('latest_upload_date'),
            TransSIP.project.alias('project_id'),
            TransSIP.instrument.alias('instrument_id')).join(
                subquery, on=((TransSIP.id == subquery.c.id_id))).group_by(
                    TransSIP.project,
                    TransSIP.instrument).order_by(TransSIP.project,
                                                  TransSIP.instrument))

        # pylint: enable=no-member
        transaction_results = defaultdict(dict)

        for rec in transaction_query.dicts():
            transaction_results[rec['project_id']][rec['instrument_id']] = {
                'transaction_count':
                int(rec['transaction_count']),
                'upload_date_start':
                SummarizeByDate.utc_to_local(
                    rec['earliest_upload_date']).date().strftime('%Y-%m-%d'),
                'upload_date_end':
                SummarizeByDate.utc_to_local(
                    rec['latest_upload_date']).date().strftime('%Y-%m-%d'),
                'project_id':
                rec['project_id'],
                'instrument_id':
                rec['instrument_id']
            }

        return transaction_results
Example #18
    def _candidates_to_backfill(self):
        def missing_tmt_query():
            return (TagManifestLabel.select().join(
                TagManifestLabelMap,
                JOIN.LEFT_OUTER).where(TagManifestLabelMap.id >> None))

        min_id = (TagManifestLabel.select(fn.Min(TagManifestLabel.id)).join(
            TagManifestLabelMap,
            JOIN.LEFT_OUTER).where(TagManifestLabelMap.id >> None).scalar())
        max_id = TagManifestLabel.select(fn.Max(TagManifestLabel.id)).scalar()

        iterator = yield_random_entries(
            missing_tmt_query,
            TagManifestLabel.id,
            100,
            max_id,
            min_id,
        )

        return iterator
Example #19
 def user_add_set(self, set_score, variables):
     """Output set statistics and prompt the user to add the set."""
     set_dict = dict({
         'score': set_score,
         'scoring_fn': self.score_expression
     }.items() + variables.items())
     message("Set statistics:\n - " +
             "\n - ".join(utils.fmtkv(k, v) for k, v in set_dict.items()))
     if self.force or (not self.force and click.confirm(
             "Add set to database?", default=True)):
         # User-provided sets have negative numbers, so we find the
         # smallest and decrement by 1
         min_set_id = Set.select(fn.Min(Set._id)).scalar()
         # This is None if there are no other sets yet
         if min_set_id is None:
             min_set_id = 0
         set_id = min_set_id - 1
         add_set = True
     else:
         add_set = False
         # No set was added, so return a sentinel id rather than an unbound name.
         set_id = None
     return add_set, set_id
Example #20
def check():

    user_ids_list = list(AggData.select(AggData.user_id).distinct())
    shuffle(user_ids_list)
    chosens = user_ids_list[:10]

    for user_id in chosens:
        agg_data = AggData.get(user_id=user_id)

        balance = RawData.select(fn.Sum(
            RawData.amount)).where(RawData.user_id == user_id).scalar()
        assert (balance == agg_data.balance)

        event_number = RawData.select(
            RawData.event_id).where(RawData.user_id == user_id).count()
        assert (event_number == agg_data.event_number)

        best_event = RawData.select(fn.Max(
            RawData.amount)).where(RawData.user_id == user_id).scalar()
        assert (best_event == agg_data.best_event)

        worst_event = RawData.select(fn.Min(
            RawData.amount)).where(RawData.user_id == user_id).scalar()
        assert (worst_event == agg_data.worst_event)
Example #21
def get_minimum_user_id():
    return User.select(fn.Min(User.id)).tuples().get()[0]
Example #22
    def perform_indexing(self, start_token=None):
        try:
            indexer_state = self._secscan_api.state()
        except APIRequestFailure:
            return None

        min_id = (start_token.min_id if start_token is not None else
                  Manifest.select(fn.Min(Manifest.id)).scalar())
        max_id = Manifest.select(fn.Max(Manifest.id)).scalar()

        if max_id is None or min_id is None or min_id > max_id:
            return None

        iterator = self._get_manifest_iterator(indexer_state, min_id, max_id)

        def mark_manifest_unsupported(manifest):
            with db_transaction():
                ManifestSecurityStatus.delete().where(
                    ManifestSecurityStatus.manifest == manifest._db_id,
                    ManifestSecurityStatus.repository ==
                    manifest.repository._db_id,
                ).execute()
                ManifestSecurityStatus.create(
                    manifest=manifest._db_id,
                    repository=manifest.repository._db_id,
                    index_status=IndexStatus.MANIFEST_UNSUPPORTED,
                    indexer_hash="none",
                    indexer_version=IndexerVersion.V4,
                    metadata_json={},
                )

        for candidate, abt, num_remaining in iterator:
            manifest = ManifestDataType.for_manifest(candidate, None)
            if manifest.is_manifest_list:
                mark_manifest_unsupported(manifest)
                continue

            layers = registry_model.list_manifest_layers(
                manifest, self.storage, True)
            if layers is None or len(layers) == 0:
                logger.warning(
                    "Cannot index %s/%s@%s due to manifest being invalid (manifest has no layers)"
                    % (
                        candidate.repository.namespace_user,
                        candidate.repository.name,
                        manifest.digest,
                    ))
                mark_manifest_unsupported(manifest)
                continue

            logger.debug("Indexing %s/%s@%s" %
                         (candidate.repository.namespace_user,
                          candidate.repository.name, manifest.digest))

            try:
                (report, state) = self._secscan_api.index(manifest, layers)
            except InvalidContentSent as ex:
                mark_manifest_unsupported(manifest)
                logger.exception(
                    "Failed to perform indexing, invalid content sent")
                return None
            except APIRequestFailure as ex:
                logger.exception(
                    "Failed to perform indexing, security scanner API error")
                return None

            with db_transaction():
                ManifestSecurityStatus.delete().where(
                    ManifestSecurityStatus.manifest == candidate).execute()
                ManifestSecurityStatus.create(
                    manifest=candidate,
                    repository=candidate.repository,
                    error_json=report["err"],
                    index_status=(IndexStatus.FAILED if report["state"]
                                  == IndexReportState.Index_Error else
                                  IndexStatus.COMPLETED),
                    indexer_hash=state,
                    indexer_version=IndexerVersion.V4,
                    metadata_json={},
                )

        return ScanToken(max_id + 1)
Example #23
 def get_min_max_dates(cls):
     return cls.select(fn.Min(cls.updated), fn.Max(cls.updated)).scalar(as_tuple=True)
Example #24
 def first(cls):
     return cls.select(fn.Min(cls.order)).scalar() or 0
Example #25
    def run(self):
        self.summary_msg = summary_template
        self.best_set_desc = best_set_desc
        avg_fg_bind, avg_bg_bind, nprimers = (Primer.select(
            fn.Avg(Primer.fg_freq), fn.Avg(Primer.bg_freq),
            fn.Count(Primer.seq)).scalar(as_tuple=True))

        if (avg_fg_bind is None) or (avg_bg_bind is None):
            (avg_fg_bind, avg_bg_bind) = (0, 0)

        fg_bind_ratio = avg_fg_bind / float(self.fg_length)
        bg_bind_ratio = avg_bg_bind / float(self.bg_length)
        nactive = Primer.select().where(Primer.active == True).count()

        min_tm, max_tm, avg_tm = (Primer.select(fn.Min(
            Primer.tm), fn.Max(Primer.tm), fn.Avg(
                Primer.tm)).where(Primer.active == True).scalar(as_tuple=True))

        nsets = Set.select(fn.Count(Set._id)).scalar()

        if nsets > 0:
            bs = Set.select().order_by(Set.score).limit(1).get()
            bs_primers = ", ".join(bs.primer_seqs()).strip()
            best_set = bs._id
            bs_size = bs.set_size
            bs_score = bs.score
            bs_stats = "- " + "\n - ".join(
                fmtkv(k, bs.__dict__['_data'][k])
                for k in bs.exported_fields()
                if k not in ["_id", "pids", "score", "primers"])
            self.best_set_desc = self.best_set_desc.format(**locals())

        if_no_primers_msg = click.style(
            "Run `swga count` to find possible primers."
            if nprimers == 0 else "",
            fg='green')
        if_no_active_primers_msg = click.style(
            "Run `swga filter` to identify primers to use."
            if nactive == 0 else "",
            fg='green')
        melting_tmp_msg = (
            "The melting temp of the primers ranges between {min_tm:.2f}C and "
            "{max_tm:.2f}C with an average of {avg_tm:.2f}C."
            if nactive > 0 and min_tm and max_tm else
            "No melting temps have been calculated yet.").format(**locals())
        ifzero_sets_msg = click.style(
            "Run `swga find_sets` after identifying valid primers to begin "
            "collecting sets.\n",
            fg='green')

        set_msg = (self.best_set_desc if nsets > 0 else ifzero_sets_msg)

        primer_db = os.path.abspath(self.primer_db)
        nprimers = click.style(str(nprimers), bold=True, fg='blue')
        nactive = click.style(str(nactive), bold=True, fg='blue')
        nsets = click.style(str(nsets), bold=True, fg='blue')

        self.header = click.style("swga v{}".format(__version__), fg='green')

        # Copy all the relevant values into one dict
        values = self.__dict__.copy()
        values.update(locals())

        # Format the summary message with all the calculated values

        self.summary_msg = self.summary_msg.format(**values)
        click.echo(quote(self.summary_msg, quote="  ", width=200))
Example #26
    def perform_indexing(self, start_token=None):
        whitelisted_namespaces = self.app.config.get(
            "SECURITY_SCANNER_V4_NAMESPACE_WHITELIST", [])
        try:
            indexer_state = self._secscan_api.state()
        except APIRequestFailure:
            return None

        def eligible_manifests(base_query):
            return (base_query.join(Repository).join(User).where(
                User.username << whitelisted_namespaces))

        min_id = (start_token.min_id if start_token is not None else
                  Manifest.select(fn.Min(Manifest.id)).scalar())
        max_id = Manifest.select(fn.Max(Manifest.id)).scalar()

        if max_id is None or min_id is None or min_id > max_id:
            return None

        reindex_threshold = lambda: datetime.utcnow() - timedelta(
            seconds=self.app.config.get("SECURITY_SCANNER_V4_REINDEX_THRESHOLD"
                                        ))

        # TODO(alecmerdler): Filter out any `Manifests` that are still being uploaded
        def not_indexed_query():
            return (eligible_manifests(
                Manifest.select()).switch(Manifest).join(
                    ManifestSecurityStatus,
                    JOIN.LEFT_OUTER).where(ManifestSecurityStatus.id >> None))

        def index_error_query():
            return (eligible_manifests(Manifest.select()).switch(
                Manifest).join(ManifestSecurityStatus).where(
                    ManifestSecurityStatus.index_status == IndexStatus.FAILED,
                    ManifestSecurityStatus.last_indexed < reindex_threshold(),
                ))

        def needs_reindexing_query(indexer_hash):
            return (eligible_manifests(Manifest.select()).switch(
                Manifest).join(ManifestSecurityStatus).where(
                    ManifestSecurityStatus.indexer_hash != indexer_hash,
                    ManifestSecurityStatus.last_indexed < reindex_threshold(),
                ))

        # 4^log10(total) gives us a scalable batch size into the billions.
        batch_size = int(4**log10(max(10, max_id - min_id)))

        iterator = itertools.chain(
            yield_random_entries(
                not_indexed_query,
                Manifest.id,
                batch_size,
                max_id,
                min_id,
            ),
            yield_random_entries(
                index_error_query,
                Manifest.id,
                batch_size,
                max_id,
                min_id,
            ),
            yield_random_entries(
                lambda: needs_reindexing_query(indexer_state.get("state", "")),
                Manifest.id,
                batch_size,
                max_id,
                min_id,
            ),
        )

        for candidate, abt, num_remaining in iterator:
            manifest = ManifestDataType.for_manifest(candidate, None)
            layers = registry_model.list_manifest_layers(
                manifest, self.storage, True)

            logger.debug("Indexing %s/%s@%s" %
                         (candidate.repository.namespace_user,
                          candidate.repository.name, manifest.digest))

            try:
                (report, state) = self._secscan_api.index(manifest, layers)
            except APIRequestFailure:
                logger.exception(
                    "Failed to perform indexing, security scanner API error")
                return None

            with db_transaction():
                ManifestSecurityStatus.delete().where(
                    ManifestSecurityStatus.manifest == candidate).execute()
                ManifestSecurityStatus.create(
                    manifest=candidate,
                    repository=candidate.repository,
                    error_json=report["err"],
                    index_status=(IndexStatus.FAILED if report["state"]
                                  == IndexReportState.Index_Error else
                                  IndexStatus.COMPLETED),
                    indexer_hash=state,
                    indexer_version=IndexerVersion.V4,
                    metadata_json={},
                )

        return ScanToken(max_id + 1)
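The batch_size = int(4 ** log10(max(10, max_id - min_id))) heuristic above grows polynomially rather than linearly with the id span, since 4 ** log10(n) equals n ** log10(4), roughly n ** 0.602. A quick check of the values it produces:

# Worked check of the batch-size heuristic from the snippet above.
from math import log10

for span in (10, 1_000, 1_000_000, 1_000_000_000):
    print(f"{span} -> {int(4 ** log10(max(10, span)))}")
# 10 -> 4, 1000 -> 64, 1000000 -> 4096, 1000000000 -> 262144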
Example #27
    remarks = TextField(null=True)


SBDAPIMessage.create_table(True)
for i in [2, 3, 4, 5]:
    try:
        SBDAPIMessage.insert(unique_id=i,
                             status=1,
                             tailnum='PA-001',
                             ack=1,
                             payload="xxyyy").execute()
        print "simpan id =", i
    except:
        print "id sudah ada"

semua = SBDAPIMessage.select(
    SBDAPIMessage.unique_id).where(SBDAPIMessage.unique_id > 0)
last = SBDAPIMessage.select(SBDAPIMessage.unique_id).where(
    SBDAPIMessage.unique_id > 0).aggregate(fn.Max(SBDAPIMessage.unique_id))
start = SBDAPIMessage.select(SBDAPIMessage.unique_id).where(
    SBDAPIMessage.unique_id > 0).aggregate(fn.Min(SBDAPIMessage.unique_id))
count = SBDAPIMessage.select(SBDAPIMessage.unique_id).where(
    SBDAPIMessage.unique_id > 0).aggregate(fn.count(SBDAPIMessage.unique_id))
print "==============================="
print "tampilkan semua id :"
for satu in semua:
    print satu.unique_id
print "==============================="
print "fn.max = ", last
print "fn.min =", start
print "fn.count", count
Example #28
File: image.py  Project: quay/quay
def get_min_id_for_sec_scan(version):
    """
    Gets the minimum id for a clair sec scan.
    """
    return Image.select(fn.Min(Image.id)).where(Image.security_indexed_engine < version).scalar()
Example #29
def get_min_id():
    """
    Gets the minimum id for repository.
    """
    return Repository.select(fn.Min(Repository.id)).scalar()
Example #30
def get_min_id_for_repo_mirror_config():
    """
    Gets the minimum id for a repository mirroring.
    """
    return RepoMirrorConfig.select(fn.Min(RepoMirrorConfig.id)).scalar()
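Many of the examples above share the same shape: fn.Min and fn.Max read via scalar() establish an inclusive id range, and a worker then walks that range in batches. Stripped of the Quay-specific models, the skeleton looks roughly like this sketch against a hypothetical peewee model with an integer id primary key (not any one project's code):

def scan_in_batches(model, batch_size, handle_row):
    """Walk every row of `model` in ascending-id batches."""
    min_id = model.select(fn.Min(model.id)).scalar()
    max_id = model.select(fn.Max(model.id)).scalar()
    if min_id is None or max_id is None or min_id > max_id:
        return  # empty table, nothing to scan

    current = min_id
    while current <= max_id:
        upper = min(current + batch_size - 1, max_id)
        for row in model.select().where(model.id.between(current, upper)):
            handle_row(row)
        current = upper + 1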