示例#1
0
    def selectables(cls, bag, agg_spec):
        """ Create a list of statements from spec

        :type bag: mongosql.bag.ModelPropertyBags
        :param agg_spec: maps a computed field name to a column name, or to a
            single-operator expression object (e.g. ``{'$max': 'age'}``)
        :rtype: list[sqlalchemy.sql.elements.ColumnElement]
        """
        # TODO: calculation expressions for selection: http://docs.mongodb.org/manual/meta/aggregation-quick-reference/
        selectables = []
        for comp_field, comp_expression in agg_spec.items():
            # Column reference
            if isinstance(comp_expression, basestring):
                selectables.append(bag.columns[comp_expression].label(comp_field))
                continue

            # Computed expression
            assert isinstance(comp_expression, dict), 'Aggregate: Expression should be either a column name, or an object'
            assert len(comp_expression) == 1, 'Aggregate: expression can only contain a single operator'
            # Read the single (operator, expression) pair without popitem():
            # popitem() mutates the caller's agg_spec in place, emptying the
            # expression dict so the same spec cannot be used twice.
            operator, expression = next(iter(comp_expression.items()))

            # Expression statement
            if isinstance(expression, int) and operator == '$sum':
                # Special case for count: {'$sum': 1} becomes COUNT(*)
                expression_stmt = expression
            elif isinstance(expression, basestring):
                # Column name
                expression_stmt = bag.columns[expression]
                # Json column?
                if bag.columns.is_column_json(expression):
                    # PostgreSQL always returns text values from it, and for aggregation we usually need numbers :)
                    expression_stmt = cast(expression_stmt, Float)
            elif isinstance(expression, dict):
                # Boolean expression
                expression_stmt = MongoCriteria.statement(bag, expression)
                # Need to cast it to int so it can be summed
                expression_stmt = cast(expression_stmt, Integer)
            else:
                raise AssertionError('Aggregate: expression should be either a column name, or an object')

            # Operator
            if operator == '$max':
                comp_stmt = func.max(expression_stmt)
            elif operator == '$min':
                comp_stmt = func.min(expression_stmt)
            elif operator == '$avg':
                comp_stmt = func.avg(expression_stmt)
            elif operator == '$sum':
                if isinstance(expression_stmt, int):
                    # Special case for count
                    comp_stmt = func.count()
                    # {'$sum': n} with n != 1 scales the count
                    if expression_stmt != 1:
                        comp_stmt *= expression_stmt
                else:
                    comp_stmt = func.sum(expression_stmt)
            else:
                raise AssertionError('Aggregate: unsupported operator "{}"'.format(operator))

            # Append
            selectables.append(comp_stmt.label(comp_field))

        return selectables
def mark_payments_as_sent(transaction_label: str,
                          batch_size: int = 1000) -> None:
    """Mark every payment of the given transaction label as SENT.

    Walks the payments' id range in fixed-size batches; each batch gets a
    SENT status row, its bookings are marked reimbursed, and the session
    is committed before moving on.
    """
    total_marked = 0

    lowest_id = db.session.query(func.min(Payment.id)).filter(
        Payment.transactionLabel == transaction_label).scalar()
    highest_id = db.session.query(func.max(Payment.id)).filter(
        Payment.transactionLabel == transaction_label).scalar()

    # MIN/MAX are NULL when no payment carries this label.
    if lowest_id is None or highest_id is None:
        logger.info("No payments needed to be marked as sent")
        return

    # One shared timestamp so the whole run is dated consistently.
    status_date = datetime.datetime.utcnow()
    for start_id in range(lowest_id, highest_id + 1, batch_size):
        payments_ids = get_payments_ids_under_review(start_id, batch_size,
                                                     transaction_label)
        if not payments_ids:
            continue

        new_statuses = [
            PaymentStatus(paymentId=payment_id,
                          status=TransactionStatus.SENT,
                          date=status_date)
            for payment_id in payments_ids
        ]

        db.session.bulk_save_objects(new_statuses)
        mark_bookings_as_reimbursed_from_payment_ids(payments_ids, status_date)
        db.session.commit()

        total_marked += len(payments_ids)

    logger.info("%d payments have been marked as sent for transaction %s",
                total_marked, transaction_label)
示例#3
0
    def _build_query(self, table, filter_values):
        """Build the grouped count query for this report.

        Joins two aliased sub-selects on ``group_case_id``: ``x`` exposes
        per-doc rows plus a ``maxmin`` prop_value aggregate, and ``y``
        collapses gender per group (optionally constrained via HAVING).
        ``filter_values`` also supplies the bind parameters (``.params``).
        """
        having = []
        filter_cols = []
        external_cols = _get_grouping(filter_values)

        # Sort report filters into HAVING clauses and collect the extra
        # columns the inner select must expose for them.
        for fil in self.filters:
            if isinstance(fil, ANDFilter):
                # Expose the first member's column; the whole AND goes to HAVING.
                filter_cols.append(fil.filters[0].column_name)
                having.append(fil)
            elif isinstance(fil, RawFilter):
                having.append(fil)
            elif fil.column_name not in ['group', 'gender', 'group_leadership', 'disaggregate_by',
                                         'table_card_group_by']:
                # 'maxmin' and externally-grouped columns are already selected elsewhere.
                if fil.column_name not in external_cols and fil.column_name != 'maxmin':
                    filter_cols.append(fil.column_name)
                having.append(fil)

        # Raw-SQL HAVING applied to the gender sub-select; MAX+MIN over the
        # cast gender flag presumably encodes the group's gender mix —
        # TODO confirm the flag's value domain.
        group_having = ''
        having_group_by = []
        if ('disaggregate_by' in filter_values and filter_values['disaggregate_by'] == 'group') or \
                (filter_values.get('table_card_group_by') == 'group_leadership'):
            having_group_by.append('group_leadership')
        elif 'group_leadership' in filter_values and filter_values['group_leadership']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) " \
                           "= :group_leadership and group_leadership=\'Y\'"
            having_group_by.append('group_leadership')
            filter_cols.append('group_leadership')
        elif 'gender' in filter_values and filter_values['gender']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) = :gender"

        table_card_group = []
        if 'group_name' in self.group_by:
            table_card_group.append('group_name')
        # s1 ("x"): one row per doc with the maxmin aggregate plus filter columns.
        s1 = alias(select([table.c.doc_id, table.c.group_case_id, table.c.group_name, table.c.group_id,
                           (sqlalchemy.func.max(table.c.prop_value) +
                            sqlalchemy.func.min(table.c.prop_value)).label('maxmin')] + filter_cols +
                          external_cols, from_obj=table,
                          group_by=([table.c.doc_id, table.c.group_case_id, table.c.group_name, table.c.group_id] +
                                    filter_cols + external_cols)), name='x')
        # s2 ("y"): gender aggregated per group_case_id, filtered by group_having.
        s2 = alias(
            select(
                [table.c.group_case_id,
                 sqlalchemy.cast(
                     cast(func.max(table.c.gender), Integer) + cast(func.min(table.c.gender), Integer), VARCHAR
                 ).label('gender')] + table_card_group,
                from_obj=table,
                group_by=[table.c.group_case_id] + table_card_group + having_group_by, having=group_having
            ), name='y'
        )
        # Replace textual group-by entries with the aliased columns from s1.
        group_by = list(self.group_by)
        if 'group_case_id' in group_by:
            group_by[group_by.index('group_case_id')] = s1.c.group_case_id
            group_by[group_by.index('group_name')] = s1.c.group_name
        return select(
            [sqlalchemy.func.count(s1.c.doc_id).label(self.key)] + group_by,
            group_by=[s1.c.maxmin] + filter_cols + group_by,
            having=AND(having).build_expression(s1),
            from_obj=join(s1, s2, s1.c.group_case_id == s2.c.group_case_id)
        ).params(filter_values)
示例#4
0
def payments():
    """Admin view: every payment with its earliest ticket expiry and ticket count."""
    rows = (
        Payment.query
        .join(Ticket)
        .with_entities(
            Payment,
            func.min(Ticket.expires).label('first_expires'),
            func.count(Ticket.id).label('ticket_count'),
        )
        .group_by(Payment)
        .order_by(Payment.id)
        .all()
    )
    return render_template('admin/payments.html', payments=rows)
示例#5
0
def redditmeme_min_ts(subreddit: str) -> int:
    """Return the earliest RedditMeme timestamp for *subreddit*, or 0.

    Returns 0 both when the subreddit has no memes (the MIN aggregate
    yields NULL) and when the query fails.
    """
    try:
        min_ts = site_db.query(func.min(
            RedditMeme.timestamp)).filter_by(subreddit=subreddit).scalar()
        # typing.cast() is a runtime no-op, so the previous code leaked a
        # NULL aggregate out as None despite the declared `-> int`;
        # coalesce it to the documented sentinel instead.
        return min_ts if min_ts is not None else 0
    except Exception:
        # Best-effort: any DB error degrades to the sentinel value.
        return 0
示例#6
0
def expiring():
    """Admin view: in-progress bank payments whose tickets expire within 3 days."""
    cutoff = datetime.utcnow() + timedelta(days=3)
    rows = (
        BankPayment.query
        .join(Ticket)
        .filter(
            BankPayment.state == 'inprogress',
            Ticket.expires < cutoff,
        )
        .with_entities(
            BankPayment,
            func.min(Ticket.expires).label('first_expires'),
            func.count(Ticket.id).label('ticket_count'),
        )
        .group_by(BankPayment)
        .order_by('first_expires')
        .all()
    )
    return render_template('admin/payments-expiring.html', expiring=rows)
def calc_percentile_sub(subreddit: str):
    """Set up percentile-calculation bounds for *subreddit*.

    NOTE(review): the visible body only computes timestamps and ``max_ts``
    is never used here — this snippet looks truncated; confirm against the
    full module before relying on it.
    """
    subreddit_clause = cast(ClauseElement, RedditMeme.subreddit == subreddit)
    # Latest timestamp that already has a percentile; the walrus binds it so
    # the falsy branch (no scored memes yet) can bootstrap from scratch.
    # (cast() here is typing.cast — a runtime no-op used only for the checker.)
    if not (timestamp := cast(
            Union[int, None],
            site_db.query(func.max(RedditMeme.timestamp)).filter(
                and_(subreddit_clause,
                     cast(ClauseElement,
                          RedditMeme.percentile != None))).scalar(),
    )):
        # No percentiles recorded yet: start one day after the first meme's
        # hour-ceiled timestamp.
        max_ts = (cast(
            Arrow,
            arrow.get(
                cast(
                    int,
                    site_db.query(func.min(RedditMeme.timestamp)).filter(
                        subreddit_clause).scalar(),
                )),
        ).ceil("hour").shift(days=1))
示例#8
0
    def compile(self):
        """Build the labeled aggregate expression for this operator and column."""
        # PostgreSQL always returns text values for JSON-extracted columns,
        # and for aggregation we usually need numbers, so cast those to Float.
        if self.is_column_json:
            column = cast(self.column, Float)
        else:
            column = self.column

        # Dispatch the operator through a lookup table instead of an if-chain.
        aggregate_fns = {
            '$max': func.max,
            '$min': func.min,
            '$avg': func.avg,
            '$sum': func.sum,
        }
        make_stmt = aggregate_fns.get(self.operator)
        if make_stmt is None:
            raise InvalidQueryError(
                'Aggregate: unsupported operator "{}"'.format(self.operator))
        return self.labeled_expression(make_stmt(column))
示例#9
0
    def _build_query(self, table, filter_values):
        """Build the grouped count query for this report.

        Two aliased sub-selects are joined on ``group_case_id``: ``x`` has
        per-doc rows plus a ``maxmin`` prop_value aggregate, ``y`` collapses
        gender per group (optionally constrained via a raw-SQL HAVING).
        ``filter_values`` doubles as the bind-parameter map (``.params``).
        """
        having = []
        filter_cols = []
        external_cols = _get_grouping(filter_values)

        # Sort the report filters into HAVING clauses and collect the extra
        # columns the inner select must expose for them.
        for fil in self.filters:
            if isinstance(fil, ANDFilter):
                # Expose the first member's column; the whole AND goes to HAVING.
                filter_cols.append(fil.filters[0].column_name)
                having.append(fil)
            elif isinstance(fil, RawFilter):
                having.append(fil)
            elif fil.column_name not in [
                    'group', 'gender', 'group_leadership', 'disaggregate_by',
                    'table_card_group_by'
            ]:
                # 'maxmin' and externally-grouped columns are selected elsewhere.
                if fil.column_name not in external_cols and fil.column_name != 'maxmin':
                    filter_cols.append(fil.column_name)
                having.append(fil)

        # Raw-SQL HAVING for the gender sub-select; MAX+MIN over the cast
        # gender flag presumably encodes the group's gender mix — TODO
        # confirm the flag's value domain.
        group_having = ''
        having_group_by = []
        if ('disaggregate_by' in filter_values and filter_values['disaggregate_by'] == 'group') or \
                (filter_values.get('table_card_group_by') == 'group_leadership'):
            having_group_by.append('group_leadership')
        elif 'group_leadership' in filter_values and filter_values[
                'group_leadership']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) " \
                           "= :group_leadership and group_leadership=\'Y\'"
            having_group_by.append('group_leadership')
            filter_cols.append('group_leadership')
        elif 'gender' in filter_values and filter_values['gender']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) = :gender"

        table_card_group = []
        if 'group_name' in self.group_by:
            table_card_group.append('group_name')
        # s1 ("x"): one row per doc with the maxmin aggregate plus filter columns.
        s1 = alias(select([
            table.c.doc_id, table.c.group_case_id, table.c.group_name,
            table.c.group_id,
            (sqlalchemy.func.max(table.c.prop_value) +
             sqlalchemy.func.min(table.c.prop_value)).label('maxmin')
        ] + filter_cols + external_cols,
                          from_obj=table,
                          group_by=([
                              table.c.doc_id, table.c.group_case_id,
                              table.c.group_name, table.c.group_id
                          ] + filter_cols + external_cols)),
                   name='x')
        # s2 ("y"): gender aggregated per group_case_id, filtered by group_having.
        s2 = alias(select([
            table.c.group_case_id,
            sqlalchemy.cast(
                cast(func.max(table.c.gender), Integer) +
                cast(func.min(table.c.gender), Integer),
                VARCHAR).label('gender')
        ] + table_card_group,
                          from_obj=table,
                          group_by=[table.c.group_case_id] + table_card_group +
                          having_group_by,
                          having=group_having),
                   name='y')
        # Replace textual group-by entries with the aliased columns from s1.
        group_by = list(self.group_by)
        if 'group_case_id' in group_by:
            group_by[group_by.index('group_case_id')] = s1.c.group_case_id
            group_by[group_by.index('group_name')] = s1.c.group_name
        return select([sqlalchemy.func.count(s1.c.doc_id).label(self.key)] +
                      group_by,
                      group_by=[s1.c.maxmin] + filter_cols + group_by,
                      having=AND(having).build_expression(s1),
                      from_obj=join(s1, s2, s1.c.group_case_id ==
                                    s2.c.group_case_id)).params(filter_values)
示例#10
0
def home():
    """Dashboard view: aggregate tweet/term stats and side A-vs-B totals."""

    total_texts = db_session.query(
        func.count(AllTweets.id).label('total_texts')).filter(
            AllTweets.context.is_(CONTEXT)).first().total_texts

    # Nothing collected for this context yet: show the empty page.
    if total_texts == 0:
        return render_template("out.html")

    total_terms = db_session.query(
        func.count(Termos.id).label('total_terms')).filter(
            Termos.context.is_(CONTEXT)).first().total_terms
    total_processed = db_session.query(
        func.count(AllTweets.id).label("total_processed")).filter(
            AllTweets.context.is_(CONTEXT)).filter(
                AllTweets.processed == 1).first().total_processed

    # Newest and oldest tweet dates for the context.
    date_max = db_session.query(
        AllTweets.id,
        func.max(AllTweets.date).label('last_date')).filter(
            AllTweets.context.is_(CONTEXT)).first().last_date
    date_min = db_session.query(
        AllTweets.id,
        func.min(AllTweets.date).label('last_date')).filter(
            AllTweets.context.is_(CONTEXT)).first().last_date

    termos, hashtags, usuarios_rt, usuarios_citados, bigram_trigram = load_from_db(
        10)

    # The two tracked "sides" are stored either as hashtags or plain terms.
    if HASHTAG == "True":
        query_a = Hashtags.query.filter(
            and_(Hashtags.hashtag.is_(SIDE_A),
                 Hashtags.context.is_(CONTEXT))).first()
        query_b = Hashtags.query.filter(
            and_(Hashtags.hashtag.is_(SIDE_B),
                 Hashtags.context.is_(CONTEXT))).first()
    else:
        query_a = Termos.query.filter(
            and_(Termos.termo.is_(SIDE_A),
                 Termos.context.is_(CONTEXT))).first()
        query_b = Termos.query.filter(
            and_(Termos.termo.is_(SIDE_B),
                 Termos.context.is_(CONTEXT))).first()

    total_a = 0
    total_b = 0
    percent_a = 0
    percent_b = 0
    total = 0

    # Percentages only when both sides exist — this also guards the division.
    if query_a and query_b:
        total_a = float(query_a.frequencia)
        total_b = float(query_b.frequencia)

        total = total_a + total_b

        percent_a = (total_a / total) * 100
        percent_b = (total_b / total) * 100

    profiles_info = get_profile()

    # Everything the index template needs, bundled into one dict.
    dict_values = {
        'total_texts': total_texts,
        'total_terms': total_terms,
        'total_processed': total_processed,
        'date_max': date_max,
        'date_min': date_min,
        'side_a': SIDE_A,
        'side_b': SIDE_B,
        'termos': termos,
        'hashtags': hashtags,
        'usuarios_rt': usuarios_rt,
        'usuarios_citados': usuarios_citados,
        'total': (percent_a, percent_b),
        'total_value': (int(total_a), int(total_b)),
        'bigram_trigram': bigram_trigram,
        'context': CONTEXT,
        'profile_a': PROFILE_A,
        'profile_b': PROFILE_B,
        'dict_profile': profiles_info
    }

    return render_template("index.html", values=dict_values)
示例#11
0
def sensi_report(info_role):
    """
    get the UUID report of a dataset

    .. :quickref: Metadata;
    """
    # NOTE: the docstring used to be duplicated as a second, dead
    # bare-string statement right below; the duplicate has been removed.

    params = request.args
    ds_id = params["id_dataset"]
    dataset = TDatasets.query.get_or_404(ds_id)
    id_import = params.get("id_import")
    id_module = params.get("id_module")

    # One row per observation with its sensitivity metadata, the
    # department(s) it intersects and its sensitivity nomenclature label.
    query = (DB.session.query(
        Synthese,
        func.taxonomie.find_cdref(Synthese.cd_nom).label("cd_ref"),
        func.array_agg(LAreas.area_name).label("codeDepartementCalcule"),
        func.ref_nomenclatures.get_cd_nomenclature(
            Synthese.id_nomenclature_sensitivity).label("cd_sensi"),
        func.ref_nomenclatures.get_nomenclature_label(
            Synthese.id_nomenclature_bio_status,
            "fr").label("occStatutBiologique"),
        func.min(CorSensitivitySynthese.meta_update_date).label(
            "sensiDateAttribution"),
        func.min(
            CorSensitivitySynthese.sensitivity_comment).label("sensiAlerte"),
        TNomenclatures.cd_nomenclature,
        TNomenclatures.label_fr).select_from(Synthese).outerjoin(
            CorAreaSynthese,
            CorAreaSynthese.id_synthese == Synthese.id_synthese).outerjoin(
                LAreas, LAreas.id_area == CorAreaSynthese.id_area).outerjoin(
                    CorSensitivitySynthese,
                    CorSensitivitySynthese.uuid_attached_row ==
                    Synthese.unique_id_sinp,
                ).outerjoin(
                    TNomenclatures, TNomenclatures.id_nomenclature ==
                    Synthese.id_nomenclature_sensitivity).filter(
                        LAreas.id_type == func.ref_geo.get_id_area_type("DEP"))
             )

    if id_module:
        query = query.filter(Synthese.id_module == id_module)

    query = query.filter(Synthese.id_dataset == ds_id)

    # Restrict to rows originating from a given import, matched by source name.
    if id_import:
        query = query.outerjoin(
            TSources, TSources.id_source == Synthese.id_source).filter(
                TSources.name_source == "Import(id={})".format(id_import))

    data = query.group_by(Synthese.id_synthese, TNomenclatures.cd_nomenclature,
                          TNomenclatures.label_fr).all()

    # Resolve the dataset's "producer" actor (cd_nomenclature == "6"):
    # a person's full name, or the organism name when one is attached.
    str_productor = ""
    header = ""
    if len(data) > 0:
        index_productor = -1
        if dataset.cor_dataset_actor:
            for index, actor in enumerate(dataset.cor_dataset_actor):
                # cd_nomenclature producteur = 6
                if actor.nomenclature_actor_role.cd_nomenclature == "6":
                    index_productor = index
            productor = (dataset.cor_dataset_actor[index_productor]
                         if index_productor != -1 else None)
            if productor:
                if not productor.organism:
                    str_productor = productor.role.nom_complet
                else:
                    str_productor = productor.organism.nom_organisme
    # Flatten the ORM rows into plain dicts matching the CSV columns below.
    data = [{
        "cdNom": row.Synthese.cd_nom,
        "cdRef": row.cd_ref,
        "codeDepartementCalcule": ", ".join(row.codeDepartementCalcule),
        "identifiantOrigine": row.Synthese.entity_source_pk_value,
        "occStatutBiologique": row.occStatutBiologique,
        "identifiantPermanent": row.Synthese.unique_id_sinp,
        "sensiAlerte": row.sensiAlerte,
        "sensible": "Oui" if row.cd_sensi != "0" else "Non",
        "sensiDateAttribution": row.sensiDateAttribution,
        "sensiNiveau": f"{row.cd_nomenclature} = {row.label_fr}",
    } for row in data]
    sensi_version = DB.session.query(
        func.gn_commons.get_default_parameter(
            'ref_sensi_version')).one_or_none()
    if sensi_version:
        sensi_version = sensi_version[0]
    # set an header only if the rapport is on a dataset
    header = f""""Rapport de sensibilité"
        "Jeu de données";"{dataset.dataset_name}"
        "Identifiant interne";"{dataset.id_dataset}"
        "Identifiant SINP";"{dataset.unique_dataset_id}"
        "Organisme/personne fournisseur";"{str_productor}"
        "Date de création du rapport";"{dt.datetime.now().strftime("%d/%m/%Y %Hh%M")}"
        "Nombre de données sensibles";"{len(list(filter(lambda row: row["sensible"] == "Oui", data)))}"
        "Nombre de données total dans le fichier";"{len(data)}"
        "sensiVersionReferentiel";"{sensi_version}"
        """

    return my_csv_resp(
        filename="filename",
        data=data,
        columns=[
            "cdNom",
            "cdRef",
            "codeDepartementCalcule",
            "identifiantOrigine",
            "occStatutBiologique",
            "identifiantPermanent",
            "sensiAlerte",
            "sensible",
            "sensiDateAttribution",
            "sensiNiveau",
        ],
        _header=header,
    )
示例#12
0
    def get_count(cls):
        """Return an O(1) size estimate for the error-report table.

        NOTE: this is ``max(id) - min(id)`` — a span, not a true COUNT; it
        ignores gaps left by deleted rows and is 0 when a single row exists.
        """
        with new_session() as session:
            lowest = session.query(func.min(ErrorReport.id)).scalar() or 0
            highest = session.query(func.max(ErrorReport.id)).scalar() or 0
            return highest - lowest
示例#13
0
 def get_count(cls):
     """Estimate the number of Result rows as max(id) - min(id)."""
     with new_session() as session:
         max_id = session.query(func.max(Result.id)).scalar() or 0
         min_id = session.query(func.min(Result.id)).scalar() or 0
         return max_id - min_id
示例#14
0
    def selectables(cls, bag, agg_spec):
        """ Create a list of statements from spec

        :type bag: mongosql.bag.ModelPropertyBags
        :param agg_spec: maps a computed field name to a column name, or to
            a single-operator expression object (e.g. ``{'$max': 'age'}``)
        :rtype: list[sqlalchemy.sql.elements.ColumnElement]
        """
        # TODO: calculation expressions for selection: http://docs.mongodb.org/manual/meta/aggregation-quick-reference/
        selectables = []
        for comp_field, comp_expression in agg_spec.items():
            # Column reference
            if isinstance(comp_expression, string_types):
                selectables.append(
                    bag.columns[comp_expression].label(comp_field))
                continue

            # Computed expression
            assert isinstance(
                comp_expression, dict
            ), 'Aggregate: Expression should be either a column name, or an object'
            assert len(
                comp_expression
            ) == 1, 'Aggregate: expression can only contain a single operator'
            # Read the single (operator, expression) pair without popitem():
            # popitem() mutates the caller's agg_spec in place, emptying the
            # expression dict so the same spec cannot be used twice.
            operator, expression = next(iter(comp_expression.items()))

            # Expression statement
            if isinstance(expression, int) and operator == '$sum':
                # Special case for count: {'$sum': 1} becomes COUNT(*)
                expression_stmt = expression
            elif isinstance(expression, string_types):
                # Column name
                expression_stmt = bag.columns[expression]
                # Json column?
                if bag.columns.is_column_json(expression):
                    # PostgreSQL always returns text values from it, and for aggregation we usually need numbers :)
                    expression_stmt = cast(expression_stmt, Float)
            elif isinstance(expression, dict):
                # Boolean expression
                expression_stmt = MongoCriteria.statement(bag, expression)
                # Need to cast it to int so it can be summed
                expression_stmt = cast(expression_stmt, Integer)
            else:
                raise AssertionError(
                    'Aggregate: expression should be either a column name, or an object'
                )

            # Operator
            if operator == '$max':
                comp_stmt = func.max(expression_stmt)
            elif operator == '$min':
                comp_stmt = func.min(expression_stmt)
            elif operator == '$avg':
                comp_stmt = func.avg(expression_stmt)
            elif operator == '$sum':
                if isinstance(expression_stmt, int):
                    # Special case for count
                    comp_stmt = func.count()
                    # {'$sum': n} with n != 1 scales the count
                    if expression_stmt != 1:
                        comp_stmt *= expression_stmt
                else:
                    comp_stmt = func.sum(expression_stmt)
            else:
                raise AssertionError(
                    'Aggregate: unsupported operator "{}"'.format(operator))

            # Append
            selectables.append(comp_stmt.label(comp_field))

        return selectables
示例#15
0
 def get_count(cls):
     """Estimate the number of Result rows from the id range (max - min)."""
     with new_session() as session:
         span_high = session.query(func.max(Result.id)).scalar() or 0
         span_low = session.query(func.min(Result.id)).scalar() or 0
     return span_high - span_low
示例#16
0
def home():
    """Dashboard view: context stats, side totals, tweet list and pagination."""

    total_texts = db_session.query(func.count(AllTweets.id).label('total_texts')).filter(AllTweets.context.is_(CONTEXT)).first().total_texts


    total_terms = db_session.query(func.count(Termos.id).label('total_terms')).filter(Termos.context.is_(CONTEXT)).first().total_terms
    total_processed = db_session.query(func.count(AllTweets.id).label("total_processed")).filter(AllTweets.context.is_(CONTEXT)).filter(AllTweets.processed==1).first().total_processed


    # Newest and oldest tweet dates for the context.
    date_max = db_session.query(AllTweets.id, func.max(AllTweets.date).label('last_date')).filter(AllTweets.context.is_(CONTEXT)).first().last_date
    date_min = db_session.query(AllTweets.id, func.min(AllTweets.date).label('last_date')).filter(AllTweets.context.is_(CONTEXT)).first().last_date

    termos, hashtags, usuarios_rt, usuarios_citados, bigram_trigram = load_from_db(10)


    # The two tracked "sides" are stored either as hashtags or plain terms.
    if HASHTAG == "True":
        query_a = Hashtags.query.filter(and_(Hashtags.hashtag.is_(SIDE_A),Hashtags.context.is_(CONTEXT))).first()
        query_b = Hashtags.query.filter(and_(Hashtags.hashtag.is_(SIDE_B),Hashtags.context.is_(CONTEXT))).first()
    else:
        query_a = Termos.query.filter(and_(Termos.termo.is_(SIDE_A),Termos.context.is_(CONTEXT))).first()
        query_b = Termos.query.filter(and_(Termos.termo.is_(SIDE_B),Termos.context.is_(CONTEXT))).first()

    total_a = 0
    total_b = 0
    percent_a = 0
    percent_b = 0
    total = 0

    # Percentages only when both sides exist — this also guards the division.
    if query_a and query_b:
        total_a = float(query_a.frequencia)
        total_b = float(query_b.frequencia)

        total = total_a + total_b

        percent_a = (total_a / total) * 100
        percent_b = (total_b / total) * 100


    profiles_info = get_profile()

    # Materialize every tweet for the template (text is stored as UTF-8 bytes).
    query_texts = db_session.query(AllTweets)
    all_ = []
    for q in query_texts:

        teste = q.text.decode('UTF-8')

        t = {}
        t['tweet_id'] = q.tweet_id
        t['user'] = q.user
        t['text'] = teste
        t['date'] = q.date
        all_.append(t)

    # NOTE(review): `tweets` is never used below (the template receives
    # `all_` directly) — looks like dead code; confirm before removing.
    tweets = jsonify(**{'list': all_})


    dict_values = {
        'total_texts': total_texts,
        'total_terms': total_terms,
        'total_processed': total_processed,
        'date_max': date_max,
        'date_min': date_min,
        'side_a': SIDE_A,
        'side_b': SIDE_B,
        'termos': termos,
        'hashtags': hashtags,
        'usuarios_rt': usuarios_rt,
        'usuarios_citados': usuarios_citados,
        'total': (percent_a, percent_b),
        'total_value': (int(total_a), int(total_b)),
        'bigram_trigram': bigram_trigram,
        'context': CONTEXT,
        'profile_a': PROFILE_A,
        'profile_b': PROFILE_B,
        'dict_profile': profiles_info
    }

    """
        ------------------Paginação---------------------
    """

    # 1-based page index from the query string; defaults to the first page.
    current_page = request.args.get('page', 1, type=int)

    num_posts = total_texts
    total_num_pages = int(math.ceil(num_posts / items_per_page))
    iter_pages = list(range(1, total_num_pages + 1))

    """
        ------------------Paginação---------------------
    """


    return render_template("home.html",values=dict_values, tweets=all_, iter_pages=iter_pages,
                           current_page=current_page, total_pages=total_num_pages)
示例#17
0
def map():
    """Map view: same dashboard stats as home(), plus a GeoJSON push to GitHub."""

    total_texts = db_session.query(func.count(AllTweets.id).label('total_texts')).filter(AllTweets.context.is_(CONTEXT)).first().total_texts

    total_terms = db_session.query(func.count(Termos.id).label('total_terms')).filter(Termos.context.is_(CONTEXT)).first().total_terms
    total_processed = db_session.query(func.count(AllTweets.id).label("total_processed")).filter(AllTweets.context.is_(CONTEXT)).filter(AllTweets.processed==1).first().total_processed

    # Newest and oldest tweet dates for the context.
    date_max = db_session.query(AllTweets.id, func.max(AllTweets.date).label('last_date')).filter(AllTweets.context.is_(CONTEXT)).first().last_date
    date_min = db_session.query(AllTweets.id, func.min(AllTweets.date).label('last_date')).filter(AllTweets.context.is_(CONTEXT)).first().last_date

    termos, hashtags, usuarios_rt, usuarios_citados, bigram_trigram = load_from_db(10)

    # The two tracked "sides" are stored either as hashtags or plain terms.
    if HASHTAG == "True":
        query_a = Hashtags.query.filter(and_(Hashtags.hashtag.is_(SIDE_A),Hashtags.context.is_(CONTEXT))).first()
        query_b = Hashtags.query.filter(and_(Hashtags.hashtag.is_(SIDE_B),Hashtags.context.is_(CONTEXT))).first()
    else:
        query_a = Termos.query.filter(and_(Termos.termo.is_(SIDE_A),Termos.context.is_(CONTEXT))).first()
        query_b = Termos.query.filter(and_(Termos.termo.is_(SIDE_B),Termos.context.is_(CONTEXT))).first()

    total_a = 0
    total_b = 0
    percent_a = 0
    percent_b = 0
    total = 0

    # Percentages only when both sides exist — this also guards the division.
    if query_a and query_b:
        total_a = float(query_a.frequencia)
        total_b = float(query_b.frequencia)

        total = total_a + total_b

        percent_a = (total_a / total) * 100
        percent_b = (total_b / total) * 100

    profiles_info = get_profile()

    # Materialize every tweet (text stored as UTF-8 bytes).
    query_texts = db_session.query(AllTweets)
    all_ = []
    for q in query_texts:

        teste = q.text.decode('UTF-8')

        t = {}
        t['tweet_id'] = q.tweet_id
        t['user'] = q.user
        t['text'] = teste
        t['date'] = q.date
        all_.append(t)

    # NOTE(review): `tweets` is never used below — looks like dead code;
    # confirm before removing.
    tweets = jsonify(**{'list': all_})

    dict_values = {
        'total_texts': total_texts,
        'total_terms': total_terms,
        'total_processed': total_processed,
        'date_max': date_max,
        'date_min': date_min,
        'side_a': SIDE_A,
        'side_b': SIDE_B,
        'termos': termos,
        'hashtags': hashtags,
        'usuarios_rt': usuarios_rt,
        'usuarios_citados': usuarios_citados,
        'total': (percent_a, percent_b),
        'total_value': (int(total_a), int(total_b)),
        'bigram_trigram': bigram_trigram,
        'context': CONTEXT,
        'profile_a': PROFILE_A,
        'profile_b': PROFILE_B,
        'dict_profile': profiles_info
    }

    def push_github():
        # Build a GeoJSON FeatureCollection from the raw tweet dump and push
        # it to the GeoJSONTweets repository, replacing tweets.geojson.

        data = []
        geoms = []
        tweet_features = []
        with open('raw.json') as twtr_hamdata:
            for satir in twtr_hamdata:
                data.append(json.loads(satir))

        for i in range(0,len(data)):
            geoms.append(data[i]["geo"]["coordinates"])
            #print (geoms[i][0], geoms[i][1])
            # Twitter "geo" is (lat, lon); GeoJSON points are (lon, lat).
            my_feature = Feature(geometry=Point((float(geoms[i][1]),float(geoms[i][0]))),\
            properties={"user_location":data[i]["user"]["location"],\
            "user_id": data[i]["id"],\
            "user_name":data[i]["user"]["name"],\
            "screen_name":data[i]["user"]["screen_name"],\
            "followers_count":data[i]["user"]["followers_count"],\
            "tweet":data[i]["text"],\
            "tweet_time":data[i]["created_at"]})
            tweet_features.append(my_feature)
            #print tweet_features
        tweet_FeatureCollection = FeatureCollection(tweet_features[:])
        #print tweet_FeatureCollection["type"]
        try:
            #saveFile = open('tweets.geojson','a')
            data = json.dumps(tweet_FeatureCollection)
            #saveFile.close()
        except Exception as error:
            print ("Unable to write %s error"%error)


        # SECURITY: hard-coded GitHub credentials committed to source — these
        # must be revoked and loaded from environment/secret storage instead.
        # (Plain username/password auth to the GitHub API is also deprecated.)
        g = Github('grandslav', '5kslj8130614')
        repo = g.get_user().get_repo("GeoJSONTweets")
        contents = repo.get_contents("/tweets.geojson")
        new_content = data
        # TODO: append a new file instead of replacing ("crimes01112018.geojson")
        repo.update_file("/tweets.geojson", "Updating geojson data", new_content, contents.sha)
        print("Arquivo .geojson atualizado")

    update = True
    if update:
        print("Atualizando geojson...")
        push_github()  # pushes the updated geojson file to the repository

    return render_template('map.html', values=dict_values, title='Map')
示例#18
0
    def get_count(cls):
        with new_session() as session:
            min_id = session.query(func.min(ErrorReport.id)).scalar() or 0
            max_id = session.query(func.max(ErrorReport.id)).scalar() or 0

            return max_id - min_id