Example no. 1
    def selectables(cls, bag, agg_spec):
        """ Create a list of statements from spec

        :type bag: mongosql.bag.ModelPropertyBags
        :rtype: list[sqlalchemy.sql.elements.ColumnElement]
        """
        # TODO: calculation expressions for selection: http://docs.mongodb.org/manual/meta/aggregation-quick-reference/
        selectables = []
        for comp_field, comp_expression in agg_spec.items():
            # Column reference
            if isinstance(comp_expression, basestring):
                selectables.append(bag.columns[comp_expression].label(comp_field))
                continue

            # Computed expression
            assert isinstance(comp_expression, dict), 'Aggregate: Expression should be either a column name, or an object'
            assert len(comp_expression) == 1, 'Aggregate: expression can only contain a single operator'
            operator, expression = comp_expression.popitem()

            # Expression statement
            if isinstance(expression, int) and operator == '$sum':
                # Special case for count
                expression_stmt = expression
            elif isinstance(expression, basestring):
                # Column name
                expression_stmt = bag.columns[expression]
                # Json column?
                if bag.columns.is_column_json(expression):
                    # PostgreSQL always returns text values from it, and for aggregation we usually need numbers :)
                    expression_stmt = cast(expression_stmt, Float)
            elif isinstance(expression, dict):
                # Boolean expression
                expression_stmt = MongoCriteria.statement(bag, expression)
                # Need to cast it to int
                expression_stmt = cast(expression_stmt, Integer)
            else:
                raise AssertionError('Aggregate: expression should be either a column name, or an object')

            # Operator
            if operator == '$max':
                comp_stmt = func.max(expression_stmt)
            elif operator == '$min':
                comp_stmt = func.min(expression_stmt)
            elif operator == '$avg':
                comp_stmt = func.avg(expression_stmt)
            elif operator == '$sum':
                if isinstance(expression_stmt, int):
                    # Special case for count
                    comp_stmt = func.count()
                    if expression_stmt != 1:
                        comp_stmt *= expression_stmt
                else:
                    comp_stmt = func.sum(expression_stmt)
            else:
                raise AssertionError('Aggregate: unsupported operator "{}"'.format(operator))

            # Append
            selectables.append(comp_stmt.label(comp_field))

        return selectables
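The '$sum'-over-integer branch above deserves a note: a spec like {'n_rows': {'$sum': 1}} compiles to a bare COUNT(*) rather than SUM(1), and a boolean sub-expression is cast to an integer before summing, so it counts matching rows. A minimal sketch of the two resulting constructs in plain SQLAlchemy (the 'age' column is hypothetical):

    from sqlalchemy import Integer, cast, func
    from sqlalchemy.sql import column

    # {'n_rows': {'$sum': 1}} -> COUNT(*)
    print(func.count().label('n_rows'))
    # {'n_adults': {'$sum': {'age': {'$gte': 18}}}} -> SUM(CAST(age >= 18 AS INTEGER))
    print(func.sum(cast(column('age') >= 18, Integer)).label('n_adults'))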
Example no. 2
    def get_results(cls, offset_id=0, limit=1000, project_id=None):
        with new_session() as session:
            if int(offset_id) == 0:
                offset_id = session.query(func.max(Result.id)).scalar() or 0
            rows = session.query(
                Result.id, Result.project_id, Result.shortcode,
                Result.url, Result.encoding, Result.datetime
                ) \
                .filter(Result.id <= int(offset_id))

            if project_id is not None and project_id != 'None':
                rows = rows.filter(Result.project_id == project_id)
                alphabet = Project.get_plain(project_id).alphabet
            else:
                alphabet = None

            rows = rows.order_by(Result.id.desc()).limit(int(limit))

            for row in rows:
                ans = {
                    'id': row[0],
                    'project_id': row[1],
                    'shortcode': row[2],
                    'url': row[3],
                    'encoding': row[4],
                    'datetime': row[5]
                }
                if alphabet:
                    ans['seq_num'] = str_to_int(row[2], alphabet)
                yield ans
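Because the query filters on Result.id <= offset_id and orders descending, callers can page backwards by feeding the id just below the last row seen back in as the next offset_id. A hedged usage sketch (ResultStore stands in for whatever class exposes this classmethod, and process is a hypothetical consumer):

    rows = list(ResultStore.get_results(offset_id=0, limit=1000))
    while rows:
        process(rows)
        next_offset = rows[-1]['id'] - 1  # everything strictly older
        if next_offset <= 0:              # 0 would restart from the max id
            break
        rows = list(ResultStore.get_results(offset_id=next_offset, limit=1000))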
Example no. 3
    def _build_query(self, table, filter_values):
        having = []
        filter_cols = []
        external_cols = _get_grouping(filter_values)

        for fil in self.filters:
            if isinstance(fil, ANDFilter):
                filter_cols.append(fil.filters[0].column_name)
                having.append(fil)
            elif isinstance(fil, RawFilter):
                having.append(fil)
            elif fil.column_name not in ['group', 'gender', 'group_leadership', 'disaggregate_by',
                                         'table_card_group_by']:
                if fil.column_name not in external_cols and fil.column_name != 'maxmin':
                    filter_cols.append(fil.column_name)
                having.append(fil)

        group_having = ''
        having_group_by = []
        if ('disaggregate_by' in filter_values and filter_values['disaggregate_by'] == 'group') or \
                (filter_values.get('table_card_group_by') == 'group_leadership'):
            having_group_by.append('group_leadership')
        elif 'group_leadership' in filter_values and filter_values['group_leadership']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) " \
                           "= :group_leadership and group_leadership=\'Y\'"
            having_group_by.append('group_leadership')
            filter_cols.append('group_leadership')
        elif 'gender' in filter_values and filter_values['gender']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) = :gender"

        table_card_group = []
        if 'group_name' in self.group_by:
            table_card_group.append('group_name')
        s1 = alias(select([table.c.doc_id, table.c.group_case_id, table.c.group_name, table.c.group_id,
                           (sqlalchemy.func.max(table.c.prop_value) +
                            sqlalchemy.func.min(table.c.prop_value)).label('maxmin')] + filter_cols +
                          external_cols, from_obj=table,
                          group_by=([table.c.doc_id, table.c.group_case_id, table.c.group_name, table.c.group_id] +
                                    filter_cols + external_cols)), name='x')
        s2 = alias(
            select(
                [table.c.group_case_id,
                 sqlalchemy.cast(
                     cast(func.max(table.c.gender), Integer) + cast(func.min(table.c.gender), Integer), VARCHAR
                 ).label('gender')] + table_card_group,
                from_obj=table,
                group_by=[table.c.group_case_id] + table_card_group + having_group_by, having=group_having
            ), name='y'
        )
        group_by = list(self.group_by)
        if 'group_case_id' in group_by:
            group_by[group_by.index('group_case_id')] = s1.c.group_case_id
            group_by[group_by.index('group_name')] = s1.c.group_name
        return select(
            [sqlalchemy.func.count(s1.c.doc_id).label(self.key)] + group_by,
            group_by=[s1.c.maxmin] + filter_cols + group_by,
            having=AND(having).build_expression(s1),
            from_obj=join(s1, s2, s1.c.group_case_id == s2.c.group_case_id)
        ).params(filter_values)
Example no. 4
def generate_new_address(index=None):
    if index is None:
        result = db.session.query(func.max(Guest.address_index).label("max_index")).one()
        if result and result.max_index is not None:
            # max_index == 0 is a valid index; only None (no rows yet)
            # means we should start from 0
            index = result.max_index + 1
        else:
            index = 0
    return receiving_account.subkey(RECEIVING).subkey(index).address()
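The empty-table branch can be folded into the query itself with COALESCE: MAX over no rows is NULL, NULL + 1 stays NULL, and COALESCE turns that into 0. A sketch under the same names (still racy under concurrent writers, exactly like the original):

    from sqlalchemy import func

    index = db.session.query(
        func.coalesce(func.max(Guest.address_index) + 1, 0)
    ).scalar()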
Example no. 5
    def get_execution_tables():
        """
        Return all the IODbPut objects found in model IODbPut.

        :return: ResultSet IODbPut objects
        """
        session = SQLManager.instance().get_session()
        execution_id = session.query(func.max(ToolWrapper.execution_id))
        return session.query(IODbPut).filter(IODbPut.rule_id == ToolWrapper.id).filter(ToolWrapper.execution_id == execution_id).all()
Example no. 6
    def get_set_toolwrappers():
        """
        Ask the database for toolwrappers of the current execution.

        The current execution is defined as the one with the highest id (it is auto_incrementing)

        :return: Set([ToolWrapper]) the set of toolwrappers of the current execution.
        """
        session = SQLManager.instance().get_session()
        set_toolwrappers = set()
        try:
            # query asking the db for the highest execution id
            execution_id = session.query(func.max(ToolWrapper.execution_id))
            Logger.instance().debug("Getting toolwrappers of the current execution. id = " + str(execution_id.one()[0]))
            set_toolwrappers = set(session.query(ToolWrapper).filter(ToolWrapper.execution_id == execution_id).all())
        except NoResultFound as e:
            raise e
        return set_toolwrappers
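Comparing a column to a Query, as in ToolWrapper.execution_id == execution_id above, relies on SQLAlchemy implicitly coercing the Query into a subquery; SQLAlchemy 1.4+ warns about that. A sketch of the explicit form with the example's own names (Query.scalar_subquery() is available from 1.4):

    max_execution_id = session.query(
        func.max(ToolWrapper.execution_id)
    ).scalar_subquery()
    set_toolwrappers = set(
        session.query(ToolWrapper)
        .filter(ToolWrapper.execution_id == max_execution_id)
        .all()
    )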
Example no. 7
def dag_next_execution(args):
    """
    Returns the next execution datetime of a DAG at the command line.
    >>> airflow dags next-execution tutorial
    2018-08-31 10:38:00
    """
    dag = get_dag(args.subdir, args.dag_id)

    if dag.get_is_paused():
        print("[INFO] Please be reminded this DAG is PAUSED now.", file=sys.stderr)

    with create_session() as session:
        max_date_subq = (
            session.query(func.max(DagRun.execution_date).label("max_date"))
            .filter(DagRun.dag_id == dag.dag_id)
            .subquery()
        )
        max_date_run: Optional[DagRun] = (
            session.query(DagRun)
            .filter(DagRun.dag_id == dag.dag_id, DagRun.execution_date == max_date_subq.c.max_date)
            .one_or_none()
        )

        if max_date_run is None:
            print("[WARN] Only applicable when there is execution record found for the DAG.", file=sys.stderr)
            print(None)
            return

    next_info = dag.next_dagrun_info(dag.get_run_data_interval(max_date_run), restricted=False)
    if next_info is None:
        print(
            "[WARN] No following schedule can be found. "
            "This DAG may have schedule interval '@once' or `None`.",
            file=sys.stderr,
        )
        print(None)
        return

    print(next_info.logical_date.isoformat())
    for _ in range(1, args.num_executions):
        next_info = dag.next_dagrun_info(next_info.data_interval, restricted=False)
        print(next_info.logical_date.isoformat())
Example no. 8
def getLatestPappVersionInfos(name=None):
    from peek_server.storage import dbConn
    session = dbConn.ormSession

    maxGroupedIds = (session.query(func.max(
        PeekAppInfo.id).label('maxId')).group_by(
            PeekAppInfo.name).subquery('maxGroupedIds'))

    qry = (session.query(PeekAppInfo).filter(
        PeekAppInfo.id == maxGroupedIds.c.maxId).order_by(
            PeekAppInfo.name, PeekAppInfo.id))

    if name:
        qry = qry.filter(PeekAppInfo.name == name)

    tuples = qry.all()

    session.expunge_all()

    return tuples
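The maxGroupedIds subquery is the classic greatest-n-per-group pattern: MAX(id) per name, then a join back to fetch the full rows. On backends with window functions the same selection can be written with row_number(); a hedged sketch reusing the example's names (assumes SQLAlchemy 1.4+ for scalar_subquery):

    from sqlalchemy import func

    rn = func.row_number().over(
        partition_by=PeekAppInfo.name,
        order_by=PeekAppInfo.id.desc(),
    ).label('rn')
    sub = session.query(PeekAppInfo.id.label('id'), rn).subquery('ranked')
    latest_ids = session.query(sub.c.id).filter(sub.c.rn == 1).scalar_subquery()
    qry = session.query(PeekAppInfo).filter(PeekAppInfo.id.in_(latest_ids))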
Example no. 9
    def save_with_next_available_id(self,
                                    req: "CamcopsRequest",
                                    device_id: int,
                                    era: str = ERA_NOW) -> None:
        """
        Save a record with the next available client pk in sequence.
        This is of use when creating patients and ID numbers on the server
        to ensure uniqueness, or when fixing up a missing ID number for
        a patient created on a device.
        """
        cls = self.__class__

        saved_ok = False

        # MySql doesn't support "select for update" so we have to keep
        # trying the next available ID and checking for an integrity
        # error in case another user has grabbed it by the time we have
        # committed
        # noinspection PyProtectedMember
        last_id = (
            req.dbsession
            # func.max(cls.id) + 1 here will do the right thing for
            # backends that support select for update (maybe not for no rows)
            .query(func.max(
                cls.id)).filter(cls._device_id == device_id).filter(
                    cls._era == era).scalar()) or 0

        next_id = last_id + 1

        while not saved_ok:
            self.id = next_id

            req.dbsession.add(self)

            try:
                req.dbsession.flush()
                saved_ok = True
            except IntegrityError:
                req.dbsession.rollback()
                next_id += 1
Example no. 10
    def compile(self):
        # Json column?
        if self.is_column_json:
            # PostgreSQL always returns text values from it, and for aggregation we usually need numbers :)
            column = cast(self.column, Float)
        else:
            # Simply use
            column = self.column

        # Now, handle the operator, and apply it to the expression
        if self.operator == '$max':
            stmt = func.max(column)
        elif self.operator == '$min':
            stmt = func.min(column)
        elif self.operator == '$avg':
            stmt = func.avg(column)
        elif self.operator == '$sum':
            stmt = func.sum(column)
        else:
            raise InvalidQueryError(
                'Aggregate: unsupported operator "{}"'.format(self.operator))
        return self.labeled_expression(stmt)
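The operator ladder maps names one-to-one onto SQL aggregate generators, so it can equally be written as a lookup table; a sketch reusing the example's InvalidQueryError:

    from sqlalchemy import func

    _AGGREGATE_OPERATORS = {
        '$max': func.max,
        '$min': func.min,
        '$avg': func.avg,
        '$sum': func.sum,
    }

    def apply_aggregate(operator, column):
        try:
            return _AGGREGATE_OPERATORS[operator](column)
        except KeyError:
            raise InvalidQueryError(
                'Aggregate: unsupported operator "{}"'.format(operator))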
Example no. 11
    def get_set_toolwrappers():
        """
        Ask the database for toolwrappers of the current execution.

        The current execution is defined as the one with the highest id (it is auto_incrementing)

        :return: Set([ToolWrapper]) the set of toolwrappers of the current execution.
        """
        session = SQLManager.instance().get_session()
        set_toolwrappers = set()
        try:
            # query asking the db for the highest execution id
            execution_id = session.query(func.max(ToolWrapper.execution_id))
            Logger.instance().debug(
                "Getting toolwrappers of the current execution. id = " +
                str(execution_id.one()[0]))
            set_toolwrappers = set(
                session.query(ToolWrapper).filter(
                    ToolWrapper.execution_id == execution_id).all())
        except NoResultFound as e:
            raise e
        return set_toolwrappers
Example no. 12
def save_with_next_available_id(obj: Base, dbsession: SqlASession) -> None:
    """
    Deliberately copied from cc_db.py and maintained separately

    Save a record with the next available client pk in sequence.
    """
    cls = obj.__class__

    saved_ok = False

    # MySql doesn't support "select for update" so we have to keep
    # trying the next available ID and checking for an integrity
    # error in case another user has grabbed it by the time we have
    # committed
    # noinspection PyProtectedMember
    last_id = (
        dbsession
        # func.max(cls.id) + 1 here will do the right thing for
        # backends that support select for update (maybe not for no rows)
        .query(func.max(cls.id))
        .filter(cls._device_id == obj._device_id)
        .filter(cls._era == ERA_NOW)
        .scalar()
    ) or 0

    next_id = last_id + 1

    while not saved_ok:
        obj.id = next_id

        dbsession.add(obj)

        try:
            dbsession.flush()
            saved_ok = True
        except IntegrityError:
            dbsession.rollback()
            next_id += 1
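A hedged usage sketch of the helper above: it keeps bumping obj.id until the flush stops raising IntegrityError, so the caller hands it an unsaved instance and reads the claimed id afterwards (Patient is a hypothetical mapped class with the expected _device_id and _era columns):

    patient = Patient(_device_id=3, _era=ERA_NOW)
    save_with_next_available_id(patient, dbsession)
    print(patient.id)  # the client pk this record ended up with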
Example no. 13
async def get_chats(
        pagination: Pagination = Depends(get_pagination),
        current_user: UserResponse = Depends(get_current_verified_user),
        db: Session = Depends(get_db),
):
    """Get a list of all chats the user has."""

    chats_with_message = (db.query(
        models.Chat,
        models.Connector,
        models.Message,
        func.max(models.Message.sent_datetime),
    ).join(models.Connector).join(models.Message).group_by(
        models.Message.chat_id).filter(
            models.Connector.user_id == current_user.user_id).order_by(
                desc_op(models.Message.sent_datetime)).limit(
                    pagination.count).offset(pagination.page *
                                             pagination.count).all())

    if not chats_with_message:
        return []

    return [
        ChatResponse(
            chat_id=chat_with_message[0].chat_id,
            connector_id=chat_with_message[0].connector_id,
            name=chat_with_message[0].name,
            is_muted=chat_with_message[0].is_muted,
            is_archived=chat_with_message[0].is_archived,
            pin_position=chat_with_message[0].pin_position,
            last_message=schemas.MessageResponse(
                message_id=chat_with_message[2].message_id,
                contact_id=chat_with_message[2].contact_id,
                message=json.loads(chat_with_message[2].message),
                sent_datetime=chat_with_message[2].sent_datetime,
            ) if chat_with_message[2] else None,
        ) for chat_with_message in chats_with_message
    ]
Example no. 14
    def user_locked_out_until(cls, req: "CamcopsRequest",
                              username: str) -> Optional[Pendulum]:
        """
        When is the user locked out until?

        Args:
            req: :class:`camcops_server.cc_modules.cc_request.CamcopsRequest`
            username: the user's username

        Returns:
             Pendulum datetime in local timezone (or ``None`` if not
             locked out).
        """
        dbsession = req.dbsession
        now = req.now_utc
        locked_until_utc = dbsession.query(func.max(cls.locked_until))\
            .filter(cls.username == username)\
            .filter(cls.locked_until > now)\
            .scalar()  # type: Optional[Pendulum]
        # ... NOT first(), which returns (result,); we want just result
        if not locked_until_utc:
            return None
        return convert_datetime_to_local(locked_until_utc)
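The comment about .scalar() versus .first() is the crux here: an aggregate query always returns exactly one row, but the two methods unpack it differently. A sketch of the shapes involved (User is a hypothetical mapped class):

    q = dbsession.query(func.max(User.id))
    q.first()   # -> (value,): a one-element row tuple (value may be None)
    q.scalar()  # -> value: the bare column value, or None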
Example no. 15
File: demo.py Project: wiz21b/koi
def create_demo_database( nb_orders=50):
    mainlog.setLevel(logging.DEBUG)

    create_blank_database(configuration.get("Database", "admin_url"),
                          configuration.get("Database", "url"))
    dao = DAO()
    dao.set_session(session())

    random.seed(42)

    employees_texts  = ["Alfred Hitchcok", "Rocky Balboa", "Donald Knuth", "Ray Parker Junior", "Henry Mancini", "Nivek Ogre",
                 "Johnny Cash", "Sarah Connor"]

    nb_employees = len(employees_texts)

    for name in employees_texts:
        e = dao.employee_dao.make(name)
        e.login = (name.split(' ')[0][0:2] + name.split(' ')[1][0:2]).lower()
        e.set_encrypted_password(e.login)
        e.roles = RoleType.symbols()
        dao.employee_dao.save(e)


    for name in customers_texts:
        customer = dao.customer_dao.make(name)
        customer.address1 = u"Square Niklaus Wirth" + chr(233)
        customer.country = u"Pakistan" + chr(233)
        customer.phone = u"+494 0412 32 32 6654"
        customer.email = u"*****@*****.**"
        dao.customer_dao.save(customer)



    for name, short_name in operations_texts:
        opdef_op = dao.operation_definition_dao.make()
        opdef_op.short_id = short_name
        opdef_op.description = name
        opdef_op.imputable = True
        opdef_op.on_order = False
        opdef_op.on_operation = True
        opdef_op.XXXcost = random.random() * 50 + 50.0

        period = OperationDefinitionPeriod()
        period.start_date = date(2010, 1, 1)
        period.cost = random.randint(30, 60)
        dao.operation_definition_dao.add_period(period, opdef_op)
        dao.operation_definition_dao.save(opdef_op)

    customers = session().query(Customer).all()

    for i in range(nb_orders):
        order = dao.order_dao.make(u"Test order", customer)
        order.state = OrderStatusType.preorder_definition  # OrderStatusType.order_ready_for_production
        order.customer = customers[random.randint(0, len(customers) - 1)]
        order.creation_date = (world_begin + timedelta(days=random.randint(0, max_past_days))).date()
        dao.order_dao.save(order)

    for order in session().query(Order).all():

        position = 1
        for i in range(random.randint(3, 10)):
            order_part = dao.order_part_dao.make(order)

            texts = ["For part {}".format(random.randint(100, 999)),
                     "As plan {}".format(str(uuid.uuid4()).upper()[0:6]),
                     "Customer ref #{}".format(str(uuid.uuid4()).upper()[0:6]),
                     "#1 Bare Bright Copper Wire",
                     "#1 Copper Tubing",
                     "#1 Flashing Copper",
                     "#2 Copper Tubing",
                     "#2/3 Mix Copper",
                     "#3 Copper with Tar",
                     "#3 Roofing Copper",
                     "17-4 Stainless Steel",
                     "300 Series Stainless Steel",
                     "400 Series Stainless Steel",
                     "500/750 Insulated Cable",
                     "ACR",
                     "ACR Ends",
                     "AL Extrusion",
                     "AL Thermopane",
                     "AL/ Copper Rads w/Iron",
                     "AL/Copper Cutoffs",
                     "Alternators",
                     "Aluminum #3",
                     "Aluminum 6061",
                     "Aluminum 6063",
                     "Aluminum Boat",
                     "Aluminum Breakage",
                     "Aluminum Bumpers",
                     "Aluminum Cans",
                     "Aluminum Clips",
                     "Aluminum Copper Coil",
                     "Aluminum Copper Radiators",
                     "Aluminum Diesel Tank",
                     "Aluminum Engine Block",
                     "Aluminum Litho",
                     "Aluminum Radiators",
                     "Aluminum Rims",
                     "Aluminum Scrap",
                     "Aluminum Siding",
                     "Aluminum Thermo-Pane/Break",
                     "Aluminum Transformers",
                     "Aluminum Turnings",
                     "Aluminum Wire w/Steel",
                     "Ballasts",
                     "Bare Bright Copper",
                     "Brass Hair Wire",
                     "Brass Heater Cores",
                     "Brass Pipe",
                     "Brass Radiators",
                     "Brass Scrap",
                     "Brass Shells",
                     "Brass Turnings",
                     "Bronze",
                     "Bronze Turnings",
                     "Burnt Copper",
                     "Car/Truck Batteries",
                     "Carbide",
                     "Cast Aluminum",
                     "Catalytic Converters",
                     "CATV Wire",
                     "Christmas Lights",
                     "Circuit Breakers",
                     "Clean ACR",
                     "Clean AL Wire",
                     "Clean AL/Copper Fin",
                     "Clean Brass Radiators",
                     "Clean Brass Turnings",
                     "Clean Roofing Copper",
                     "Cobalt",
                     "Communications Wire",
                     "Composition Scrap",
                     "Compressors",
                     "Copper Scrap",
                     "Copper Transformers",
                     "Copper Turnings",
                     "Copper Yokes",
                     "Die Cast",
                     "Dirty ACR",
                     "Dirty AL Extrusion",
                     "Dirty AL Radiators",
                     "Dirty AL/Copper Fin",
                     "Dirty Aluminum Turnings",
                     "Dirty Brass",
                     "Dirty Brass Radiators",
                     "Dirty Roofing Copper",
                     "Double Insulated Cable",
                     "EC Wire",
                     "Electric Motors (Aluminum)",
                     "Electric Motors (Copper)",
                     "Elevator Wire",
                     "Enameled Copper",
                     "F 75",
                     "Fire Wire",
                     "Forktruck Battery",
                     "FSX 414",
                     "Fuses",
                     "Gold",
                     "Hastelloy Solids",
                     "Hastelloy Turnings",
                     "Heliax Wire",
                     "High Speed Steel",
                     "Housewire",
                     "Inconel",
                     "Inconel 792",
                     "Inconel 800",
                     "Inconel 825",
                     "Insulated Aluminum Wire",
                     "Insulated Copper Cable",
                     "Insulated Copper Wire",
                     "Insulated Steel BX",
                     "Invar",
                     "Junkshop Extrusion",
                     "Kovar",
                     "Lead",
                     "Lead Batteries",
                     "Lead Coated Copper",
                     "Lead Shot",
                     "Lead Wheel Weights",
                     "Light Copper",
                     "MarM247",
                     "Meatballs (Electric Motors)",
                     "Monel",
                     "Ni-Cad Batteries",
                     "Nickel",
                     "Non Magnetic Stainless Steel",
                     "Old Sheet Aluminum",
                     "Painted Aluminum",
                     "Pewter",
                     "Platinum",
                     "Plumbers Brass",
                     "Prepared Aluminum",
                     "Red Brass",
                     "Refined Rebrass & Copper",
                     "Rod Brass",
                     "Rod Brass Turnings",
                     "Romex® Wire",
                     "Sealed Units",
                     "Semi-Red Brass",
                     "Sheet Aluminum",
                     "Silver",
                     "Silver Plated Copper",
                     "Solid Core Heliax",
                     "Stainless Steel",
                     "Stainless Steel Breakage",
                     "Stainless Steel Heatsinks",
                     "Stainless Steel Kegs",
                     "Stainless Steel Sinks",
                     "Stainless Turnings",
                     "Starters",
                     "Steel BX",
                     "Steel Case Batteries",
                     "THHN Wire",
                     "Tin Babbit",
                     "Tin Coated Copper",
                     "Tin Insulated Copper Wire",
                     "Unclean Brass Radiators",
                     "Wire Scrap",
                     "Wiring Harness",
                     "Yellow Brass",
                     "Zinc",
                     "Zorba",
                     "#1 Heavy Melting Steel",
                     "#1 HMS",
                     "#1 Prepared",
                     "#1 Steel",
                     "#2 Heavy Melting Steel",
                     "#2 HMS",
                     "#2 Prepared",
                     "Automobiles",
                     "Busheling",
                     "Car w/Tires",
                     "Cast Iron",
                     "Complete Car",
                     "Crushed Cars",
                     "Dishwashers",
                     "Dry Automobile",
                     "Dryers",
                     "Incomplete Car",
                     "Light Iron",
                     "Machine Shop Turning/Iron Borings",
                     "Plate & Structural Steel",
                     "Refrigerators",
                     "Scrap Iron",
                     "Sheet Iron",
                     "Shreddable Steel",
                     "Steel Shavings",
                     "Tin",
                     "Uncleaned Auto Cast",
                     "Unprepared Cast Iron",
                     "Unprepared HMS",
                     "Unprepared P&S",
                     "Washing Machines",
                     "Water Heaters",
                     "Wet Automobile",
                     "Back Panels",
                     "Backup Batteries",
                     "Cellphones",
                     "Computer Wire",
                     "CPU Chips",
                     "CRT",
                     "Empty PC Servers",
                     "Hard Drive Boards",
                     "Hard Drives",
                     "Hard Drives without Boards",
                     "Ink Cartridges",
                     "Keyboards",
                     "Laptops",
                     "LCD Monitors (not working)",
                     "LCD Monitors (working)",
                     "Low Grade Boards",
                     "Mainframes",
                     "Memory",
                     "Mice",
                     "Motherboards",
                     "Non-Green PC Board",
                     "PC Board with Steel",
                     "PC Boards",
                     "PC Tower",
                     "Power Supplies",
                     "Printers/Fax Machines",
                     "Servers",
                     "Speakers",
                     "Telecom Equipment"]
            order_part.description = random.choice(texts)
            order_part.position = position
            order_part.priority = random.randint(1, 5)
            position += 1
            order_part.qty = random.randint(4, 4+10)
            order_part.sell_price = random.randint(100, 200)
            dao.order_part_dao.save(order_part)

            pf = dao.production_file_dao.make()
            pf.order_part = order_part
            order_part.production_file = [pf]
            session().add(pf)
            session().flush()

    operation_definitions = session().query(OperationDefinition).all()

    for pf in session().query(ProductionFile).all():
        for i in range(random.randint(3, 10)):
            operation = dao.operation_dao.make()
            operation.production_file = pf

            begin = random.randint(0, len(lorem) - 5)
            end = begin + min(6, random.randint(begin, len(lorem) - 1))
            operation.description = " ".join(lorem[begin:end])
            operation.operation_model = random.choice(operation_definitions)
            operation.planned_hours = float(random.randint(1, 20)) / 5 # per unit
            session().add(operation)

    session().commit()

    for order in session().query(Order).all():
        dao.order_dao.recompute_position_labels(order)
        session().commit()

    tasks = []

    for operation in session().query(Operation).all():
        task = TaskOnOperation()
        task.operation_id = operation.operation_id
        session().add(task)
        session().flush()
        tasks.append(task)


    order_schedules = dict()

    for order in session().query(Order).all():

        mainlog.info("populating order")

        order_start = order.creation_date
        central_clock.set_now_function(lambda: datetime( order_start.year, order_start.month, order_start.day))

        if True or random.randint(0,10) > 1:
            # a production order
            order_end = order_start + timedelta(days=(30 + order.order_id % 20))

            mainlog.debug("Interval {} {}".format( order_start, order_end))

            dao.order_dao.change_order_state(order.order_id, OrderStatusType.preorder_definition)
            dao.order_dao.change_order_state(order.order_id, OrderStatusType.order_ready_for_production)
            order_schedules[order.order_id] = (order_start, order_end)
        else:
            # a preorder

            dao.order_dao.change_order_state(order.order_id, OrderStatusType.preorder_definition)
            dao.order_dao.change_order_state(order.order_id, OrderStatusType.preorder_sent)

    mainlog.info("There are {} tasks".format(len(tasks)))
    mainlog.info("There are {} order scheduled for work".format(len(order_schedules)))

    # _make_tar(TaskActionReportType.start_task, datetime.now(), e, task)

    employees = session().query(Employee).all()

    # Build the list of tasks available on each day
    tasks_on_day = dict()
    for task in tasks:
        order = task.operation.production_file.order_part.order
        if order.order_id in order_schedules:
            order_start, order_end = order_schedules[order.order_id]

            for d in daterange( order_start, order_end):
                if d not in tasks_on_day:
                    tasks_on_day[d] = []
                tasks_on_day[d].append(task)


    for day in range( int(max_past_days)):
        d = world_begin + timedelta(days=2 + day)
        d = date( d.year, d.month, d.day)

        if d.weekday() not in (5, 6) and d in tasks_on_day:

            employees_with_work = []
            central_clock.set_now_function(lambda: datetime(d.year,d.month,d.day))

            # tasks we can work on

            workable_tasks = tasks_on_day[d]

            mainlog.debug("{} workable tasks".format(len(workable_tasks)))

            if workable_tasks:
                # Now put actual work on those tasks
                for employee in employees:
                    # Each employee may or may not work
                    if random.randint(0,10) > 2:

                        total_duration = 0

                        while total_duration < 8:
                            task = random.choice(workable_tasks)
                            duration = float(random.randint(1,4)) + float(random.randint(0,4)) / 4.0
                            tt = _make_timetrack( task.task_id, employee.employee_id,
                                                  d,
                                                  duration)
                            session().add(tt)

                            total_duration += duration


                        dts = DayTimeSynthesis()
                        dts.employee_id = tt.employee_id
                        dts.day = d
                        dts.presence_time = total_duration
                        session().add( dts)
                    else:
                        from koi.people_admin.people_admin_mapping import DayEventType, DayEvent
                        from koi.people_admin.people_admin_service import people_admin_service

                        de = DayEvent()
                        de.employee_id = employee.employee_id
                        de.event_type = random.choice(DayEventType.symbols())
                        people_admin_service.set_event_on_days( de, [ (d, 1) ])

    for i in range(3):
        for order in session().query(Order).filter(Order.state == OrderStatusType.order_ready_for_production).all():
            parts_ids_quantities = dict()

            for order_part in order.parts:
                mainlog.debug("qex = {} / {}".format(order_part.tex2, order_part.qty))
                if order_part.tex2 < order_part.qty and order_part.total_hours:
                    parts_ids_quantities[order_part.order_part_id] = random.randint(1, order_part.qty - order_part.tex2)

            if parts_ids_quantities:
                mainlog.debug("Creating delivery slip")
                order_start, order_end = order_schedules[order.order_id]

                for dsp in session().query(DeliverySlipPart).filter(DeliverySlipPart.order_part_id.in_(parts_ids_quantities.keys())).all():
                    if dsp.delivery_slip.creation > datetime( order_start.year, order_start.month, order_start.day):
                        order_start = dsp.delivery_slip.creation.date()

                mainlog.debug("{} {}".format( type(order_end), type(order_start)))
                mainlog.debug("{} {}".format( order_start, order_end))

                days_between = (order_end - order_start).days
                if days_between > 0:
                    the_now = order_start + timedelta( days=random.randint(1, 1 + int(days_between / 2)))
                    mainlog.debug("Adding slips to an order on {}".format(the_now))
                    the_now = datetime( the_now.year, the_now.month, the_now.day) + timedelta(seconds=random.randint(1,10000))

                    central_clock.set_now_function( lambda : the_now)

                    dao.delivery_slip_part_dao.make_delivery_slip_for_order( order.order_id, parts_ids_quantities,
                        the_now, False)

            session().commit()


    for order in session().query(Order).filter(Order.state == OrderStatusType.order_ready_for_production).all():
        parts_ids_quantities = dict()

        for order_part in order.parts:
            mainlog.debug("qex = {} / {}".format(order_part.tex2, order_part.qty))
            if order_part.tex2 < order_part.qty and order_part.total_hours:
                parts_ids_quantities[order_part.order_part_id] = order_part.qty - order_part.tex2


        if parts_ids_quantities:
            mainlog.debug("Creating last delivery slip")

            the_now = world_begin + timedelta(days=max_past_days + random.randint(1,grace_period))
            mainlog.debug("Adding slips to an order on {}".format(the_now))
            the_now = datetime( the_now.year, the_now.month, the_now.day) + timedelta(seconds=random.randint(1,10000))

            central_clock.set_now_function( lambda : the_now)

            dao.delivery_slip_part_dao.make_delivery_slip_for_order( order.order_id, parts_ids_quantities,
                the_now, False)


    # Now we adapt the sell price to match the costs

    TWO_PLACES = decimal.Decimal(10) ** -2
    for order_part in session().query(OrderPart).all():
        order_part.sell_price = decimal.Decimal(
            (1.0 + random.random()) * dao.order_part_dao.value_work_on_order_part_up_to_date(
                order_part.order_part_id, date.today())).quantize(TWO_PLACES)

    mainlog.info("Not completed orders = {}".format(
        session().query(Order).filter(Order.state != OrderStatusType.order_completed).count()))

    mainlog.info("Not completed parts = {}".format(
        session().query(OrderPart).filter(OrderPart.state != OrderPartStateType.completed).count()))

    mainlog.info("Maximum completion date for order parts = {}".format(
        session().query( func.max( OrderPart.completed_date)).scalar()))
Example no. 16
def get_rows_for_wide_csv(session_code):
    if session_code:
        sessions = [Session.objects_get(code=session_code)]
    else:
        sessions = dbq(Session).order_by('id').all()
    session_fields = get_fields_for_csv(Session)
    participant_fields = get_fields_for_csv(Participant)

    session_ids = [session.id for session in sessions]
    pps = (Participant.objects_filter(
        Participant.session_id.in_(session_ids)).order_by(
            Participant.id).all())
    session_cache = {row.id: row for row in sessions}

    session_config_fields = set()
    for session in sessions:
        for field_name in SessionConfig(session.config).editable_fields():
            session_config_fields.add(field_name)
    session_config_fields = list(session_config_fields)

    if not pps:
        # 1 empty row
        return [[]]

    header_row = [f'participant.{fname}' for fname in participant_fields]
    header_row += [f'session.{fname}' for fname in session_fields]
    header_row += [
        f'session.config.{fname}' for fname in session_config_fields
    ]
    rows = [header_row]

    for pp in pps:
        session = session_cache[pp.session_id]
        row = [getattr(pp, fname) for fname in participant_fields]
        row += [getattr(session, fname) for fname in session_fields]
        row += [session.config.get(fname) for fname in session_config_fields]
        rows.append(row)

    order_of_apps = _get_best_app_order(sessions)

    rounds_per_app = OrderedDict()
    for app_name in order_of_apps:
        try:
            models_module = get_models_module(app_name)
        except ModuleNotFoundError:
            # this should only happen with devserver because on production server,
            # you would need to resetdb after renaming an app.
            logger.warning(
                f'Cannot export data for app {app_name}, which existed when the session was run '
                f'but no longer exists.')
            continue

        highest_round_number = dbq(
            func.max(models_module.Subsession.round_number)).scalar()

        if highest_round_number is not None:
            rounds_per_app[app_name] = highest_round_number
    for app_name in rounds_per_app:
        for round_number in range(1, rounds_per_app[app_name] + 1):
            new_rows = get_rows_for_wide_csv_round(app_name, round_number,
                                                   sessions)
            for i in range(len(rows)):
                rows[i].extend(new_rows[i])

    return [[sanitize_for_csv(v) for v in row] for row in rows]
Example no. 17
def home():

    total_texts = db_session.query(func.count(AllTweets.id).label('total_texts')).filter(AllTweets.context.is_(CONTEXT)).first().total_texts


    total_terms = db_session.query(func.count(Termos.id).label('total_terms')).filter(Termos.context.is_(CONTEXT)).first().total_terms
    total_processed = db_session.query(func.count(AllTweets.id).label("total_processed")).filter(AllTweets.context.is_(CONTEXT)).filter(AllTweets.processed==1).first().total_processed
    

    date_max = db_session.query(AllTweets.id, func.max(AllTweets.date).label('last_date')).filter(AllTweets.context.is_(CONTEXT)).first().last_date
    date_min = db_session.query(AllTweets.id, func.min(AllTweets.date).label('last_date')).filter(AllTweets.context.is_(CONTEXT)).first().last_date

    termos, hashtags, usuarios_rt, usuarios_citados, bigram_trigram = load_from_db(10)


    if HASHTAG == "True":
        query_a = Hashtags.query.filter(and_(Hashtags.hashtag.is_(SIDE_A),Hashtags.context.is_(CONTEXT))).first()
        query_b = Hashtags.query.filter(and_(Hashtags.hashtag.is_(SIDE_B),Hashtags.context.is_(CONTEXT))).first()
    else:
        query_a = Termos.query.filter(and_(Termos.termo.is_(SIDE_A),Termos.context.is_(CONTEXT))).first()
        query_b = Termos.query.filter(and_(Termos.termo.is_(SIDE_B),Termos.context.is_(CONTEXT))).first()

    total_a = 0
    total_b = 0
    percent_a = 0
    percent_b = 0
    total = 0

    if query_a and query_b:
        total_a = float(query_a.frequencia)
        total_b = float(query_b.frequencia)

        total = total_a + total_b

        percent_a = (total_a / total) * 100
        percent_b = (total_b / total) * 100

    
    profiles_info = get_profile()

    query_texts = db_session.query(AllTweets)
    all_ = []
    for q in query_texts:

        teste = q.text.decode('UTF-8')

        t = {}
        t['tweet_id'] = q.tweet_id
        t['user'] = q.user
        t['text'] = teste
        t['date'] = q.date
        all_.append(t)

    tweets = jsonify(**{'list': all_})


    dict_values = {
        'total_texts': total_texts,
        'total_terms': total_terms,
        'total_processed': total_processed,
        'date_max': date_max,
        'date_min': date_min,
        'side_a': SIDE_A,
        'side_b': SIDE_B,
        'termos': termos,
        'hashtags': hashtags,
        'usuarios_rt': usuarios_rt,
        'usuarios_citados': usuarios_citados,
        'total': (percent_a, percent_b),
        'total_value': (int(total_a), int(total_b)),
        'bigram_trigram': bigram_trigram,
        'context': CONTEXT,
        'profile_a': PROFILE_A,
        'profile_b': PROFILE_B,
        'dict_profile': profiles_info
    }

    """
        ------------------Paginação---------------------
    """
    
    current_page = request.args.get('page', 1, type=int)

    num_posts = total_texts
    total_num_pages = int(math.ceil(num_posts / items_per_page))
    iter_pages = list(range(1, total_num_pages + 1))

    """
        ------------------Paginação---------------------
    """


    return render_template("home.html",values=dict_values, tweets=all_, iter_pages=iter_pages,
                           current_page=current_page, total_pages=total_num_pages)
Example no. 18
def map():

    total_texts = db_session.query(func.count(AllTweets.id).label('total_texts')).filter(AllTweets.context.is_(CONTEXT)).first().total_texts

    total_terms = db_session.query(func.count(Termos.id).label('total_terms')).filter(Termos.context.is_(CONTEXT)).first().total_terms
    total_processed = db_session.query(func.count(AllTweets.id).label("total_processed")).filter(AllTweets.context.is_(CONTEXT)).filter(AllTweets.processed==1).first().total_processed
    
    date_max = db_session.query(AllTweets.id, func.max(AllTweets.date).label('last_date')).filter(AllTweets.context.is_(CONTEXT)).first().last_date
    date_min = db_session.query(AllTweets.id, func.min(AllTweets.date).label('last_date')).filter(AllTweets.context.is_(CONTEXT)).first().last_date

    termos, hashtags, usuarios_rt, usuarios_citados, bigram_trigram = load_from_db(10)

    if HASHTAG == "True":
        query_a = Hashtags.query.filter(and_(Hashtags.hashtag.is_(SIDE_A),Hashtags.context.is_(CONTEXT))).first()
        query_b = Hashtags.query.filter(and_(Hashtags.hashtag.is_(SIDE_B),Hashtags.context.is_(CONTEXT))).first()
    else:
        query_a = Termos.query.filter(and_(Termos.termo.is_(SIDE_A),Termos.context.is_(CONTEXT))).first()
        query_b = Termos.query.filter(and_(Termos.termo.is_(SIDE_B),Termos.context.is_(CONTEXT))).first()

    total_a = 0
    total_b = 0
    percent_a = 0
    percent_b = 0
    total = 0

    if query_a and query_b:
        total_a = float(query_a.frequencia)
        total_b = float(query_b.frequencia)

        total = total_a + total_b

        percent_a = (total_a / total) * 100
        percent_b = (total_b / total) * 100

    profiles_info = get_profile()

    query_texts = db_session.query(AllTweets)
    all_ = []
    for q in query_texts:

        teste = q.text.decode('UTF-8')

        t = {}
        t['tweet_id'] = q.tweet_id
        t['user'] = q.user
        t['text'] = teste
        t['date'] = q.date
        all_.append(t)

    tweets = jsonify(**{'list': all_})

    dict_values = {
        'total_texts': total_texts,
        'total_terms': total_terms,
        'total_processed': total_processed,
        'date_max': date_max,
        'date_min': date_min,
        'side_a': SIDE_A,
        'side_b': SIDE_B,
        'termos': termos,
        'hashtags': hashtags,
        'usuarios_rt': usuarios_rt,
        'usuarios_citados': usuarios_citados,
        'total': (percent_a, percent_b),
        'total_value': (int(total_a), int(total_b)),
        'bigram_trigram': bigram_trigram,
        'context': CONTEXT,
        'profile_a': PROFILE_A,
        'profile_b': PROFILE_B,
        'dict_profile': profiles_info
    }

    def push_github():

        data = []
        geoms = []
        tweet_features = []
        with open('raw.json') as twtr_hamdata:    
            for satir in twtr_hamdata:
                data.append(json.loads(satir))

        for i in range(0,len(data)):
            geoms.append(data[i]["geo"]["coordinates"])
            #print (geoms[i][0], geoms[i][1])
            my_feature = Feature(geometry=Point((float(geoms[i][1]),float(geoms[i][0]))),\
            properties={"user_location":data[i]["user"]["location"],\
            "user_id": data[i]["id"],\
            "user_name":data[i]["user"]["name"],\
            "screen_name":data[i]["user"]["screen_name"],\
            "followers_count":data[i]["user"]["followers_count"],\
            "tweet":data[i]["text"],\
            "tweet_time":data[i]["created_at"]})
            tweet_features.append(my_feature)
	        #print tweet_features
        tweet_FeatureCollection = FeatureCollection(tweet_features[:])
        #print tweet_FeatureCollection["type"]
        try:
            #saveFile = open('tweets.geojson','a')
            data = json.dumps(tweet_FeatureCollection)
            #saveFile.close()
        except Exception as error:
            print ("Unable to write %s error"%error)


        g = Github('grandslav', '5kslj8130614')
        repo = g.get_user().get_repo("GeoJSONTweets")
        contents = repo.get_contents("/tweets.geojson")
        new_content = data
        # TODO: append to the file instead of replacing it ("crimes01112018.geojson")
        repo.update_file("/tweets.geojson", "Updating geojson data", new_content, contents.sha)
        print("Arquivo .geojson atualizado")

    update = True
    if update:
        print("Atualizando geojson...")
        push_github()  # pushes the updated geojson file to the repository

    return render_template('map.html', values=dict_values, title='Map')
Example no. 19
    def _maxid(self):
        return db.session.query(func.max(Question.id)).first()
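Because it ends with .first(), the helper above returns a one-element row tuple rather than the id itself, so callers have to unpack it. A sketch of a bare-value variant under the same names:

    def _maxid(self):
        return db.session.query(func.max(Question.id)).scalar()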
Example no. 20
mod_chk = [mod for mod in dir(adm_mods) if mod[0].isupper()]
def input_model():
    while True:
        mod = input('Model name?  ')
        if mod in mod_chk:
            return mod
        else:
            print('%s is not an admin Model' % repr(mod))

    
mod_name = input_model()
table_object_id = sess.query(TableObject.id). \
    filter_by(name=eval(mod_name).__tablename__).one()[0]

id_nbr = int(sess.query(func.max(Field.name)).one()[0][1:]) + 1

insp = inspect(eval(mod_name))
for aa, col in insp.columns.items():

    field = Field(active=True,
                  name='F%06d' % (id_nbr),
                  display_name=col.name,
                  table_object_id=table_object_id,
                  primary_key=col.primary_key,
                  organization_id=1,
                  order=1,
                  description=col.name,
                  data_type_id=type_map[col.type.__visit_name__][0])

    length = getattr(col.type, 'length', None)
Example no. 21
    def selectables(cls, bag, agg_spec):
        """ Create a list of statements from spec

        :type bag: mongosql.bag.ModelPropertyBags
        :rtype: list[sqlalchemy.sql.elements.ColumnElement]
        """
        # TODO: calculation expressions for selection: http://docs.mongodb.org/manual/meta/aggregation-quick-reference/
        selectables = []
        for comp_field, comp_expression in agg_spec.items():
            # Column reference
            if isinstance(comp_expression, string_types):
                selectables.append(
                    bag.columns[comp_expression].label(comp_field))
                continue

            # Computed expression
            assert isinstance(
                comp_expression, dict
            ), 'Aggregate: Expression should be either a column name, or an object'
            assert len(
                comp_expression
            ) == 1, 'Aggregate: expression can only contain a single operator'
            operator, expression = comp_expression.popitem()

            # Expression statement
            if isinstance(expression, int) and operator == '$sum':
                # Special case for count
                expression_stmt = expression
            elif isinstance(expression, string_types):
                # Column name
                expression_stmt = bag.columns[expression]
                # Json column?
                if bag.columns.is_column_json(expression):
                    # PostgreSQL always returns text values from it, and for aggregation we usually need numbers :)
                    expression_stmt = cast(expression_stmt, Float)
            elif isinstance(expression, dict):
                # Boolean expression
                expression_stmt = MongoCriteria.statement(bag, expression)
                # Need to cast it to int
                expression_stmt = cast(expression_stmt, Integer)
            else:
                raise AssertionError(
                    'Aggregate: expression should be either a column name, or an object'
                )

            # Operator
            if operator == '$max':
                comp_stmt = func.max(expression_stmt)
            elif operator == '$min':
                comp_stmt = func.min(expression_stmt)
            elif operator == '$avg':
                comp_stmt = func.avg(expression_stmt)
            elif operator == '$sum':
                if isinstance(expression_stmt, int):
                    # Special case for count
                    comp_stmt = func.count()
                    if expression_stmt != 1:
                        comp_stmt *= expression_stmt
                else:
                    comp_stmt = func.sum(expression_stmt)
            else:
                raise AssertionError(
                    'Aggregate: unsupported operator "{}"'.format(operator))

            # Append
            selectables.append(comp_stmt.label(comp_field))

        return selectables
Example no. 22
    def get_count(cls):
        with new_session() as session:
            min_id = session.query(func.min(ErrorReport.id)).scalar() or 0
            max_id = session.query(func.max(ErrorReport.id)).scalar() or 0

            return max_id - min_id
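Note that max(id) - min(id) measures the id span, not the row count: deleted rows leave gaps, so this over-counts unless ids are contiguous. In exchange it only touches the two ends of the primary-key index. The exact (and potentially slower) version is a plain COUNT; a sketch with the same names:

    from sqlalchemy import func

    with new_session() as session:
        exact = session.query(func.count(ErrorReport.id)).scalar() or 0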
Example no. 23
    def get_count(cls):
        with new_session() as session:
            return (session.query(func.max(Result.id)).scalar() or 0) \
                   - (session.query(func.min(Result.id)).scalar() or 0)
Example no. 24
def home():

    total_texts = db_session.query(
        func.count(AllTweets.id).label('total_texts')).filter(
            AllTweets.context.is_(CONTEXT)).first().total_texts

    if total_texts == 0:
        return render_template("out.html")

    total_terms = db_session.query(
        func.count(Termos.id).label('total_terms')).filter(
            Termos.context.is_(CONTEXT)).first().total_terms
    total_processed = db_session.query(
        func.count(AllTweets.id).label("total_processed")).filter(
            AllTweets.context.is_(CONTEXT)).filter(
                AllTweets.processed == 1).first().total_processed

    date_max = db_session.query(
        AllTweets.id,
        func.max(AllTweets.date).label('last_date')).filter(
            AllTweets.context.is_(CONTEXT)).first().last_date
    date_min = db_session.query(
        AllTweets.id,
        func.min(AllTweets.date).label('last_date')).filter(
            AllTweets.context.is_(CONTEXT)).first().last_date

    termos, hashtags, usuarios_rt, usuarios_citados, bigram_trigram = load_from_db(
        10)

    if HASHTAG == "True":
        query_a = Hashtags.query.filter(
            and_(Hashtags.hashtag.is_(SIDE_A),
                 Hashtags.context.is_(CONTEXT))).first()
        query_b = Hashtags.query.filter(
            and_(Hashtags.hashtag.is_(SIDE_B),
                 Hashtags.context.is_(CONTEXT))).first()
    else:
        query_a = Termos.query.filter(
            and_(Termos.termo.is_(SIDE_A),
                 Termos.context.is_(CONTEXT))).first()
        query_b = Termos.query.filter(
            and_(Termos.termo.is_(SIDE_B),
                 Termos.context.is_(CONTEXT))).first()

    total_a = 0
    total_b = 0
    percent_a = 0
    percent_b = 0
    total = 0

    if query_a and query_b:
        total_a = float(query_a.frequencia)
        total_b = float(query_b.frequencia)

        total = total_a + total_b

        percent_a = (total_a / total) * 100
        percent_b = (total_b / total) * 100

    profiles_info = get_profile()

    dict_values = {
        'total_texts': total_texts,
        'total_terms': total_terms,
        'total_processed': total_processed,
        'date_max': date_max,
        'date_min': date_min,
        'side_a': SIDE_A,
        'side_b': SIDE_B,
        'termos': termos,
        'hashtags': hashtags,
        'usuarios_rt': usuarios_rt,
        'usuarios_citados': usuarios_citados,
        'total': (percent_a, percent_b),
        'total_value': (int(total_a), int(total_b)),
        'bigram_trigram': bigram_trigram,
        'context': CONTEXT,
        'profile_a': PROFILE_A,
        'profile_b': PROFILE_B,
        'dict_profile': profiles_info
    }

    return render_template("index.html", values=dict_values)
Example no. 25
roles.remove(from_role) # Remove from_role from choices.
to_role = input_role('TO')

do_you_want_message(from_role, to_role, role_mods)


for model in role_mods:
    recs = sess.query(eval(model)).filter_by(role_id=from_role.id). \
        filter_by(active=True).order_by('id')
    if recs.count() == 0:
        print('%s has 0 records. Nothing to dupe.' % model)
        continue
    
    initials = ''.join([c for c in model if c.isupper()]) # prepended to rec name.
    # The base number must guarantee uniqueness
    sequence  = sess.query(func.max(eval(model).id)).one()[0] + 3001

    for indx, rec  in enumerate(recs):
        new_name = '%s%08d' % (initials, sequence+indx)
        if new_name == rec.name:    
            print("name mis-match, new_name == rec.name, %s" % new_name)
        new_rec = eval(model)()  # A new model instance.
        for k,v in new_record(model, rec, name=new_name).items():
            setattr(new_rec, k, v)  # Loading the new instance.
        sess.add(new_rec) # New object pending commit.

    print()
    print("%d records were created for %s" % (recs.count(), model))
    ans = input("Do you want to commit them? (y or n)? ")
    if ans == 'y':
        sess.commit()   # Commit new records by model.
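The sequence base above is MAX(id) as observed when the script runs, so the
reserved block of names is only safe while no other writer is inserting
rows. A slightly more defensive variant (a sketch against the same sess and
models) also tolerates an empty table:

    # COALESCE(MAX(id), 0) keeps the arithmetic working on an empty table.
    base = sess.query(func.coalesce(func.max(eval(model).id), 0)).scalar()
    sequence = base + 3001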
Example no. 26
    def get_periods_max_cost(self, session):
        # One row per period: its cost paired with the highest
        # total_num among that period's users.
        return session.query(Period.cost, func.max(UserTotalNum.total_num).label("max_num")).\
                filter(Period.pid == UserTotalNum.period_id).\
                group_by(Period.pid).all()
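Selecting Period.cost while grouping only by Period.pid relies on cost being
functionally dependent on pid (i.e. pid is the key); strict GROUP BY modes
reject such bare columns otherwise. A portable sketch, under the same
models, simply groups by both:

    # Grouping by both columns satisfies strict ONLY_FULL_GROUP_BY modes.
    session.query(Period.cost,
                  func.max(UserTotalNum.total_num).label("max_num")).\
        filter(Period.pid == UserTotalNum.period_id).\
        group_by(Period.pid, Period.cost).all()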
Example no. 27
    def add_many(self, new_urls, **kwargs):
        assert not isinstance(new_urls, (str, bytes)), \
            'Expected a list-like. Got {}'.format(new_urls)
        referrer = kwargs.pop('referrer', None)
        top_url = kwargs.pop('top_url', None)

        new_urls = tuple(new_urls)

        if not new_urls:
            return ()

        assert isinstance(new_urls[0], dict), type(new_urls[0])
        url_strings = list(item['url'] for item in new_urls)

        if referrer:
            url_strings.append(referrer)

        if top_url:
            url_strings.append(top_url)

        with self._session() as session:
            query = insert(URLString).prefix_with('OR IGNORE')
            session.execute(query, [{'url': url} for url in url_strings])

            bind_values = dict(status=Status.todo)
            bind_values.update(**kwargs)

            bind_values['url_str_id'] = select([URLString.id])\
                .where(URLString.url == bindparam('url'))

            if referrer:
                bind_values['referrer_id'] = select([URLString.id])\
                    .where(URLString.url == bindparam('referrer'))
            if top_url:
                bind_values['top_url_str_id'] = select([URLString.id])\
                    .where(URLString.url == bindparam('top_url'))

            query = insert(URL).prefix_with('OR IGNORE').values(bind_values)

            all_row_values = []

            for item in new_urls:
                assert 'url' in item
                assert 'referrer' not in item
                assert 'top_url' not in item

                row_values = dict(item)  # Copy so the caller's dicts aren't mutated.

                if referrer:
                    row_values['referrer'] = referrer
                if top_url:
                    row_values['top_url'] = top_url

                all_row_values.append(row_values)

            last_primary_key = session.query(func.max(URL.id)).scalar() or 0

            session.execute(query, all_row_values)

            query = select([URLString.url]).where(
                and_(URL.id > last_primary_key,
                     URL.url_str_id == URLString.id)
                )
            added_urls = [row[0] for row in session.execute(query)]

        return added_urls
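The MAX(id) snapshot is what lets the method report which URLs were actually
inserted: INSERT OR IGNORE says nothing about skipped duplicates, so any URL
row with an id above the snapshot must be new. This assumes a single writer;
a concurrent inserter's rows would show up in the result too. The pattern,
distilled (rows being the list of column dicts to insert):

    # Snapshot, bulk-insert with OR IGNORE, then read back only new rows.
    last_id = session.query(func.max(URL.id)).scalar() or 0
    session.execute(insert(URL).prefix_with('OR IGNORE'), rows)
    new_rows = session.query(URL).filter(URL.id > last_id).all()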
Example no. 28
def init_static() -> Static:
    names = [
        name for name, in cast(
            Iterator[str],
            training_db.query(MemeCorrectTrain.name).distinct(
                MemeCorrectTrain.name),
        )
    ]
    names_to_shuffle = deepcopy(names)
    name_num = {name: idx for idx, name in enumerate(names)}
    num_name = {str(v): k for k, v in name_num.items()}
    max_name_idx: TestTrainToMax = {
        "train": {
            "not_a_meme":
            cast(
                int,
                training_db.query(func.max(NotAMemeTrain.name_idx)).scalar(),
            ),
            "not_a_template":
            cast(
                int,
                training_db.query(func.max(
                    NotATemplateTrain.name_idx)).scalar(),
            ),
            "correct": {
                name: cast(
                    int,
                    training_db.query(func.max(
                        MemeCorrectTrain.name_idx)).filter(
                            cast(ClauseElement,
                                 MemeCorrectTrain.name == name)).scalar(),
                )
                for name in names
                if name not in ["not_a_meme", "not_a_template"]
            },
            "incorrect": {
                name: cast(
                    int,
                    training_db.query(func.max(
                        MemeIncorrectTrain.name_idx)).filter(
                            cast(ClauseElement,
                                 MemeIncorrectTrain.name == name)).scalar(),
                )
                for name in names
                if name not in ["not_a_meme", "not_a_template"]
            },
        },
        "test": {
            "not_a_meme":
            cast(
                int,
                training_db.query(func.max(NotAMemeTest.name_idx)).scalar(),
            ),
            "not_a_template":
            cast(
                int,
                training_db.query(func.max(
                    NotATemplateTest.name_idx)).scalar(),
            ),
            "correct": {
                name: cast(
                    int,
                    training_db.query(func.max(
                        MemeCorrectTest.name_idx)).filter(
                            cast(ClauseElement,
                                 MemeCorrectTest.name == name)).scalar(),
                )
                for name in names
                if name not in ["not_a_meme", "not_a_template"]
            },
            "incorrect": {
                name: cast(
                    int,
                    training_db.query(func.max(
                        MemeIncorrectTest.name_idx)).filter(
                            cast(ClauseElement,
                                 MemeIncorrectTest.name == name)).scalar(),
                )
                for name in names
                if name not in ["not_a_meme", "not_a_template"]
            },
        },
    }
    static: Static = {
        "names": names,
        "names_to_shuffle": names_to_shuffle,
        "name_num": name_num,
        "num_name": num_name,
        "folder_count": {
            "not_a_meme": len(os.listdir(NOT_MEME_REPO)),
            "not_a_template": len(os.listdir(NOT_TEMPLATE_REPO)),
            **{
                name: len(os.listdir(MEMES_REPO + name))
                for name in os.listdir(MEMES_REPO)
            },
        },
        "max_name_idx": max_name_idx,
    }
    return static
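The four "correct"/"incorrect" blocks above differ only in the model class
being queried, so a small helper would remove most of the repetition. A
hypothetical sketch (it closes over the same training_db and names, and
drops the cast(...) typing shims for brevity):

    def max_idx_per_name(model):
        # Highest name_idx recorded for each meme name in `model`.
        return {
            name: training_db.query(func.max(model.name_idx)).filter(
                model.name == name).scalar()
            for name in names
            if name not in ["not_a_meme", "not_a_template"]
        }

With it, the "correct" entry of the train dict becomes
max_idx_per_name(MemeCorrectTrain), and likewise for the other three.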
Example no. 29
    def latest_for_season(self, name, season):
        # MAX(episode) is NULL when nothing matches, hence the Maybe wrapper.
        q = (self._db.query(func.max(Release.episode)).filter_by(
            name=name, season=season).join(ReleaseMonitor))
        return Maybe(*q.first())
Example no. 30
    def get_count(cls):
        # Span between the newest and oldest ids; cheap, but gaps left by
        # deleted rows are counted as if they were rows.
        with new_session() as session:
            return (session.query(func.max(Result.id)).scalar() or 0) \
                   - (session.query(func.min(Result.id)).scalar() or 0)
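When an exact figure matters, COUNT is the correct (if costlier) query; the
MAX - MIN difference is only a cheap upper bound:

    # COUNT(*) is exact; MAX(id) - MIN(id) over-counts deleted-row gaps.
    exact = session.query(func.count(Result.id)).scalar()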
Example no. 32
    def _build_query(self, table, filter_values):
        having = []
        filter_cols = []
        external_cols = _get_grouping(filter_values)

        for fil in self.filters:
            if isinstance(fil, ANDFilter):
                filter_cols.append(fil.filters[0].column_name)
                having.append(fil)
            elif isinstance(fil, RawFilter):
                having.append(fil)
            elif fil.column_name not in [
                    'group', 'gender', 'group_leadership', 'disaggregate_by',
                    'table_card_group_by'
            ]:
                if fil.column_name not in external_cols and fil.column_name != 'maxmin':
                    filter_cols.append(fil.column_name)
                having.append(fil)

        group_having = ''
        having_group_by = []
        if ('disaggregate_by' in filter_values and filter_values['disaggregate_by'] == 'group') or \
                (filter_values.get('table_card_group_by') == 'group_leadership'):
            having_group_by.append('group_leadership')
        elif 'group_leadership' in filter_values and filter_values[
                'group_leadership']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) " \
                           "= :group_leadership and group_leadership=\'Y\'"
            having_group_by.append('group_leadership')
            filter_cols.append('group_leadership')
        elif 'gender' in filter_values and filter_values['gender']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) = :gender"

        table_card_group = []
        if 'group_name' in self.group_by:
            table_card_group.append('group_name')
        s1 = alias(select([
            table.c.doc_id, table.c.group_case_id, table.c.group_name,
            table.c.group_id,
            (sqlalchemy.func.max(table.c.prop_value) +
             sqlalchemy.func.min(table.c.prop_value)).label('maxmin')
        ] + filter_cols + external_cols,
                          from_obj=table,
                          group_by=([
                              table.c.doc_id, table.c.group_case_id,
                              table.c.group_name, table.c.group_id
                          ] + filter_cols + external_cols)),
                   name='x')
        s2 = alias(select([
            table.c.group_case_id,
            sqlalchemy.cast(
                cast(func.max(table.c.gender), Integer) +
                cast(func.min(table.c.gender), Integer),
                VARCHAR).label('gender')
        ] + table_card_group,
                          from_obj=table,
                          group_by=[table.c.group_case_id] + table_card_group +
                          having_group_by,
                          having=group_having),
                   name='y')
        group_by = list(self.group_by)
        if 'group_case_id' in group_by:
            group_by[group_by.index('group_case_id')] = s1.c.group_case_id
            group_by[group_by.index('group_name')] = s1.c.group_name
        return select([sqlalchemy.func.count(s1.c.doc_id).label(self.key)] +
                      group_by,
                      group_by=[s1.c.maxmin] + filter_cols + group_by,
                      having=AND(having).build_expression(s1),
                      from_obj=join(s1, s2, s1.c.group_case_id ==
                                    s2.c.group_case_id)).params(filter_values)
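The MAX(CAST(gender as int4)) + MIN(CAST(gender as int4)) expression that
appears in both subqueries is a compact per-group classifier: gender is
evidently stored as a '0'/'1' string (hence the casts), so an all-0 group
sums to 0, an all-1 group to 2, and a mixed group to 1, and comparing the
sum against the :gender or :group_leadership bind parameter filters groups
by composition. The maxmin column over prop_value in the first subquery is
the same trick applied to another per-member property.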
Example no. 33
    def get_count(cls):
        with new_session() as session:
            min_id = session.query(func.min(ErrorReport.id)).scalar() or 0
            max_id = session.query(func.max(ErrorReport.id)).scalar() or 0

            return max_id - min_id