Example #1
def _process_null_records(table, col_name, check_fkeys, delete=False):
    """Queries the database and optionally deletes the NULL records.

    :param table: sqlalchemy.Table object.
    :param col_name: The name of the column to check in the table.
    :param check_fkeys: If True, check the table for foreign keys back to the
        instances table and if not found, return.
    :param delete: If true, run a delete operation on the table, else just
        query for number of records that match the NULL column.
    :returns: The number of records processed for the table and column.
    """
    records = 0
    if col_name in table.columns:
        # NOTE(mriedem): filter out tables that don't have a foreign key back
        # to the instances table since they could have stale data even if
        # instances.uuid wasn't NULL.
        if check_fkeys:
            fkey_found = False
            fkeys = table.c[col_name].foreign_keys or []
            for fkey in fkeys:
                if fkey.column.table.name == 'instances':
                    fkey_found = True

            if not fkey_found:
                return 0

        if delete:
            records = table.delete().where(
                table.c[col_name] == null()
            ).execute().rowcount
        else:
            records = len(list(
                table.select().where(table.c[col_name] == null()).execute()
            ))
    return records
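A side note on the == null() comparison used throughout these examples: SQLAlchemy compiles both col == None and col == null() to IS NULL, but null() states the intent explicitly and avoids lint warnings about == None. A minimal sketch with a throwaway column():

from sqlalchemy import column, null

print(column('uuid') == null())  # uuid IS NULL
print(column('uuid') == None)    # uuid IS NULL (same rendering)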
Example #2
    def test_reduce_aliased_union_2(self):
        metadata = MetaData()

        page_table = Table('page', metadata,
            Column('id', Integer, primary_key=True),
        )
        magazine_page_table = Table('magazine_page', metadata,
            Column('page_id', Integer, ForeignKey('page.id'), primary_key=True),
        )
        classified_page_table = Table('classified_page', metadata,
            Column('magazine_page_id', Integer, ForeignKey('magazine_page.page_id'), primary_key=True),
        )
        
        # this is essentially the union formed by the ORM's polymorphic_union function.
        # we define two versions with different ordering of selects.

        # the first selectable has the "real" column classified_page.magazine_page_id
        pjoin = union(
            select([
                page_table.c.id, 
                magazine_page_table.c.page_id, 
                classified_page_table.c.magazine_page_id
            ]).select_from(page_table.join(magazine_page_table).join(classified_page_table)),

            select([
                page_table.c.id, 
                magazine_page_table.c.page_id, 
                cast(null(), Integer).label('magazine_page_id')
            ]).select_from(page_table.join(magazine_page_table)),
            
        ).alias('pjoin')

        eq_(
            util.column_set(sql_util.reduce_columns([pjoin.c.id, pjoin.c.page_id, pjoin.c.magazine_page_id])),
            util.column_set([pjoin.c.id])
        )    

        # the first selectable has a CAST, which is a placeholder for
        # classified_page.magazine_page_id in the second selectable.  reduce_columns
        # needs to take into account all foreign keys derived from pjoin.c.magazine_page_id.
        # the UNION construct currently makes the external column look like that of the first
        # selectable only.
        pjoin = union(
            select([
                page_table.c.id, 
                magazine_page_table.c.page_id, 
                cast(null(), Integer).label('magazine_page_id')
            ]).select_from(page_table.join(magazine_page_table)),
            
            select([
                page_table.c.id, 
                magazine_page_table.c.page_id, 
                classified_page_table.c.magazine_page_id
            ]).select_from(page_table.join(magazine_page_table).join(classified_page_table))
        ).alias('pjoin')

        eq_(
            util.column_set(sql_util.reduce_columns([pjoin.c.id, pjoin.c.page_id, pjoin.c.magazine_page_id])),
            util.column_set([pjoin.c.id])
        )    
Example #3
 def test_union_against_join(self):
     # like test_union, except the union is aliased and corresponded against a join
     u = select([table1.c.col1, table1.c.col2, table1.c.col3, table1.c.colx, null().label('coly')]).union(
             select([table2.c.col1, table2.c.col2, table2.c.col3, null().label('colx'), table2.c.coly])
         ).alias('analias')
     j1 = table1.join(table2)
     assert u.corresponding_column(j1.c.table1_colx) is u.c.colx
     assert j1.corresponding_column(u.c.colx) is j1.c.table1_colx
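The null().label('coly') / null().label('colx') pattern in these union tests pads each SELECT so both sides of the UNION have the same column list; the label gives the NULL placeholder a name that lines up with the real column on the other side. A minimal sketch (written against the 1.4-style select() signature):

from sqlalchemy import column, null, select, union

a = select(column('x'), null().label('y'))
b = select(null().label('x'), column('y'))
print(union(a, b))
# SELECT x, NULL AS y UNION SELECT NULL AS x, y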
Example #4
 def col(name, table):
     try:
         return colnamemaps[table][name]
     except KeyError:
         if cast_nulls:
             return sql.cast(sql.null(), types[name]).label(name)
         else:
             return sql.type_coerce(sql.null(), types[name]).label(name)
Example #5
 def test_select_union(self):
     # like test_alias_union, but tests a Select constructed from the union alias.
     u = select([table1.c.col1, table1.c.col2, table1.c.col3, table1.c.colx, null().label('coly')]).union(
             select([table2.c.col1, table2.c.col2, table2.c.col3, null().label('colx'), table2.c.coly])
         ).alias('analias')
     s = select([u])
     s1 = table1.select(use_labels=True)
     s2 = table2.select(use_labels=True)
     assert s.corresponding_column(s1.c.table1_col2) is s.c.col2
     assert s.corresponding_column(s2.c.table2_col2) is s.c.col2
Example #6
    def ___images_by_windowed_meta(
        self,
        context,
        period_start,
        period_stop,
        project_id,
        metadata
    ):
        """Simulated bottom most layer

        :param context: request context
        :param period_start: Datetime
        :param period_stop: Datetime
        :param project_id: String
        :param metadata: Dict of image property name/value pairs
        """
        if metadata:
            aliases = [aliased(models.ImageProperty) for i in metadata]
        else:
            aliases = []

        session = get_session()
        query = session.query(
            models.Image,
            *aliases
        )
        query = query.filter(or_(models.Image.deleted_at == null(),
                                 models.Image.deleted_at > period_start))

        if period_stop:
            query = query.filter(models.Image.created_at < period_stop)

        if project_id:
            query = query.filter_by(project_id=project_id)

        if metadata:
            for keypair, alias in zip(metadata.items(), aliases):
                query = query.filter(alias.name == keypair[0])
                query = query.filter(alias.value == keypair[1])
                query = query.filter(alias.image_id == models.Image.id)
                query = query.filter(or_(
                    alias.deleted_at == null(),
                    alias.deleted_at == models.Image.deleted_at
                ))

        images = []
        for tup in query.all():
            if aliases:
                image = tup[0]
                # props = tup[1:]
            else:
                image = tup
                # props = None
            images.append(dict(image))
        return images
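The deleted_at filter above is the usual "active during the window" idiom: a NULL deleted_at means the image was never deleted, so those rows are kept alongside rows deleted after period_start. Example #9 applies the same shape to volumes. A sketch of just that predicate, with a throwaway column and an illustrative date string:

from sqlalchemy import column, null, or_

deleted_at = column('deleted_at')
print(or_(deleted_at == null(), deleted_at > '2020-01-01'))
# deleted_at IS NULL OR deleted_at > :deleted_at_1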
Example #7
 def test_union(self):
     # tests that we can correspond a column in a Select statement with a certain Table
     # against a column in a Union where one of its underlying Selects matches that same Table
     u = select([table1.c.col1, table1.c.col2, table1.c.col3, table1.c.colx, null().label('coly')]).union(
             select([table2.c.col1, table2.c.col2, table2.c.col3, null().label('colx'), table2.c.coly])
         )
     s1 = table1.select(use_labels=True)
     s2 = table2.select(use_labels=True)
     assert u.corresponding_column(s1.c.table1_col2) is u.c.col2
     assert u.corresponding_column(s2.c.table2_col2) is u.c.col2
Example #8
 def test_alias_union(self):
     # same as test_union, except it's an alias of the union
     u = select([table1.c.col1, table1.c.col2, table1.c.col3, table1.c.colx, null().label('coly')]).union(
             select([table2.c.col1, table2.c.col2, table2.c.col3, null().label('colx'), table2.c.coly])
         ).alias('analias')
     s1 = table1.select(use_labels=True)
     s2 = table2.select(use_labels=True)
     assert u.corresponding_column(s1.c.table1_col2) is u.c.col2
     assert u.corresponding_column(s2.c.table2_col2) is u.c.col2
     assert u.corresponding_column(s2.c.table2_coly) is u.c.coly
     assert s2.corresponding_column(u.c.coly) is s2.c.table2_coly
Example #9
    def ___get_active_by_window_metadata(self, context, period_start,
                                         period_stop=None,
                                         project_id=None,
                                         metadata=None,
                                         use_slave=False):
        """Simulate bottom most layer

        :param context: wsgi context
        :param period_start: Datetime
        :param period_stop: Datetime
        :param project_id: String|None
        :param metadata: Dict|None
        :param use_slave: Boolean
        """
        if metadata:
            aliases = [aliased(models.VolumeMetadata) for i in metadata]
        else:
            aliases = []
        session = get_session(use_slave=use_slave)
        query = session.query(
            models.Volume,
            *aliases
        )

        query = query.filter(or_(models.Volume.terminated_at == null(),
                                 models.Volume.terminated_at > period_start))

        if period_stop:
            query = query.filter(models.Volume.launched_at < period_stop)

        if project_id:
            query = query.filter_by(project_id=project_id)

        if metadata:
            for keypair, alias in zip(metadata.items(), aliases):
                query = query.filter(alias.key == keypair[0])
                query = query.filter(alias.value == keypair[1])
                query = query.filter(alias.volume_id == models.Volume.id)
                query = query.filter(or_(
                    alias.deleted_at == null(),
                    alias.deleted_at == models.Volume.deleted_at
                ))

        volumes = []
        for tup in query.all():
            # If no metadata filters, then no aliases.
            if aliases:
                volume = tup[0]
            else:
                volume = tup
            volumes.append(dict(volume))
        return volumes
Example #10
def _get_node_empty_ratio(context, max_count):
    """Query the DB for non-deleted compute_nodes with 0.0/None alloc ratios

    Results are limited by ``max_count``.
    """
    return context.session.query(models.ComputeNode).filter(or_(
        models.ComputeNode.ram_allocation_ratio == '0.0',
        models.ComputeNode.cpu_allocation_ratio == '0.0',
        models.ComputeNode.disk_allocation_ratio == '0.0',
        models.ComputeNode.ram_allocation_ratio == null(),
        models.ComputeNode.cpu_allocation_ratio == null(),
        models.ComputeNode.disk_allocation_ratio == null()
    )).filter(models.ComputeNode.deleted == 0).limit(max_count).all()
Example #11
def organizations_and_counters():
    '''Query organizations with their counters'''
    query = DB.query(Group,
        func.count(distinct(Package.id)).label('nb_datasets'),
        func.count(distinct(Member.id)).label('nb_members')
    )
    query = query.outerjoin(CertifiedPublicService)
    query = query.outerjoin(Package, and_(
        Group.id == Package.owner_org,
        ~Package.private,
        Package.state == 'active',
    ))
    query = query.outerjoin(Member, and_(
        Member.group_id == Group.id,
        Member.state == 'active',
        Member.table_name == 'user'
    ))
    query = query.filter(Group.state == 'active')
    query = query.filter(Group.approval_status == 'approved')
    query = query.filter(Group.is_organization == True)
    query = query.group_by(Group.id, CertifiedPublicService.organization_id)
    query = query.order_by(
        CertifiedPublicService.organization_id == null(),
        desc('nb_datasets'),
        desc('nb_members'),
        Group.title
    )
    query = query.options(orm.joinedload(Group.certified_public_service))
    return query
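A note on the order_by above: SQLAlchemy accepts a boolean expression as a sort key, and CertifiedPublicService.organization_id == null() evaluates to false (0) for certified organizations and true (1) for the rest, so certified organizations sort first. The sort key itself is ordinary SQL:

from sqlalchemy import column, null

print(column('organization_id') == null())  # organization_id IS NULL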
Example #12
def bm_node_get_associated(context, service_host=None):
    query = model_query(context, models.BareMetalNode, read_deleted="no").filter(
        models.BareMetalNode.instance_uuid != null()
    )
    if service_host:
        query = query.filter_by(service_host=service_host)
    return query.all()
Example #13
def scan_for_null_records(table, col_name, check_fkeys):
    """Queries the table looking for NULL instances of the given column.

    :param col_name: The name of the column to look for in the table.
    :param check_fkeys: If True, check the table for foreign keys back to the
        instances table and if not found, return.
    :raises: exception.ValidationError: If any records are found.
    """
    if col_name in table.columns:
        # NOTE(mriedem): filter out tables that don't have a foreign key back
        # to the instances table since they could have stale data even if
        # instances.uuid wasn't NULL.
        if check_fkeys:
            fkey_found = False
            fkeys = table.c[col_name].foreign_keys or []
            for fkey in fkeys:
                if fkey.column.table.name == 'instances':
                    fkey_found = True

            if not fkey_found:
                return

        records = len(list(
            table.select().where(table.c[col_name] == null()).execute()
        ))
        if records:
            msg = _("There are %(records)d records in the "
                    "'%(table_name)s' table where the uuid or "
                    "instance_uuid column is NULL. These must be "
                    "manually cleaned up before the migration will pass. "
                    "Consider running the "
                    "'nova-manage db null_instance_uuid_scan' command.") % (
                    {'records': records, 'table_name': table.name})
            raise exception.ValidationError(detail=msg)
Example #14
    def check_versions(self):
        """Checks the whole database for incompatible objects.

        This scans all the tables in search of objects that are not supported;
        i.e., those that are not specified in
        `ironic.common.release_mappings.RELEASE_MAPPING`. This includes objects
        that have null 'version' values.

        :returns: A Boolean. True if all the objects have supported versions;
                  False otherwise.
        """
        object_versions = release_mappings.get_object_versions()
        for model in models.Base.__subclasses__():
            if model.__name__ in object_versions:
                supported_versions = object_versions[model.__name__]
                if not supported_versions:
                    continue
                # NOTE(rloo): .notin_ does not handle null:
                # http://docs.sqlalchemy.org/en/latest/core/sqlelement.html#sqlalchemy.sql.operators.ColumnOperators.notin_
                query = model_query(model).filter(
                    sql.or_(model.version == sql.null(),
                            model.version.notin_(supported_versions)))
                if query.count():
                    return False
        return True
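The NOTE(rloo) comment is the crux of this example: col NOT IN (...) evaluates to NULL (not true) when col is NULL, so such rows silently drop out of a plain notin_() filter unless an explicit IS NULL test is OR'd in. A standalone sketch of the combined predicate, using a throwaway column and illustrative version strings:

from sqlalchemy import column, null, or_

version = column('version')
print(or_(version == null(), version.notin_(['1.0', '1.1'])))
# version IS NULL OR version NOT IN (...)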
Example #15
def update_beacon_receiver_distance(name):
    """
    Calculate the distance between the receiver and the aircraft it received
    and write this data into each aircraft_beacon.
    """

    last_receiver_beacon = app.session.query(ReceiverBeacon) \
        .filter(ReceiverBeacon.name == name) \
        .order_by(desc(ReceiverBeacon.timestamp)) \
        .first()

    if last_receiver_beacon is None:
        return

    aircraft_beacons_query = app.session.query(AircraftBeacon) \
        .filter(and_(AircraftBeacon.timestamp > last_receiver_beacon.timestamp,
                     AircraftBeacon.receiver_name == name,
                     AircraftBeacon.radius == null()))

    for aircraft_beacon in aircraft_beacons_query.all():
        location0 = (last_receiver_beacon.latitude, last_receiver_beacon.longitude)
        location1 = (aircraft_beacon.latitude, aircraft_beacon.longitude)
        alt0 = last_receiver_beacon.altitude
        alt1 = aircraft_beacon.altitude

        (flat_distance, phi) = haversine_distance(location0, location1)
        theta = atan2(alt1 - alt0, flat_distance) * 180 / pi
        distance = sqrt(flat_distance**2 + (alt1 - alt0)**2)

        aircraft_beacon.radius = distance
        aircraft_beacon.theta = theta
        aircraft_beacon.phi = phi

    app.session.commit()
    logger.info("Updated receiver {}.".format(name))
Example #16
def organizations_and_counters():
    '''Query organizations with their counters'''
    memberships = aliased(model.Member)

    query = DB.query(model.Group,
        func.count(distinct(model.Package.id)).label('nb_datasets'),
        func.count(distinct(memberships.id)).label('nb_members')
    )
    query = query.outerjoin(CertifiedPublicService)
    query = query.outerjoin(model.Package, and_(
        model.Group.id == model.Package.owner_org,
        ~model.Package.private,
        model.Package.state == 'active',
    ))
    query = query.outerjoin(memberships, and_(
        memberships.group_id == model.Group.id,
        memberships.state == 'active',
        memberships.table_name == 'user'
    ))
    query = query.filter(model.Group.state == 'active')
    query = query.filter(model.Group.approval_status == 'approved')
    query = query.filter(model.Group.is_organization == True)
    query = query.group_by(model.Group.id, CertifiedPublicService.organization_id)
    query = query.order_by(
        CertifiedPublicService.organization_id == null(),
        desc('nb_datasets'),
        desc('nb_members'),
        model.Group.title
    )
    return query
Example #17
def _get_build_requests_with_no_instance_uuid(context, limit):
    """Returns up to $limit build_requests where instance_uuid is null"""
    # build_requests don't use the SoftDeleteMixin so we don't have to filter
    # on the deleted column.
    return context.session.query(api_models.BuildRequest).\
        filter_by(instance_uuid=null()).\
        limit(limit).\
        all()
Example #18
def upgrade():
    network_iface = (CONF.default_network_interface or
                     ('flat' if CONF.dhcp.dhcp_provider == 'neutron'
                      else 'noop'))
    op.execute(
        node.update().where(
            node.c.network_interface == null()).values(
                {'network_interface': network_iface}))
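The migration above is a plain Core UPDATE; outside of alembic the same statement can be built and inspected directly. A sketch against a hypothetical one-column table:

from sqlalchemy import Column, MetaData, String, Table, null

node = Table('nodes', MetaData(), Column('network_interface', String))
stmt = node.update().where(
    node.c.network_interface == null()).values(network_interface='flat')
print(stmt)
# UPDATE nodes SET network_interface=:network_interface
# WHERE nodes.network_interface IS NULL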
Example #19
 def const(me, value):
     if _debug: print('?const', value)
     if value is None: return sql.null()
     try:
         m = object_mapper(value)
     except Exception as e:
         #print('XXXXXXXXXXXXXX', e)
         pass
Example #20
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""

        # give agents extra time to handle transient failures
        agent_dead_limit = cfg.CONF.agent_down_time * 2

        # check for an abrupt clock change since last check. if a change is
        # detected, sleep for a while to let the agents check in.
        tdelta = timeutils.utcnow() - getattr(self, "_clock_jump_canary", timeutils.utcnow())
        if timeutils.total_seconds(tdelta) > cfg.CONF.agent_down_time:
            LOG.warn(
                _LW(
                    "Time since last L3 agent reschedule check has "
                    "exceeded the interval between checks. Waiting "
                    "before check to allow agents to send a heartbeat "
                    "in case there was a clock adjustment."
                )
            )
            time.sleep(agent_dead_limit)
        self._clock_jump_canary = timeutils.utcnow()

        context = n_ctx.get_admin_context()
        cutoff = timeutils.utcnow() - datetime.timedelta(seconds=agent_dead_limit)
        down_bindings = (
            context.session.query(RouterL3AgentBinding)
            .join(agents_db.Agent)
            .filter(agents_db.Agent.heartbeat_timestamp < cutoff, agents_db.Agent.admin_state_up)
            .outerjoin(
                l3_attrs_db.RouterExtraAttributes,
                l3_attrs_db.RouterExtraAttributes.router_id == RouterL3AgentBinding.router_id,
            )
            .filter(
                sa.or_(
                    l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                    l3_attrs_db.RouterExtraAttributes.ha == sql.null(),
                )
            )
        )
        try:
            for binding in down_bindings:
                LOG.warn(
                    _LW(
                        "Rescheduling router %(router)s from agent %(agent)s "
                        "because the agent did not report to the server in "
                        "the last %(dead_time)s seconds."
                    ),
                    {"router": binding.router_id, "agent": binding.l3_agent_id, "dead_time": agent_dead_limit},
                )
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed, n_rpc.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"), binding.router_id)
        except db_exc.DBError:
            # Catch DB errors here so a transient DB connectivity issue
            # doesn't stop the loopingcall.
            LOG.exception(_LE("Exception encountered during router " "rescheduling."))
Example #21
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""
        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents('L3', agent_dead_limit)
        cutoff = self.get_cutoff_time(agent_dead_limit)

        context = n_ctx.get_admin_context()
        down_bindings = (
            context.session.query(RouterL3AgentBinding).
            join(agents_db.Agent).
            filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                   agents_db.Agent.admin_state_up).
            outerjoin(l3_attrs_db.RouterExtraAttributes,
                      l3_attrs_db.RouterExtraAttributes.router_id ==
                      RouterL3AgentBinding.router_id).
            filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                          l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
        try:
            agents_back_online = set()
            for binding in down_bindings:
                if binding.l3_agent_id in agents_back_online:
                    continue
                else:
                    agent = self._get_agent(context, binding.l3_agent_id)
                    if agent.is_active:
                        agents_back_online.add(binding.l3_agent_id)
                        continue

                agent_mode = self._get_agent_mode(binding.l3_agent)
                if agent_mode == constants.L3_AGENT_MODE_DVR:
                    # rescheduling from l3 dvr agent on compute node doesn't
                    # make sense. Router will be removed from that agent once
                    # there are no dvr serviceable ports on that compute node
                    LOG.warn(_LW('L3 DVR agent on node %(host)s is down. '
                                 'Not rescheduling from agent in \'dvr\' '
                                 'mode.'), {'host': binding.l3_agent.host})
                    continue
                LOG.warn(_LW(
                    "Rescheduling router %(router)s from agent %(agent)s "
                    "because the agent did not report to the server in "
                    "the last %(dead_time)s seconds."),
                    {'router': binding.router_id,
                     'agent': binding.l3_agent_id,
                     'dead_time': agent_dead_limit})
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed,
                        oslo_messaging.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"),
                                  binding.router_id)
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception(_LE("Exception encountered during router "
                              "rescheduling."))
Example #22
def software_version(software_id, version_id):
    software = Software.query.filter_by(id=software_id).first()
    if not software:
        return (render_template("pyfarm/error.html",
                                error="Software %s not found" % software_id),
                NOT_FOUND)

    version = SoftwareVersion.query.filter_by(
        software=software, id=version_id).first()
    if not version:
        return (render_template("pyfarm/error.html",
                                error="Version %s not found" % version_id),
                NOT_FOUND)

    if request.method == "POST":
        discovery_code = request.form["discovery_code"].strip() or None
        discovery_function = request.form["discovery_function"].strip() or None

        # Both fields must be set together.
        if (discovery_code is None) != (discovery_function is None):
            return (render_template(
                "pyfarm/error.html",
                error="`discovery_code` and `discovery_function_name` must "
                      "either be both unset or both set"),
                BAD_REQUEST)

        # Store empty fields as explicit SQL NULL.
        version.discovery_code = discovery_code or sql.null()
        version.discovery_function_name = discovery_function or sql.null()

        db.session.add(version)
        db.session.commit()

        flash("Discovery code for version %s has been updated." %
              version.version)

        return redirect(url_for("single_software_version_ui",
                                software_id=software.id,
                                version_id=version.id),
                        SEE_OTHER)

    else:
        return render_template("pyfarm/user_interface/software_version.html",
                               software=software, version=version)
Example #23
def update_relations():
    """Update AircraftBeacon and ReceiverBeacon relations"""

    # Create missing Receiver from ReceiverBeacon
    available_receivers = session.query(Receiver.name) \
        .subquery()

    missing_receiver_query = session.query(distinct(ReceiverBeacon.name)) \
        .filter(ReceiverBeacon.receiver_id == null()) \
        .filter(~ReceiverBeacon.name.in_(available_receivers))

    ins = insert(Receiver).from_select([Receiver.name], missing_receiver_query)
    session.execute(ins)

    # Create missing Device from AircraftBeacon
    available_addresses = session.query(Device.address) \
        .subquery()

    missing_addresses_query = session.query(distinct(AircraftBeacon.address)) \
        .filter(AircraftBeacon.device_id == null()) \
        .filter(~AircraftBeacon.address.in_(available_addresses))

    ins2 = insert(Device).from_select([Device.address], missing_addresses_query)
    session.execute(ins2)

    # Update AircraftBeacons
    upd = session.query(AircraftBeacon) \
        .filter(AircraftBeacon.device_id == null()) \
        .filter(AircraftBeacon.receiver_id == null()) \
        .filter(AircraftBeacon.address == Device.address) \
        .filter(AircraftBeacon.receiver_name == Receiver.name) \
        .update({AircraftBeacon.device_id: Device.id,
                 AircraftBeacon.receiver_id: Receiver.id},
                synchronize_session='fetch')

    # Update ReceiverBeacons
    upd2 = session.query(ReceiverBeacon) \
        .filter(ReceiverBeacon.receiver_id == null()) \
        .filter(ReceiverBeacon.receiver_name == Receiver.name) \
        .update({ReceiverBeacon.receiver_id: Receiver.id},
                synchronize_session='fetch')

    session.commit()
    print("Updated {} AircraftBeacons and {} ReceiverBeacons".
          format(upd, upd2))
Example #24
    def _add_nodes_filters(self, query, filters):
        if filters is None:
            filters = {}

        if 'associated' in filters:
            if filters['associated']:
                query = query.filter(models.Node.ironic_node_id != sql.null())
            else:
                query = query.filter(models.Node.ironic_node_id == sql.null())
        if 'type' in filters:
            query = query.filter_by(type=filters['type'])
        if 'image_id' in filters:
            query = query.filter_by(image_id=filters['image_id'])
        if 'project_id' in filters:
            query = query.filter_by(project_id=filters['project_id'])
        if 'user_id' in filters:
            query = query.filter_by(user_id=filters['user_id'])

        return query
Example #25
    def _add_nodes_filters(self, query, filters):
        if filters is None:
            filters = {}

        if 'chassis_uuid' in filters:
            # get_chassis_by_uuid() raises an exception if the chassis
            # is not found
            chassis_obj = self.get_chassis_by_uuid(filters['chassis_uuid'])
            query = query.filter_by(chassis_id=chassis_obj.id)
        if 'associated' in filters:
            if filters['associated']:
                query = query.filter(models.Node.instance_uuid != sql.null())
            else:
                query = query.filter(models.Node.instance_uuid == sql.null())
        if 'reserved' in filters:
            if filters['reserved']:
                query = query.filter(models.Node.reservation != sql.null())
            else:
                query = query.filter(models.Node.reservation == sql.null())
        if 'reserved_by_any_of' in filters:
            query = query.filter(models.Node.reservation.in_(
                filters['reserved_by_any_of']))
        if 'maintenance' in filters:
            query = query.filter_by(maintenance=filters['maintenance'])
        if 'driver' in filters:
            query = query.filter_by(driver=filters['driver'])
        if 'resource_class' in filters:
            query = query.filter_by(resource_class=filters['resource_class'])
        if 'provision_state' in filters:
            query = query.filter_by(provision_state=filters['provision_state'])
        if 'provisioned_before' in filters:
            limit = (timeutils.utcnow() -
                     datetime.timedelta(seconds=filters['provisioned_before']))
            query = query.filter(models.Node.provision_updated_at < limit)
        if 'inspection_started_before' in filters:
            limit = (timeutils.utcnow() -
                     datetime.timedelta(
                         seconds=filters['inspection_started_before']))
            query = query.filter(models.Node.inspection_started_at < limit)
        if 'console_enabled' in filters:
            query = query.filter_by(console_enabled=filters['console_enabled'])

        return query
Example #26
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""
        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents('L3', agent_dead_limit)
        cutoff = self.get_cutoff_time(agent_dead_limit)

        context = n_ctx.get_admin_context()
        try:
            down_bindings = (
                context.session.query(RouterL3AgentBinding).
                join(agents_db.Agent).
                filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                       agents_db.Agent.admin_state_up).
                outerjoin(l3_attrs_db.RouterExtraAttributes,
                          l3_attrs_db.RouterExtraAttributes.router_id ==
                          RouterL3AgentBinding.router_id).
                filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha ==
                              sql.false(),
                              l3_attrs_db.RouterExtraAttributes.ha ==
                              sql.null())))

            agents_back_online = set()
            for binding in down_bindings:
                if binding.l3_agent_id in agents_back_online:
                    continue
                else:
                    # we need new context to make sure we use different DB
                    # transaction - otherwise we may fetch same agent record
                    # each time due to REPEATABLE_READ isolation level
                    context = n_ctx.get_admin_context()
                    agent = self._get_agent(context, binding.l3_agent_id)
                    if agent.is_active:
                        agents_back_online.add(binding.l3_agent_id)
                        continue

                LOG.warning(_LW(
                    "Rescheduling router %(router)s from agent %(agent)s "
                    "because the agent did not report to the server in "
                    "the last %(dead_time)s seconds."),
                    {'router': binding.router_id,
                     'agent': binding.l3_agent_id,
                     'dead_time': agent_dead_limit})
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed,
                        oslo_messaging.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"),
                                  binding.router_id)
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception(_LE("Exception encountered during router "
                              "rescheduling."))
Example #27
 def get_down_router_bindings(self, context, agent_dead_limit):
         cutoff = self.get_cutoff_time(agent_dead_limit)
         return (context.session.query(RouterL3AgentBinding).
                 join(agents_db.Agent).
                 filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                 agents_db.Agent.admin_state_up).
                 outerjoin(l3_attrs_db.RouterExtraAttributes,
                 l3_attrs_db.RouterExtraAttributes.router_id ==
                 RouterL3AgentBinding.router_id).filter(
                 sa.or_(
                     l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                     l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
Example #28
def upgrade():
    op.add_column('nodes', sa.Column('version_id', sa.String(36),
                                     server_default=''))
    op.add_column('nodes', sa.Column('state', sa.Enum(*istate.States.all(),
                                                      name='node_state'),
                                     nullable=False,
                                     default=istate.States.finished,
                                     server_default=istate.States.finished))
    # correct the state: finished -> error if Node.error is not null
    stmt = Node.update().where(Node.c.error != sql.null()).values(
        {'state': op.inline_literal(istate.States.error)})
    op.execute(stmt)
Example #29
def bm_node_find_free(context, service_host=None, cpus=None, memory_mb=None, local_gb=None):
    query = model_query(context, models.BareMetalNode, read_deleted="no")
    query = query.filter(models.BareMetalNode.instance_uuid == null())
    if service_host:
        query = query.filter_by(service_host=service_host)
    if cpus is not None:
        query = query.filter(models.BareMetalNode.cpus >= cpus)
    if memory_mb is not None:
        query = query.filter(models.BareMetalNode.memory_mb >= memory_mb)
    if local_gb is not None:
        query = query.filter(models.BareMetalNode.local_gb >= local_gb)
    query = _build_node_order_by(query)
    return query.first()
Example #30
def _sql_crit(expression, value):
    """Produce an equality expression against the given value.

    This takes into account a value that is actually a collection
    of values, as well as a value of None or collection that contains
    None.

    """

    values = utils.to_list(value, default=(None, ))
    if len(values) == 1:
        if values[0] is None:
            return expression == sql.null()
        else:
            return expression == values[0]
    elif _none_set.intersection(values):
        return sql.or_(
            expression == sql.null(),
            _sql_crit(expression, set(values).difference(_none_set))
        )
    else:
        return expression.in_(values)
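A few hypothetical calls against the helper above, assuming its module provides utils.to_list (wraps a scalar in a list) and _none_set (a set containing None); the comments show roughly how each result compiles:

from sqlalchemy import column

user_id = column('user_id')
print(_sql_crit(user_id, 5))          # user_id = :user_id_1
print(_sql_crit(user_id, None))       # user_id IS NULL
print(_sql_crit(user_id, [5, None]))  # user_id IS NULL OR user_id = :user_id_1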
Example #31
def _get_replica_schedules_filter(context,
                                  replica_id=None,
                                  schedule_id=None,
                                  expired=True):
    now = timeutils.utcnow()
    q = _soft_delete_aware_query(context, models.ReplicaSchedule)
    q = q.join(models.Replica)
    sched_filter = q.filter()
    if is_user_context(context):
        sched_filter = sched_filter.filter(
            models.Replica.project_id == context.tenant)

    if replica_id:
        sched_filter = sched_filter.filter(models.Replica.id == replica_id)
    if schedule_id:
        sched_filter = sched_filter.filter(
            models.ReplicaSchedule.id == schedule_id)
    if not expired:
        sched_filter = sched_filter.filter(
            or_(models.ReplicaSchedule.expiration_date == null(),
                models.ReplicaSchedule.expiration_date > now))
    return sched_filter
Example #32
def subscribe():
    """
    Subscribe a limb
    Use this route to subscribe a limb on Virtual Brain API
    ---
    tags:
        - brain
    responses:
        200:
            description: Return json with new limb
    """
    req = request.get_json(force=True)
    name = req['name'] if isinstance(req['name'], str) else abort(
        c.CONFLICT, c.TYPE_ERROR.format('name'))
    ip = req['ip'] if isinstance(req['ip'], str) else abort(
        c.CONFLICT, c.TYPE_ERROR.format('ip'))
    area = req['area'] if isinstance(req['area'], str) else abort(
        c.CONFLICT, c.TYPE_ERROR.format('area'))
    sub_area = req['sub_area'] if 'sub_area' in req else sql.null()
    result = {"name": name, "ip": ip, "area": area, "sub_area": str(sub_area)}
    n_limb = Limb(name=name, ip=ip, area=area, sub_area=sub_area)
    r_limb = db.session.query(Limb).filter(Limb.name == name).first()
    if r_limb is None:
        db.session.add(n_limb)
        db.session.commit()
    print(str(os.environ.get('VIRTUALSKIN_BRAIN_ADMIN')))
    p = subprocess.call('bash {} {}'.format(
        str(os.environ.get('VIRTUALSKIN_BRAIN_ADMIN')) + '/ssh-copy.sh', ip),
                        shell=True)
    if not p:
        p = subprocess.call('bash {} {}'.format(
            str(os.environ.get('VIRTUALSKIN_BRAIN_ADMIN')) + '/ssh-config.sh',
            ip),
                            shell=True)
    result = {"status": p}
    return app.response_class(response=json.dumps(result),
                              status=c.OK,
                              mimetype=c.JSON)
Example #33
def load_item_into_database(item, item_type, session):

    # Check if item exists
    if item is None:
        return

    # Get Existing Record
    instance = find_instance_from_item(item, item_type, session)

    # If its new, create a new one in the database
    if instance is None:

        # Process Race
        instance = create_new_instance_from_item(item, item_type, session)

    else:

        # Set the new attributes
        for key, value in item.items():

            # Exceptions
            if item_type == 'race':
                # Never move the off_time later
                if key == 'off_time':
                    if instance.off_time is not None:
                        if value > instance.off_time:
                            continue
            if value is None:
                value = null()

            # Set the attributes
            setattr(instance, key, value)

        # Commit changes
        session.commit()

    # Return race instance
    return instance
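The None-to-null() conversion here (and again in Example #35) leans on a documented ORM behavior: assigning Python None to an attribute whose column has a default is treated as "never set", so the default fires on flush, whereas assigning null() forces an explicit SQL NULL. A minimal, self-contained illustration (model and values are hypothetical; SQLAlchemy 1.4+ style):

from sqlalchemy import Column, Integer, String, create_engine, null
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Demo(Base):
    __tablename__ = 'demo'
    id = Column(Integer, primary_key=True)
    note = Column(String, default='TBD')  # client-side column default

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Demo(note=None))    # default applies: stored as 'TBD'
    session.add(Demo(note=null()))  # forced: stored as SQL NULL
    session.commit()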
Example #34
def new_file():
    form = FileForm()
    db = OracleDb()
    users = db.sqlalchemy_session.query(OrmUser).all()
    if request.method == 'POST':
        if not form.validate():
            return render_template('file_form.html',
                                   form=form,
                                   form_name="New file",
                                   action="new_file")
        else:
            if request.form.get('owner_select') != 'None':
                owner_id = [
                    user.user_id for user in users
                    if user.user_name == request.form.get('owner_select')
                ][0]
            else:
                owner_id = sql.null()

            file_obj = OrmFile(
                file_name=form.file_name.data,
                file_type=form.file_type.data,
                file_context=form.file_context.data,
                file_owner_id=owner_id,
                file_date=form.file_date.data.strftime("%d-%b-%y"))

            db = OracleDb()
            db.sqlalchemy_session.add(file_obj)
            db.sqlalchemy_session.commit()

            return redirect(url_for('index_file'))

    return render_template('file_form.html',
                           form=form,
                           form_name="New file",
                           action="new_file",
                           users=users)
Example #35
def create_new_instance_from_item(item, item_type, session):

    # Model Dict
    model_dict = {
        'track': Tracks,
        'race': Races,
        'horse': Horses,
        'jockey': Jockeys,
        'trainer': Trainers,
        'entry': Entries,
        'owner': Owners,
        'entry_pool': EntryPools,
        'payoff': Payoffs,
        'probable': Probables,
        'pick': Picks,
        'workout': Workouts,
        'analysis_probability': AnalysisProbabilities,
        'betting_result': BettingResults,
        'fractional_time': FractionalTimes,
        'point_of_call': PointsOfCall,
        'database_statistic': DatabaseStatistics
    }

    # Fix any nulls
    for key, value in item.items():
        if value is None:
            item[key] = null()

    # Create Instance
    instance = model_dict[item_type](**item)

    # Add and commit
    session.add(instance)
    session.commit()

    # Return Instance
    return instance
Example #36
def scan_for_null_records(table, col_name, check_fkeys):
    """Queries the table looking for NULL instances of the given column.

    :param col_name: The name of the column to look for in the table.
    :param check_fkeys: If True, check the table for foreign keys back to the
        instances table and if not found, return.
    :raises: exception.ValidationError: If any records are found.
    """
    if col_name in table.columns:
        # NOTE(mriedem): filter out tables that don't have a foreign key back
        # to the instances table since they could have stale data even if
        # instances.uuid wasn't NULL.
        if check_fkeys:
            fkey_found = False
            fkeys = table.c[col_name].foreign_keys or []
            for fkey in fkeys:
                if fkey.column.table.name == 'instances':
                    fkey_found = True

            if not fkey_found:
                return

        records = len(
            list(table.select().where(table.c[col_name] == null()).execute()))
        if records:
            msg = _("There are %(records)d records in the "
                    "'%(table_name)s' table where the uuid or "
                    "instance_uuid column is NULL. These must be "
                    "manually cleaned up before the migration will pass. "
                    "Consider running the "
                    "'compute-manage db null_instance_uuid_scan' command.") % (
                        {
                            'records': records,
                            'table_name': table.name
                        })
            raise exception.ValidationError(detail=msg)
Example #37
def load_seed_data(migrate_engine):
    # Columns in general_seed_data_file:
    # user_specimen_id, field_call, bcoral_genet_id, bsym_genet_id, reef,
    # region, latitude, longitude, geographic_origin, colony_location,
    # latitude_outplant, longitude_outplant, depth, dist_shore, disease_resist,
    # bleach_resist, mortality, tle, spawning, collector, org,
    # collection_date, contact_email, seq_facility, array_version, public,
    # public_after_date, coral_mlg_clonal_id, symbio_mlg_clonal_id, genetic_coral_species_call, percent_missing_data,
    # percent_apalm, percent_acerv, percent_mixed, affy_id

    collector_table_inserts = 0
    colony_table_inserts = 0
    experiment_table_inserts = 0
    genotype_table_inserts = 0
    person_table_inserts = 0
    phenotype_table_inserts = 0
    reef_table_inserts = 0
    sample_table_inserts = 0
    SAMPLE_ID = 10000

    with open(GENERAL_SEED_DATA_FILE, "r") as fh:
        for i, line in enumerate(fh):
            if i == 0:
                # Skip the header.
                continue
            line = line.rstrip('\r\n')
            items = line.split("\t")
            # Automatically generate the sample_id.
            sample_id = "A%d" % SAMPLE_ID
            SAMPLE_ID += 1
            user_specimen_id = items[0]
            if len(items[1]) == 0:
                field_call = sql.null()
            else:
                field_call = items[1]
            if len(items[2]) == 0:
                bcoral_genet_id = sql.null()
            else:
                bcoral_genet_id = items[2]
            if len(items[3]) == 0:
                bsym_genet_id = sql.null()
            else:
                bsym_genet_id = items[3]
            reef = items[4]
            region = items[5]
            try:
                latitude = "%6f" % float(items[6])
            except Exception:
                latitude = sql.null()
            try:
                longitude = "%6f" % float(items[7])
            except Exception:
                longitude = sql.null()
            if len(items[8]) == 0:
                geographic_origin = sql.null()
            else:
                geographic_origin = items[8]
            if len(items[9]) == 0:
                colony_location = sql.null()
            else:
                colony_location = items[9]
            try:
                latitude_outplant = "%6f" % float(items[10])
            except Exception:
                latitude_outplant = sql.null()
            try:
                longitude_outplant = "%6f" % float(items[11])
            except Exception:
                longitude_outplant = sql.null()
            try:
                depth = int(items[12])
            except Exception:
                depth = 0
            if len(items[13]) == 0:
                dist_shore = sql.null()
            else:
                dist_shore = items[13]
            disease_resist = items[14]
            bleach_resist = items[15]
            mortality = items[16]
            tle = items[17]
            # Convert original spawning value to Boolean.
            spawning = string_as_bool(items[18])
            collector = items[19]
            org = items[20]
            try:
                collection_date = convert_date_string_for_database(items[21])
            except Exception:
                collection_date = localtimestamp(migrate_engine)
            contact_email = items[22]
            seq_facility = items[23]
            array_version = items[24]
            # Convert original public value to Boolean.
            public = string_as_bool(items[25])
            if public:
                public_after_date = sql.null()
            else:
                if len(items[26]) == 0:
                    # Set the value of public_after_date to the default.
                    public_after_date = year_from_now
                else:
                    public_after_date = convert_date_string_for_database(
                        items[26])
            coral_mlg_clonal_id = items[27]
            symbio_mlg_clonal_id = items[28]
            genetic_coral_species_call = items[29]
            try:
                percent_missing_data = "%6f" % float(items[30])
            except Exception:
                percent_missing_data = sql.null()
            try:
                percent_apalm = "%6f" % float(items[31])
            except Exception:
                percent_apalm = sql.null()
            try:
                percent_acerv = "%6f" % float(items[32])
            except Exception:
                percent_acerv = sql.null()
            try:
                percent_mixed = "%6f" % float(items[33])
            except Exception:
                percent_mixed = sql.null()
            affy_id = items[34]

            # Process the experiment items.  Dependent tables: sample.
            table = "experiment"
            # See if we need to add a row to the experiment table.
            cmd = "SELECT id FROM experiment WHERE seq_facility = '%s' AND array_version = '%s'"
            cmd = cmd % (seq_facility, array_version)
            experiment_id = get_primary_id(migrate_engine, table, cmd)
            if experiment_id is None:
                # Add a row to the experiment table.
                cmd = "INSERT INTO experiment VALUES (%s, %s, %s, '%s', '%s')"
                cmd = cmd % (nextval(migrate_engine,
                                     table), localtimestamp(migrate_engine),
                             localtimestamp(migrate_engine), seq_facility,
                             array_version)
                migrate_engine.execute(cmd)
                experiment_table_inserts += 1
                experiment_id = get_latest_id(migrate_engine, table)

            # Process the genotype items.  Dependent tables: sample.
            table = "genotype"
            # See if we need to add a row to the table.
            cmd = "SELECT id FROM genotype WHERE coral_mlg_clonal_id = '%s' AND symbio_mlg_clonal_id = '%s' AND genetic_coral_species_call = '%s'"
            cmd = cmd % (coral_mlg_clonal_id, symbio_mlg_clonal_id,
                         genetic_coral_species_call)
            genotype_id = get_primary_id(migrate_engine, table, cmd)
            if genotype_id is None:
                # Add a row to the table.
                cmd = "INSERT INTO genotype VALUES (%s, %s, %s, '%s', '%s', '%s')"
                cmd = cmd % (nextval(migrate_engine,
                                     table), localtimestamp(migrate_engine),
                             localtimestamp(migrate_engine),
                             coral_mlg_clonal_id, symbio_mlg_clonal_id,
                             genetic_coral_species_call)
                migrate_engine.execute(cmd)
                genotype_table_inserts += 1
                genotype_id = get_latest_id(migrate_engine, table)

            # Process the phenotype items.  Dependent tables: sample.
            table = "phenotype"
            # See if we need to add a row to the table.
            cmd = "SELECT id FROM phenotype WHERE disease_resist = '%s' AND bleach_resist = '%s' AND mortality = '%s'"
            cmd += " AND tle = '%s' AND spawning = '%s'"
            cmd = cmd % (disease_resist, bleach_resist, mortality, tle,
                         spawning)
            phenotype_id = get_primary_id(migrate_engine, table, cmd)
            if phenotype_id is None:
                # Add a row to the table.
                cmd = "INSERT INTO phenotype VALUES (%s, %s, %s, '%s', '%s', '%s', '%s', '%s')"
                cmd = cmd % (nextval(migrate_engine,
                                     table), localtimestamp(migrate_engine),
                             localtimestamp(migrate_engine), disease_resist,
                             bleach_resist, mortality, tle, spawning)
                migrate_engine.execute(cmd)
                phenotype_table_inserts += 1
                phenotype_id = get_latest_id(migrate_engine, table)

            # Process the person items.  Dependent tables: collector.
            table = "person"
            # See if we need to add a row to the table.
            if collector.find(" ") > 0:
                # We have a first and last name separated by a space.
                first_last = collector.split(" ")
                first_name = first_last[0]
                last_name = first_last[1]
                cmd = "SELECT id FROM person WHERE last_name = '%s' AND first_name = '%s' AND email = '%s'" % (
                    last_name, first_name, contact_email)
            else:
                # We have a last name with no first name.
                if len(collector) > 0:
                    last_name = collector
                else:
                    last_name = 'Unknown'
                first_name = sql.null()
                cmd = "SELECT id FROM person WHERE last_name = '%s' and email = '%s'" % (
                    last_name, contact_email)
            person_id = get_primary_id(migrate_engine, table, cmd)
            if person_id is None:
                # Add a row to the table.
                cmd = "INSERT INTO person VALUES (%s, %s, %s, '%s', '%s', '%s', '%s')"
                cmd = cmd % (nextval(migrate_engine,
                                     table), localtimestamp(migrate_engine),
                             localtimestamp(migrate_engine), last_name,
                             first_name, org, contact_email)
                migrate_engine.execute(cmd)
                person_table_inserts += 1
                person_id = get_latest_id(migrate_engine, table)

            # Process the collector items.  Dependent tables: sample.
            table = "collector"
            # See if we need to add a row to the table.
            cmd = "SELECT id FROM collector WHERE person_id = %s" % person_id
            collector_id = get_primary_id(migrate_engine, table, cmd)
            if collector_id is None:
                # Add a row to the table.
                cmd = "INSERT INTO collector VALUES (%s, %s, %s, %s, %s)"
                cmd = cmd % (nextval(migrate_engine,
                                     table), localtimestamp(migrate_engine),
                             localtimestamp(migrate_engine), person_id,
                             person_id)
                migrate_engine.execute(cmd)
                collector_table_inserts += 1
                collector_id = get_latest_id(migrate_engine, table)

            # Process the reef items.  Dependent tables: colony.
            table = "reef"
            # See if we need to add a row to the table.
            cmd = "SELECT id FROM reef WHERE name = '%s'" % reef
            reef_id = get_primary_id(migrate_engine, table, cmd)
            if reef_id is None:
                # Add a row to the table.
                cmd = "INSERT INTO reef VALUES (%s, %s, %s, '%s', '%s', %s, %s)"
                cmd = cmd % (nextval(migrate_engine,
                                     table), localtimestamp(migrate_engine),
                             localtimestamp(migrate_engine), reef, region,
                             latitude, longitude)
                migrate_engine.execute(cmd)
                reef_table_inserts += 1
                reef_id = get_latest_id(migrate_engine, table)

            # Process the colony items.  Dependent tables: fragment, sample.
            table = "colony"
            # See if we need to add a row to the table.
            cmd = "SELECT id FROM colony WHERE latitude = %s AND longitude = %s and reef_id = %s"
            cmd = cmd % (latitude_outplant, longitude_outplant, reef_id)
            colony_id = get_primary_id(migrate_engine, table, cmd)
            if colony_id is None:
                # Add a row to the table.
                cmd = "INSERT INTO colony VALUES (%s, %s, %s, %s, %s, %s, %s)"
                cmd = cmd % (nextval(migrate_engine,
                                     table), localtimestamp(migrate_engine),
                             localtimestamp(migrate_engine), latitude_outplant,
                             longitude_outplant, depth, reef_id)
                migrate_engine.execute(cmd)
                colony_table_inserts += 1
                colony_id = get_latest_id(migrate_engine, table)

            # Process the sample items.  Dependent tables: None.
            table = "sample"
            # See if we need to add a row to the table.
            cmd = "SELECT id FROM sample WHERE sample_id = '%s'" % sample_id
            sample_id_db = get_primary_id(migrate_engine, table, cmd)
            if sample_id_db is None:
                # Add a row to the table.  Values for
                # the following are not in the seed data.
                fragment_id = sql.null()
                taxonomy_id = sql.null()
                dna_extraction_method = sql.null()
                dna_concentration = sql.null()
                percent_missing_data_coral = sql.null()
                percent_missing_data_sym = sql.null()
                percent_reference_coral = sql.null()
                percent_reference_sym = sql.null()
                percent_alternative_coral = sql.null()
                percent_alternative_sym = sql.null()
                percent_hererozygous_coral = sql.null()
                percent_hererozygous_sym = sql.null()
                # id, create_time, update_time, affy_id, sample_id,
                # genotype_id, phenotype_id, experiment_id, colony_id, colony_location,
                # fragment_id, taxonomy_id, collector_id
                cmd = "INSERT INTO sample VALUES (%s, %s, %s, '%s', '%s', %s, %s, %s, %s, %s, %s, %s, %s, "
                if collection_date == "LOCALTIMESTAMP":
                    # collection_date
                    cmd += "%s, "
                else:
                    # collection_date
                    cmd += "'%s', "
                # user_specimen_id, depth, dna_extraction_method, dna_concentration, public,
                # public_after_date, percent_missing_data_coral, percent_missing_data_sym,
                # percent_reference_coral, percent_reference_sym, percent_alternative_coral,
                # percent_alternative_sym, percent_hererozygous_coral, percent_hererozygous_sym
                cmd += "'%s', %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
                cmd = cmd % (
                    nextval(migrate_engine,
                            table), localtimestamp(migrate_engine),
                    localtimestamp(migrate_engine), affy_id, sample_id,
                    genotype_id, phenotype_id, experiment_id, colony_id,
                    colony_location, fragment_id, taxonomy_id, collector_id,
                    collection_date, user_specimen_id, depth,
                    dna_extraction_method, dna_concentration, public,
                    public_after_date, percent_missing_data_coral,
                    percent_missing_data_sym, percent_reference_coral,
                    percent_reference_sym, percent_alternative_coral,
                    percent_alternative_sym, percent_hererozygous_coral,
                    percent_hererozygous_sym)
                migrate_engine.execute(cmd)
                sample_table_inserts += 1
                sample_id = get_latest_id(migrate_engine, table)

    print("Inserted %d rows into the collector table." %
          collector_table_inserts)
    print("Inserted %d rows into the colony table." % colony_table_inserts)
    print("Inserted %d rows into the experiment table." %
          experiment_table_inserts)
    print("Inserted %d rows into the genotype table." % genotype_table_inserts)
    print("Inserted %d rows into the person table." % person_table_inserts)
    print("Inserted %d rows into the phenotype table." %
          phenotype_table_inserts)
    print("Inserted %d rows into the reef table." % reef_table_inserts)
    print("Inserted %d rows into the sample table." % sample_table_inserts)
Beispiel #38
0
    def activo(self):
        return sql.select([sql.column('fecha_finalizacion') == sql.null()])
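For context, a self-contained sketch of the idiom in this example: comparing a column to sql.null() with == compiles to an IS NULL predicate.

from sqlalchemy import sql

stmt = sql.select([sql.column('fecha_finalizacion')]).where(
    sql.column('fecha_finalizacion') == sql.null())
print(stmt)  # the WHERE clause renders as: fecha_finalizacion IS NULL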
Beispiel #39
0
def compute_logbook_entries(session=None):
    logger.info("Compute logbook.")

    if session is None:
        session = app.session

    # NOTE: the date-bounded filter below is leftover debug/example code;
    # the immediate re-assignment empties it, so no filter is applied.
    or_args = [
        between(TakeoffLanding.timestamp, '2016-06-28 00:00:00',
                '2016-06-28 23:59:59')
    ]
    or_args = []

    # 'wo' is the window order for the sql window function
    wo = and_(func.date(TakeoffLanding.timestamp), TakeoffLanding.device_id,
              TakeoffLanding.timestamp, TakeoffLanding.airport_id)

    # make a query with current, previous and next "takeoff_landing" event, so we can find complete flights
    sq = session.query(
            TakeoffLanding.device_id,
            func.lag(TakeoffLanding.device_id).over(order_by=wo).label('device_id_prev'),
            func.lead(TakeoffLanding.device_id).over(order_by=wo).label('device_id_next'),
            TakeoffLanding.timestamp,
            func.lag(TakeoffLanding.timestamp).over(order_by=wo).label('timestamp_prev'),
            func.lead(TakeoffLanding.timestamp).over(order_by=wo).label('timestamp_next'),
            TakeoffLanding.track,
            func.lag(TakeoffLanding.track).over(order_by=wo).label('track_prev'),
            func.lead(TakeoffLanding.track).over(order_by=wo).label('track_next'),
            TakeoffLanding.is_takeoff,
            func.lag(TakeoffLanding.is_takeoff).over(order_by=wo).label('is_takeoff_prev'),
            func.lead(TakeoffLanding.is_takeoff).over(order_by=wo).label('is_takeoff_next'),
            TakeoffLanding.airport_id,
            func.lag(TakeoffLanding.airport_id).over(order_by=wo).label('airport_id_prev'),
            func.lead(TakeoffLanding.airport_id).over(order_by=wo).label('airport_id_next')) \
        .filter(*or_args) \
        .subquery()

    # find complete flights (with takeoff and landing on the same day)
    complete_flight_query = session.query(
            sq.c.timestamp.label('reftime'),
            sq.c.device_id.label('device_id'),
            sq.c.timestamp.label('takeoff_timestamp'), sq.c.track.label('takeoff_track'), sq.c.airport_id.label('takeoff_airport_id'),
            sq.c.timestamp_next.label('landing_timestamp'), sq.c.track_next.label('landing_track'), sq.c.airport_id_next.label('landing_airport_id'),
            label('duration', sq.c.timestamp_next - sq.c.timestamp)) \
        .filter(and_(sq.c.is_takeoff == true(), sq.c.is_takeoff_next == false())) \
        .filter(sq.c.device_id == sq.c.device_id_next) \
        .filter(func.date(sq.c.timestamp_next) == func.date(sq.c.timestamp))

    # split complete flights (with takeoff and landing on different days) into one takeoff and one landing
    split_start_query = session.query(
            sq.c.timestamp.label('reftime'),
            sq.c.device_id.label('device_id'),
            sq.c.timestamp.label('takeoff_timestamp'), sq.c.track.label('takeoff_track'), sq.c.airport_id.label('takeoff_airport_id'),
            null().label('landing_timestamp'), null().label('landing_track'), null().label('landing_airport_id'),
            null().label('duration')) \
        .filter(and_(sq.c.is_takeoff == true(), sq.c.is_takeoff_next == false())) \
        .filter(sq.c.device_id == sq.c.device_id_next) \
        .filter(func.date(sq.c.timestamp_next) != func.date(sq.c.timestamp))

    split_landing_query = session.query(
            sq.c.timestamp_next.label('reftime'),
            sq.c.device_id.label('device_id'),
            null().label('takeoff_timestamp'), null().label('takeoff_track'), null().label('takeoff_airport_id'),
            sq.c.timestamp_next.label('landing_timestamp'), sq.c.track_next.label('landing_track'), sq.c.airport_id_next.label('landing_airport_id'),
            null().label('duration')) \
        .filter(and_(sq.c.is_takeoff == true(), sq.c.is_takeoff_next == false())) \
        .filter(sq.c.device_id == sq.c.device_id_next) \
        .filter(func.date(sq.c.timestamp_next) != func.date(sq.c.timestamp))

    # find landings without start
    only_landings_query = session.query(
            sq.c.timestamp.label('reftime'),
            sq.c.device_id.label('device_id'),
            null().label('takeoff_timestamp'), null().label('takeoff_track'), null().label('takeoff_airport_id'),
            sq.c.timestamp.label('landing_timestamp'), sq.c.track.label('landing_track'), sq.c.airport_id.label('landing_airport_id'),
            null().label('duration')) \
        .filter(sq.c.is_takeoff == false()) \
        .filter(or_(sq.c.device_id != sq.c.device_id_prev,
                    sq.c.is_takeoff_prev == false(),
                    sq.c.is_takeoff_prev == null()))

    # find starts without landing
    only_starts_query = session.query(
            sq.c.timestamp.label('reftime'),
            sq.c.device_id.label('device_id'),
            sq.c.timestamp.label('takeoff_timestamp'), sq.c.track.label('takeoff_track'), sq.c.airport_id.label('takeoff_airport_id'),
            null().label('landing_timestamp'), null().label('landing_track'), null().label('landing_airport_id'),
            null().label('duration')) \
        .filter(sq.c.is_takeoff == true()) \
        .filter(or_(sq.c.device_id != sq.c.device_id_next,
                    sq.c.is_takeoff_next == true(),
                    sq.c.is_takeoff_next == null()))

    # unite all computed flights
    union_query = complete_flight_query.union(
            split_start_query,
            split_landing_query,
            only_landings_query,
            only_starts_query) \
        .subquery()

    # if a logbook entry exists --> update it
    upd = update(Logbook) \
        .where(and_(Logbook.device_id == union_query.c.device_id,
                    union_query.c.takeoff_airport_id != null(),
                    union_query.c.landing_airport_id != null(),
                    or_(and_(Logbook.takeoff_airport_id == union_query.c.takeoff_airport_id,
                             Logbook.takeoff_timestamp == union_query.c.takeoff_timestamp,
                             Logbook.landing_airport_id == null()),
                        and_(Logbook.takeoff_airport_id == null(),
                             Logbook.landing_airport_id == union_query.c.landing_airport_id,
                             Logbook.landing_timestamp == union_query.c.landing_timestamp)))) \
        .values({"takeoff_timestamp": union_query.c.takeoff_timestamp,
                 "takeoff_track": union_query.c.takeoff_track,
                 "takeoff_airport_id": union_query.c.takeoff_airport_id,
                 "landing_timestamp": union_query.c.landing_timestamp,
                 "landing_track": union_query.c.landing_track,
                 "landing_airport_id": union_query.c.landing_airport_id,
                 "duration": union_query.c.duration})

    result = session.execute(upd)
    update_counter = result.rowcount
    session.commit()
    logger.debug("Updated logbook entries: {}".format(update_counter))

    # if a logbook entry doesn't exist --> insert it
    new_logbook_entries = session.query(union_query) \
        .filter(~exists().where(
            and_(Logbook.device_id == union_query.c.device_id,
                 or_(and_(Logbook.takeoff_airport_id == union_query.c.takeoff_airport_id,
                          Logbook.takeoff_timestamp == union_query.c.takeoff_timestamp),
                     and_(Logbook.takeoff_airport_id == null(),
                          union_query.c.takeoff_airport_id == null())),
                 or_(and_(Logbook.landing_airport_id == union_query.c.landing_airport_id,
                          Logbook.landing_timestamp == union_query.c.landing_timestamp),
                     and_(Logbook.landing_airport_id == null(),
                          union_query.c.landing_airport_id == null())))))

    ins = insert(Logbook).from_select(
        (Logbook.reftime, Logbook.device_id, Logbook.takeoff_timestamp,
         Logbook.takeoff_track, Logbook.takeoff_airport_id,
         Logbook.landing_timestamp, Logbook.landing_track,
         Logbook.landing_airport_id, Logbook.duration), new_logbook_entries)

    result = session.execute(ins)
    insert_counter = result.rowcount
    session.commit()
    logger.debug("New logbook entries: {}".format(insert_counter))

    return "{}/{}".format(update_counter, insert_counter)
Beispiel #40
0
def update_receivers(session, logger=None):
    """Update receivers with stats."""

    if logger is None:
        logger = app.logger

    receiver_stats = (session.query(
        distinct(ReceiverStats.receiver_id).label("receiver_id"),
        func.first_value(ReceiverStats.firstseen).over(
            partition_by=ReceiverStats.receiver_id,
            order_by=case([(ReceiverStats.firstseen == null(), None)],
                          else_=ReceiverStats.date).asc().nullslast()).label(
                              "firstseen"),
        func.first_value(ReceiverStats.lastseen).over(
            partition_by=ReceiverStats.receiver_id,
            order_by=case([(ReceiverStats.lastseen == null(), None)],
                          else_=ReceiverStats.date).desc().nullslast()).label(
                              "lastseen"),
        func.first_value(ReceiverStats.location_wkt).over(
            partition_by=ReceiverStats.receiver_id,
            order_by=case([(ReceiverStats.location_wkt == null(), None)],
                          else_=ReceiverStats.date).desc().nullslast()).label(
                              "location_wkt"),
        func.first_value(ReceiverStats.altitude).over(
            partition_by=ReceiverStats.receiver_id,
            order_by=case([(ReceiverStats.altitude == null(), None)],
                          else_=ReceiverStats.date).desc().nullslast()).label(
                              "altitude"),
        func.first_value(ReceiverStats.version).over(
            partition_by=ReceiverStats.receiver_id,
            order_by=case(
                [(ReceiverStats.version == null(), None)],
                else_=ReceiverStats.date).desc().nullslast()).label("version"),
        func.first_value(ReceiverStats.platform).over(
            partition_by=ReceiverStats.receiver_id,
            order_by=case([(ReceiverStats.platform == null(), None)],
                          else_=ReceiverStats.date).desc().nullslast()).label(
                              "platform"),
    ).order_by(ReceiverStats.receiver_id).subquery())

    upd = (update(Receiver)
           .where(Receiver.id == receiver_stats.c.receiver_id)
           .values({
               "firstseen": receiver_stats.c.firstseen,
               "lastseen": receiver_stats.c.lastseen,
               "location": receiver_stats.c.location_wkt,
               "altitude": receiver_stats.c.altitude,
               "version": receiver_stats.c.version,
               "platform": receiver_stats.c.platform,
           }))

    result = session.execute(upd)
    update_counter = result.rowcount
    session.commit()
    logger.warn("Updated {} Receivers".format(update_counter))

    return "Updated {} Receivers".format(update_counter)
Beispiel #41
0
def create_device_stats(session, date, logger=None):
    """Add/update device stats."""

    if logger is None:
        logger = app.logger

    (start, end) = date_to_timestamps(date)

    # First kill the stats for the selected date
    deleted_counter = session.query(DeviceStats).filter(
        DeviceStats.date == date).delete()

    # Since "distinct count" does not work in window functions we need a work-around for receiver counting
    sq = (session.query(
        AircraftBeacon,
        func.dense_rank().over(
            partition_by=AircraftBeacon.device_id,
            order_by=AircraftBeacon.receiver_id).label("dr")).filter(
                and_(between(AircraftBeacon.timestamp, start, end),
                     AircraftBeacon.device_id != null())).filter(
                         or_(AircraftBeacon.error_count == 0,
                             AircraftBeacon.error_count == null())).subquery())

    # Calculate stats, firstseen, lastseen and last values != NULL
    device_stats = session.query(
        distinct(sq.c.device_id).label("device_id"),
        literal(date).label("date"),
        func.max(
            sq.c.dr).over(partition_by=sq.c.device_id).label("receiver_count"),
        func.max(sq.c.altitude).over(
            partition_by=sq.c.device_id).label("max_altitude"),
        func.count(sq.c.device_id).over(
            partition_by=sq.c.device_id).label("aircraft_beacon_count"),
        func.first_value(sq.c.name).over(
            partition_by=sq.c.device_id,
            order_by=case(
                [(sq.c.name == null(), None)],
                else_=sq.c.timestamp).asc().nullslast()).label("name"),
        func.first_value(sq.c.timestamp).over(
            partition_by=sq.c.device_id,
            order_by=case(
                [(sq.c.timestamp == null(), None)],
                else_=sq.c.timestamp).asc().nullslast()).label("firstseen"),
        func.first_value(sq.c.timestamp).over(
            partition_by=sq.c.device_id,
            order_by=case(
                [(sq.c.timestamp == null(), None)],
                else_=sq.c.timestamp).desc().nullslast()).label("lastseen"),
        func.first_value(sq.c.aircraft_type).over(
            partition_by=sq.c.device_id,
            order_by=case([(sq.c.aircraft_type == null(), None)],
                          else_=sq.c.timestamp).desc().nullslast()).label(
                              "aircraft_type"),
        func.first_value(sq.c.stealth).over(
            partition_by=sq.c.device_id,
            order_by=case(
                [(sq.c.stealth == null(), None)],
                else_=sq.c.timestamp).desc().nullslast()).label("stealth"),
        func.first_value(sq.c.software_version).over(
            partition_by=sq.c.device_id,
            order_by=case([(sq.c.software_version == null(), None)],
                          else_=sq.c.timestamp).desc().nullslast()).label(
                              "software_version"),
        func.first_value(sq.c.hardware_version).over(
            partition_by=sq.c.device_id,
            order_by=case([(sq.c.hardware_version == null(), None)],
                          else_=sq.c.timestamp).desc().nullslast()).label(
                              "hardware_version"),
        func.first_value(sq.c.real_address).over(
            partition_by=sq.c.device_id,
            order_by=case([(sq.c.real_address == null(), None)],
                          else_=sq.c.timestamp).desc().nullslast()).label(
                              "real_address"),
    ).subquery()

    # And insert them
    ins = insert(DeviceStats).from_select(
        [
            DeviceStats.device_id,
            DeviceStats.date,
            DeviceStats.receiver_count,
            DeviceStats.max_altitude,
            DeviceStats.aircraft_beacon_count,
            DeviceStats.name,
            DeviceStats.firstseen,
            DeviceStats.lastseen,
            DeviceStats.aircraft_type,
            DeviceStats.stealth,
            DeviceStats.software_version,
            DeviceStats.hardware_version,
            DeviceStats.real_address,
        ],
        device_stats,
    )
    res = session.execute(ins)
    insert_counter = res.rowcount
    session.commit()
    logger.debug("DeviceStats for {}: {} deleted, {} inserted".format(
        date, deleted_counter, insert_counter))

    return "DeviceStats for {}: {} deleted, {} inserted".format(
        date, deleted_counter, insert_counter)
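The dense_rank() work-around above, as a standalone sketch: PostgreSQL does not accept COUNT(DISTINCT ...) as a window function, but the maximum dense_rank() per partition equals the number of distinct order keys:

from sqlalchemy import func

sq = session.query(
    AircraftBeacon.device_id,
    func.dense_rank().over(
        partition_by=AircraftBeacon.device_id,
        order_by=AircraftBeacon.receiver_id).label("dr")).subquery()

# max(dr) per device equals count(distinct receiver_id) for that device
receiver_count = func.max(sq.c.dr).over(
    partition_by=sq.c.device_id).label("receiver_count")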
Beispiel #42
0
    NsfHerdAcademicField('J03', 'J', 'Education', 'Non-STEM Fields',
                         'Education and Human Science'),
    NsfHerdAcademicField('J04', 'J', 'Humanities', 'Non-STEM Fields',
                         'Arts and Sciences'),
    NsfHerdAcademicField('J05', 'J', 'Law', 'Non-STEM Fields', 'Law'),
    NsfHerdAcademicField('J06', 'J', 'Social Work', 'Non-STEM Fields',
                         'Arts and Sciences'),
    NsfHerdAcademicField('J07', 'J', 'Visual and Performing Arts',
                         'Non-STEM Fields', 'Fine and Performing Arts'),
    NsfHerdAcademicField('J08', 'J', 'Other Non-STEM Fields',
                         'Non-STEM Fields', 'Architecture')
])

print("Reading date dimension data.")
dates = pd.read_csv('data/date_dimension.csv')
dates = dates.fillna(sql.null())

print("Reading state data")
states = pd.read_csv('data/states.csv', encoding='utf-16')

print("Reading custom peer list data")
lists = pd.read_csv('data/ipeds_custom_peer_lists.csv', encoding='utf-8')

session = Session()

try:
    # bulk insert objects for federal agencies and academic fields (NSF)
    print('\nPopulating dimension tables.')
    session.bulk_save_objects(objects)
    print('\tFinished populating nsf_herd_federal_agencies.')
    print('\tFinished populating nsf_herd_academic_fields.')
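An aside on the fillna(sql.null()) line above: pandas marks missing CSV cells as NaN, and replacing them with sql.null() lets SQLAlchemy emit a SQL NULL on insert instead of the float nan. A minimal sketch (the downstream bulk call is an assumption):

import pandas as pd
from sqlalchemy import sql

df = pd.read_csv('data/date_dimension.csv')
records = df.fillna(sql.null()).to_dict(orient='records')
# 'records' can now be fed to bulk_insert_mappings() or an insert()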
Beispiel #43
0
def jobtype(jobtype_id):
    """
    UI endpoint for a single jobtype. Allows showing and updating the jobtype
    """
    jobtype = JobType.query.filter_by(id=jobtype_id).first()
    if not jobtype:
        return (render_template("pyfarm/error.html",
                                error="Jobtype %s not found" % jobtype_id),
                NOT_FOUND)

    if request.method == "POST":
        with db.session.no_autoflush:
            jobtype.description = request.form["description"]

            new_version = JobTypeVersion(jobtype=jobtype)
            new_version.max_batch = request.form["max_batch"].strip() or\
                sql.null()
            new_version.batch_contiguous =\
                ("batch_contiguous" in request.form and
                 request.form["batch_contiguous"] == "true")
            new_version.no_automatic_start_time =\
                ("no_automatic_start_time" in request.form and
                 request.form["no_automatic_start_time"] == "true")
            new_version.classname = request.form["classname"]
            new_version.code = request.form["code"]

            max_version, = db.session.query(func.max(
                JobTypeVersion.version)).filter_by(jobtype=jobtype).first()
            new_version.version = (max_version or 0) + 1

            previous_version = JobTypeVersion.query.filter_by(
                jobtype=jobtype).order_by(desc(
                    JobTypeVersion.version)).first()
            if previous_version:
                for requirement in previous_version.software_requirements:
                    new_requirement = JobTypeSoftwareRequirement()
                    new_requirement.jobtype_version = new_version
                    new_requirement.software = requirement.software
                    new_requirement.min_version = requirement.min_version
                    new_requirement.max_version = requirement.max_version
                    db.session.add(new_requirement)

            db.session.add(jobtype)
            db.session.add(new_version)
            db.session.commit()

            flash("Jobtype %s updated to version %s" %
                  (jobtype.name, new_version.version))

            return redirect(
                url_for("single_jobtype_ui", jobtype_id=jobtype.id), SEE_OTHER)

    else:
        latest_version = JobTypeVersion.query.filter_by(
            jobtype=jobtype).order_by(desc(JobTypeVersion.version)).first()
        if not latest_version:
            return (render_template("pyfarm/error.html",
                                    error="Jobtype %s has no versions" %
                                    jobtype_id), INTERNAL_SERVER_ERROR)

        return render_template("pyfarm/user_interface/jobtype.html",
                               jobtype=jobtype,
                               latest_version=latest_version,
                               software_items=Software.query)
Beispiel #44
0
    def test_reduce_aliased_union_2(self):
        metadata = MetaData()
        page_table = Table('page', metadata, Column('id', Integer,
                           primary_key=True))
        magazine_page_table = Table('magazine_page', metadata,
                                    Column('page_id', Integer,
                                    ForeignKey('page.id'),
                                    primary_key=True))
        classified_page_table = Table('classified_page', metadata,
                Column('magazine_page_id', Integer,
                ForeignKey('magazine_page.page_id'), primary_key=True))

        # this is essentially the union formed by the ORM's
        # polymorphic_union function. we define two versions with
        # different ordering of selects.
        #
        # the first selectable has the "real" column
        # classified_page.magazine_page_id

        pjoin = union(
                    select([
                        page_table.c.id,
                        magazine_page_table.c.page_id,
                        classified_page_table.c.magazine_page_id
                    ]).
                     select_from(
                        page_table.join(magazine_page_table).
                        join(classified_page_table)),

                    select([
                        page_table.c.id,
                        magazine_page_table.c.page_id,
                        cast(null(), Integer).label('magazine_page_id')
                    ]).
                    select_from(page_table.join(magazine_page_table))
                    ).alias('pjoin')
        eq_(util.column_set(sql_util.reduce_columns([pjoin.c.id,
            pjoin.c.page_id, pjoin.c.magazine_page_id])),
            util.column_set([pjoin.c.id]))

        # the first selectable has a CAST, which is a placeholder for
        # classified_page.magazine_page_id in the second selectable.
        # reduce_columns needs to take into account all foreign keys
        # derived from pjoin.c.magazine_page_id. the UNION construct
        # currently makes the external column look like that of the
        # first selectable only.

        pjoin = union(select([
                        page_table.c.id,
                        magazine_page_table.c.page_id,
                        cast(null(), Integer).label('magazine_page_id')
                      ]).
                      select_from(page_table.join(magazine_page_table)),

                      select([
                        page_table.c.id,
                        magazine_page_table.c.page_id,
                        classified_page_table.c.magazine_page_id
                      ]).
                      select_from(page_table.join(magazine_page_table).
                      join(classified_page_table))
                ).alias('pjoin')
        eq_(util.column_set(sql_util.reduce_columns([pjoin.c.id,
            pjoin.c.page_id, pjoin.c.magazine_page_id])),
            util.column_set([pjoin.c.id]))
Beispiel #45
0
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""
        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents('L3', agent_dead_limit)
        cutoff = self.get_cutoff_time(agent_dead_limit)

        context = n_ctx.get_admin_context()
        down_bindings = (context.session.query(RouterL3AgentBinding).join(
            agents_db.Agent).filter(
                agents_db.Agent.heartbeat_timestamp < cutoff,
                agents_db.Agent.admin_state_up
            ).outerjoin(
                l3_attrs_db.RouterExtraAttributes,
                l3_attrs_db.RouterExtraAttributes.router_id ==
                RouterL3AgentBinding.router_id).filter(
                    sa.or_(
                        l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                        l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
        try:
            agents_back_online = set()
            for binding in down_bindings:
                if binding.l3_agent_id in agents_back_online:
                    continue
                else:
                    agent = self._get_agent(context, binding.l3_agent_id)
                    if agent.is_active:
                        agents_back_online.add(binding.l3_agent_id)
                        continue

                agent_mode = self._get_agent_mode(binding.l3_agent)
                if agent_mode == constants.L3_AGENT_MODE_DVR:
                    # rescheduling from l3 dvr agent on compute node doesn't
                    # make sense. Router will be removed from that agent once
                    # there are no dvr serviceable ports on that compute node
                    LOG.warn(
                        _LW('L3 DVR agent on node %(host)s is down. '
                            'Not rescheduling from agent in \'dvr\' '
                            'mode.'), {'host': binding.l3_agent.host})
                    continue
                LOG.warn(
                    _LW("Rescheduling router %(router)s from agent %(agent)s "
                        "because the agent did not report to the server in "
                        "the last %(dead_time)s seconds."), {
                            'router': binding.router_id,
                            'agent': binding.l3_agent_id,
                            'dead_time': agent_dead_limit
                        })
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed,
                        oslo_messaging.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"),
                                  binding.router_id)
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception(
                _LE("Exception encountered during router "
                    "rescheduling."))
Beispiel #46
0
def upgrade():
    bind = op.get_bind()
    session = orm.Session(bind=bind)

    blocks = [
        {
            "blocknumber": 22477107,
            "txhash": "0xd16cd27f8635e84cbb6d02ecf6a5304bae998381cda1291d4cb690d9159196d9",
            "blockhash": "0x82ba068410ac71a70dfe95708b3c4ec08884355d8384423deb2337a6bcd64dd0",
        },
        {
            "blocknumber": 22477427,
            "txhash": "0x135c236ebb4d02a1d2bc13bf946e982871ad59a29ed413bbe6aed2acb60a1f17",
            "blockhash": "0x63b16e801cb90f3497043a904e0180a8161d9bbb45add0a9380e277b0befbbba",
        },
        {
            "blocknumber": 22477062,
            "txhash": "0x05771964bc0527268c330b78bb1819998d589d31a95e5f2f8aa846041de1dfee",
            "blockhash": "0x8e5f3edd55d1c7f3d6484a8fb7d2a9d825fb53ecb446c8959dd3887fa999d2e7",
        },
        {
            "blocknumber": 22477832,
            "txhash": "0xda0b64477469390eaa3b17098c01f3e03e5b0e52959ffd493804538dda039a92",
            "blockhash": "0x705d01ed50cf8f1d83686acc19c9d0bdd52493b3552532543629cdf07fba6005",
        },
    ]
    for block in blocks:
        block_query = (session.query(Block).filter(
            Block.blockhash == block["blockhash"],
            Block.number == block["blocknumber"],
        ).first())
        if not block_query:
            # If the block is missing, do not continue
            # This migration is a production hotfix
            return

    session.flush()

    tracks = [
        Track(
            track_id=470883,
            blocknumber=22477107,
            is_current=True,
            is_delete=False,
            txhash=
            "0xd16cd27f8635e84cbb6d02ecf6a5304bae998381cda1291d4cb690d9159196d9",
            blockhash=
            "0x82ba068410ac71a70dfe95708b3c4ec08884355d8384423deb2337a6bcd64dd0",
            owner_id=3538,
            route_id="",
            cover_art=None,
            cover_art_sizes="QmZFgQiupP2KWoqYZGqJ83zEwHtXMj6QhnNNfDCLpTax4q",
            title="???",
            tags="electro,house,2021",
            genre="Electro",
            mood="Fiery",
            credits_splits=None,
            remix_of=None,
            create_date=None,
            release_date="Fri Jul 30 2021 17:29:24 GMT+0200",
            length=None,
            file_type=None,
            description=None,
            license="All rights reserved",
            isrc=None,
            iswc=None,
            track_segments=[
                {
                    "multihash":
                    "QmW7kQwqbZcYWQHtYdXK6KezSCqQ1vRdwhdWtWvTT79rnk",
                    "duration": 6.016,
                },
                {
                    "multihash":
                    "QmYQEkQvPGHgkMLGhvfYcZdd385KmoifAer5kxnAuYhf4E",
                    "duration": 5.994667,
                },
                {
                    "multihash":
                    "QmVj5uvFPBpa9635hL17EcYPJAQURm6rqJdvociR6Qk1Q2",
                    "duration": 5.994667,
                },
                {
                    "multihash":
                    "QmW5QyPo5kUSXs1v5oJTrLWdFB1JHciWKcMpw3GbY7a7rX",
                    "duration": 5.994667,
                },
                {
                    "multihash":
                    "QmZeRjqj9SC1depFGw5vLwdAiayBr7rdrtSiUKYtoFaJix",
                    "duration": 6,
                },
                {
                    "multihash":
                    "QmQ2dPUnNMay2Yxj6pj7Jw7WvpodXoQxZvUTdSKjMcoF56",
                    "duration": 0.021333,
                },
            ],
            download={
                "is_downloadable": False,
                "requires_follow": False,
                "cid": None,
            },
            updated_at="July-30-2021 10:29:40",
            created_at="July-30-2021 10:29:40",
            is_unlisted=False,
            field_visibility={
                "genre": True,
                "mood": True,
                "tags": True,
                "share": True,
                "play_count": True,
                "remixes": True,
            },
            stem_of=null(),
        ),
        Track(
            track_id=470884,
            blocknumber=22477427,
            is_current=True,
            is_delete=False,
            txhash=
            "0x135c236ebb4d02a1d2bc13bf946e982871ad59a29ed413bbe6aed2acb60a1f17",
            blockhash=
            "0x63b16e801cb90f3497043a904e0180a8161d9bbb45add0a9380e277b0befbbba",
            owner_id=3538,
            route_id="",
            cover_art=None,
            cover_art_sizes="QmZFgQiupP2KWoqYZGqJ83zEwHtXMj6QhnNNfDCLpTax4q",
            title="???",
            tags="electro,house,2021",
            genre="Electro",
            mood="Fiery",
            credits_splits=None,
            remix_of=None,
            create_date=None,
            release_date="Fri Jul 30 2021 16:55:42 GMT+0200",
            length=None,
            file_type=None,
            description=None,
            license="All rights reserved",
            isrc=None,
            iswc=None,
            track_segments=[
                {
                    "multihash":
                    "QmW7kQwqbZcYWQHtYdXK6KezSCqQ1vRdwhdWtWvTT79rnk",
                    "duration": 6.016,
                },
                {
                    "multihash":
                    "QmYQEkQvPGHgkMLGhvfYcZdd385KmoifAer5kxnAuYhf4E",
                    "duration": 5.994667,
                },
                {
                    "multihash":
                    "QmVj5uvFPBpa9635hL17EcYPJAQURm6rqJdvociR6Qk1Q2",
                    "duration": 5.994667,
                },
                {
                    "multihash":
                    "QmW5QyPo5kUSXs1v5oJTrLWdFB1JHciWKcMpw3GbY7a7rX",
                    "duration": 5.994667,
                },
                {
                    "multihash":
                    "QmZeRjqj9SC1depFGw5vLwdAiayBr7rdrtSiUKYtoFaJix",
                    "duration": 6,
                },
                {
                    "multihash":
                    "QmQ2dPUnNMay2Yxj6pj7Jw7WvpodXoQxZvUTdSKjMcoF56",
                    "duration": 0.021333,
                },
            ],
            download={
                "is_downloadable": False,
                "requires_follow": False,
                "cid": None,
            },
            updated_at="July-30-2021 10:56:20",
            created_at="July-30-2021 10:56:20",
            is_unlisted=False,
            field_visibility={
                "genre": True,
                "mood": True,
                "tags": True,
                "share": True,
                "play_count": True,
                "remixes": True,
            },
            stem_of=null(),
        ),
        Track(
            track_id=470892,
            blocknumber=22477062,
            is_current=True,
            is_delete=False,
            txhash=
            "0x05771964bc0527268c330b78bb1819998d589d31a95e5f2f8aa846041de1dfee",
            blockhash=
            "0x8e5f3edd55d1c7f3d6484a8fb7d2a9d825fb53ecb446c8959dd3887fa999d2e7",
            owner_id=3538,
            route_id="",
            cover_art=None,
            cover_art_sizes="QmZFgQiupP2KWoqYZGqJ83zEwHtXMj6QhnNNfDCLpTax4q",
            title="???",
            tags="electro,house,2021",
            genre="Electro",
            mood="Fiery",
            credits_splits=None,
            remix_of=None,
            create_date=None,
            release_date="Fri Jul 30 2021 16:24:35 GMT+0200",
            length=None,
            file_type=None,
            description=None,
            license="All rights reserved",
            isrc=None,
            iswc=None,
            track_segments=[
                {
                    "multihash":
                    "QmW7kQwqbZcYWQHtYdXK6KezSCqQ1vRdwhdWtWvTT79rnk",
                    "duration": 6.016,
                },
                {
                    "multihash":
                    "QmYQEkQvPGHgkMLGhvfYcZdd385KmoifAer5kxnAuYhf4E",
                    "duration": 5.994667,
                },
                {
                    "multihash":
                    "QmVj5uvFPBpa9635hL17EcYPJAQURm6rqJdvociR6Qk1Q2",
                    "duration": 5.994667,
                },
                {
                    "multihash":
                    "QmW5QyPo5kUSXs1v5oJTrLWdFB1JHciWKcMpw3GbY7a7rX",
                    "duration": 5.994667,
                },
                {
                    "multihash":
                    "QmZeRjqj9SC1depFGw5vLwdAiayBr7rdrtSiUKYtoFaJix",
                    "duration": 6,
                },
                {
                    "multihash":
                    "QmQ2dPUnNMay2Yxj6pj7Jw7WvpodXoQxZvUTdSKjMcoF56",
                    "duration": 0.021333,
                },
            ],
            download={
                "is_downloadable": False,
                "requires_follow": False,
                "cid": None,
            },
            updated_at="July-30-2021 10:25:55",
            created_at="July-30-2021 10:25:55",
            is_unlisted=False,
            field_visibility={
                "genre": True,
                "mood": True,
                "tags": True,
                "share": True,
                "play_count": True,
                "remixes": True,
            },
            stem_of=null(),
        ),
        Track(
            track_id=470910,
            blocknumber=22477832,
            is_current=True,
            is_delete=False,
            txhash=
            "0xda0b64477469390eaa3b17098c01f3e03e5b0e52959ffd493804538dda039a92",
            blockhash=
            "0x705d01ed50cf8f1d83686acc19c9d0bdd52493b3552532543629cdf07fba6005",
            owner_id=3538,
            route_id="",
            cover_art=None,
            cover_art_sizes="QmZFgQiupP2KWoqYZGqJ83zEwHtXMj6QhnNNfDCLpTax4q",
            title="???",
            tags="electro,house,2021",
            genre="Electro",
            mood="Fiery",
            credits_splits=None,
            remix_of=None,
            create_date=None,
            release_date="Fri Jul 30 2021 17:29:24 GMT+0200",
            length=None,
            file_type=None,
            description=None,
            license="All rights reserved",
            isrc=None,
            iswc=None,
            track_segments=[
                {
                    "multihash":
                    "QmW7kQwqbZcYWQHtYdXK6KezSCqQ1vRdwhdWtWvTT79rnk",
                    "duration": 6.016,
                },
                {
                    "multihash":
                    "QmYQEkQvPGHgkMLGhvfYcZdd385KmoifAer5kxnAuYhf4E",
                    "duration": 5.994667,
                },
                {
                    "multihash":
                    "QmVj5uvFPBpa9635hL17EcYPJAQURm6rqJdvociR6Qk1Q2",
                    "duration": 5.994667,
                },
                {
                    "multihash":
                    "QmW5QyPo5kUSXs1v5oJTrLWdFB1JHciWKcMpw3GbY7a7rX",
                    "duration": 5.994667,
                },
                {
                    "multihash":
                    "QmZeRjqj9SC1depFGw5vLwdAiayBr7rdrtSiUKYtoFaJix",
                    "duration": 6,
                },
                {
                    "multihash":
                    "QmQ2dPUnNMay2Yxj6pj7Jw7WvpodXoQxZvUTdSKjMcoF56",
                    "duration": 0.021333,
                },
            ],
            download={
                "is_downloadable": False,
                "requires_follow": False,
                "cid": None,
            },
            updated_at="July-30-2021 11:30:05",
            created_at="July-30-2021 11:30:05",
            is_unlisted=False,
            field_visibility={
                "genre": True,
                "mood": True,
                "tags": True,
                "share": True,
                "play_count": True,
                "remixes": True,
            },
            stem_of=null(),
        ),
    ]
    session.add_all(tracks)

    track_routes = [
        TrackRoute(
            slug="oWXPm",
            title_slug="oWXPm",
            collision_id=0,
            owner_id=3538,
            track_id=470883,
            is_current=True,
            txhash=
            "0xd16cd27f8635e84cbb6d02ecf6a5304bae998381cda1291d4cb690d9159196d9",
            blockhash=
            "0x82ba068410ac71a70dfe95708b3c4ec08884355d8384423deb2337a6bcd64dd0",
            blocknumber=22477107,
        ),
        TrackRoute(
            slug="6YBxG",
            title_slug="6YBxG",
            collision_id=0,
            owner_id=3538,
            track_id=470884,
            is_current=True,
            blocknumber=22477427,
            txhash=
            "0x135c236ebb4d02a1d2bc13bf946e982871ad59a29ed413bbe6aed2acb60a1f17",
            blockhash=
            "0x63b16e801cb90f3497043a904e0180a8161d9bbb45add0a9380e277b0befbbba",
        ),
        TrackRoute(
            slug="KV8xz",
            title_slug="KV8xz",
            collision_id=0,
            owner_id=3538,
            track_id=470892,
            is_current=True,
            blocknumber=22477062,
            txhash=
            "0x05771964bc0527268c330b78bb1819998d589d31a95e5f2f8aa846041de1dfee",
            blockhash=
            "0x8e5f3edd55d1c7f3d6484a8fb7d2a9d825fb53ecb446c8959dd3887fa999d2e7",
        ),
        TrackRoute(
            slug="Nl2xw",
            title_slug="Nl2xw",
            collision_id=0,
            owner_id=3538,
            track_id=470910,
            is_current=True,
            blocknumber=22477832,
            txhash=
            "0xda0b64477469390eaa3b17098c01f3e03e5b0e52959ffd493804538dda039a92",
            blockhash=
            "0x705d01ed50cf8f1d83686acc19c9d0bdd52493b3552532543629cdf07fba6005",
        ),
    ]
    session.add_all(track_routes)

    session.query(SkippedTransaction).filter(
        SkippedTransaction.txhash.in_([
            "0x05771964bc0527268c330b78bb1819998d589d31a95e5f2f8aa846041de1dfee",
            "0xd16cd27f8635e84cbb6d02ecf6a5304bae998381cda1291d4cb690d9159196d9",
            "0x135c236ebb4d02a1d2bc13bf946e982871ad59a29ed413bbe6aed2acb60a1f17",
            "0xda0b64477469390eaa3b17098c01f3e03e5b0e52959ffd493804538dda039a92",
        ])).delete(synchronize_session="fetch")

    session.commit()
Beispiel #47
0
def create_receiver_stats(session, date, logger=None):
    """Add/update receiver stats."""

    if logger is None:
        logger = app.logger

    (start, end) = date_to_timestamps(date)

    # First kill the stats for the selected date
    deleted_counter = session.query(ReceiverStats).filter(
        ReceiverStats.date == date).delete()

    # Select one day
    sq = session.query(ReceiverBeacon).filter(
        between(ReceiverBeacon.timestamp, start, end)).subquery()

    # Calculate stats, firstseen, lastseen and last values != NULL
    receiver_stats = session.query(
        distinct(sq.c.receiver_id).label("receiver_id"),
        literal(date).label("date"),
        func.first_value(sq.c.timestamp).over(
            partition_by=sq.c.receiver_id,
            order_by=case(
                [(sq.c.timestamp == null(), None)],
                else_=sq.c.timestamp).asc().nullslast()).label("firstseen"),
        func.first_value(sq.c.timestamp).over(
            partition_by=sq.c.receiver_id,
            order_by=case(
                [(sq.c.timestamp == null(), None)],
                else_=sq.c.timestamp).desc().nullslast()).label("lastseen"),
        func.first_value(sq.c.location).over(
            partition_by=sq.c.receiver_id,
            order_by=case([(sq.c.location == null(), None)],
                          else_=sq.c.timestamp).desc().nullslast()).label(
                              "location_wkt"),
        func.first_value(sq.c.altitude).over(
            partition_by=sq.c.receiver_id,
            order_by=case(
                [(sq.c.altitude == null(), None)],
                else_=sq.c.timestamp).desc().nullslast()).label("altitude"),
        func.first_value(sq.c.version).over(
            partition_by=sq.c.receiver_id,
            order_by=case(
                [(sq.c.version == null(), None)],
                else_=sq.c.timestamp).desc().nullslast()).label("version"),
        func.first_value(sq.c.platform).over(
            partition_by=sq.c.receiver_id,
            order_by=case(
                [(sq.c.platform == null(), None)],
                else_=sq.c.timestamp).desc().nullslast()).label("platform"),
    ).subquery()

    # And insert them
    ins = insert(ReceiverStats).from_select(
        [
            ReceiverStats.receiver_id,
            ReceiverStats.date,
            ReceiverStats.firstseen,
            ReceiverStats.lastseen,
            ReceiverStats.location_wkt,
            ReceiverStats.altitude,
            ReceiverStats.version,
            ReceiverStats.platform,
        ],
        receiver_stats,
    )
    res = session.execute(ins)
    insert_counter = res.rowcount
    session.commit()
    logger.warn("ReceiverStats for {}: {} deleted, {} inserted".format(
        date, deleted_counter, insert_counter))

    # Update aircraft_beacon_count, aircraft_count and max_distance
    aircraft_beacon_stats = (session.query(
        AircraftBeacon.receiver_id,
        func.count(AircraftBeacon.timestamp).label("aircraft_beacon_count"),
        func.count(func.distinct(
            AircraftBeacon.device_id)).label("aircraft_count"),
        func.max(AircraftBeacon.distance).label("max_distance"),
    ).filter(
        and_(between(AircraftBeacon.timestamp, start,
                     end), AircraftBeacon.error_count == 0,
             AircraftBeacon.quality <= MAX_PLAUSIBLE_QUALITY,
             AircraftBeacon.relay == null())).group_by(
                 AircraftBeacon.receiver_id).subquery())

    upd = (update(ReceiverStats)
           .where(and_(ReceiverStats.date == date,
                       ReceiverStats.receiver_id ==
                       aircraft_beacon_stats.c.receiver_id))
           .values({
               "aircraft_beacon_count": aircraft_beacon_stats.c.aircraft_beacon_count,
               "aircraft_count": aircraft_beacon_stats.c.aircraft_count,
               "max_distance": aircraft_beacon_stats.c.max_distance,
           }))

    result = session.execute(upd)
    update_counter = result.rowcount
    session.commit()
    logger.warn("Updated {} ReceiverStats".format(update_counter))

    return "ReceiverStats for {}: {} deleted, {} inserted, {} updated".format(
        date, deleted_counter, insert_counter, update_counter)
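The insert(...).from_select(...) calls in these examples share one rule: the target column list and the selected columns must line up positionally. A reduced sketch under assumed models (StagingStats and DailyStats are illustrative, not from the example above):

from sqlalchemy import insert

sel = session.query(StagingStats.receiver_id, StagingStats.date) \
    .filter(StagingStats.date == date)
ins = insert(DailyStats).from_select(
    [DailyStats.receiver_id, DailyStats.date], sel)
session.execute(ins)
session.commit()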
Beispiel #48
0
def create_jobtype():
    if request.method == "GET":
        return render_template("pyfarm/user_interface/jobtype_create.html",
                               jobtypes=JobType.query,
                               software_items=Software.query)
    else:
        with db.session.no_autoflush:
            jobtype = JobType()
            jobtype.name = request.form["name"]
            jobtype.description = request.form["description"]
            jobtype_version = JobTypeVersion()
            jobtype_version.jobtype = jobtype
            jobtype_version.version = 1
            jobtype_version.max_batch = request.form["max_batch"].strip() or\
                sql.null()
            jobtype_version.batch_contiguous =\
                ("batch_contiguous" in request.form and
                 request.form["batch_contiguous"] == "true")
            jobtype_version.no_automatic_start_time =\
                ("no_automatic_start_time" in request.form and
                 request.form["no_automatic_start_time"] == "true")
            jobtype_version.classname = request.form["classname"]
            jobtype_version.code = request.form["code"]

            requirements = zip(request.form.getlist("software"),
                               request.form.getlist("min_version"),
                               request.form.getlist("min_version"))

            for requirement_tuple in requirements:
                software = Software.query.filter_by(
                    id=int(requirement_tuple[0])).first()
                if not software:
                    return (render_template("pyfarm/error.html",
                                            error="Software %s not found" %
                                            requirement_tuple[0]), NOT_FOUND)
                requirement = JobTypeSoftwareRequirement()
                requirement.software = software
                requirement.jobtype_version = jobtype_version

                if requirement_tuple[1] != "":
                    minimum_version = SoftwareVersion.query.filter_by(
                        id=int(requirement_tuple[1])).first()
                    if not minimum_version:
                        return (render_template(
                            "pyfarm/error.html",
                            error="Software version %s not "
                            "found" % requirement_tuple[1]), NOT_FOUND)
                    if minimum_version.software != software:
                        return (render_template(
                            "pyfarm/error.html",
                            error="Software version %s "
                            "does not belong to software %s" %
                            (minimum_version.version, software.software)),
                                BAD_REQUEST)
                    requirement.min_version = minimum_version

                if requirement_tuple[2] != "":
                    maximum_version = SoftwareVersion.query.filter_by(
                        id=int(requirement_tuple[2])).first()
                    if not maximum_version:
                        return (render_template(
                            "pyfarm/error.html",
                            error="Software version %s not "
                            "found" % requirement_tuple[2]), NOT_FOUND)
                    if maximum_version.software != software:
                        return (render_template(
                            "pyfarm/error.html",
                            error="Software version %s "
                            "does not belong to software %s" %
                            (maximum_version.version, software.software)),
                                BAD_REQUEST)
                    requirement.max_version = maximum_version

                db.session.add(requirement)

            db.session.add(jobtype)
            db.session.add(jobtype_version)
            db.session.commit()

        flash("Jobtype %s created" % jobtype.name)

        return redirect(url_for('jobtypes_index_ui'), SEE_OTHER)
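A small note on the max_batch handling above: str.strip() on a blank form field yields '', which is falsy, so "value or sql.null()" stores a SQL NULL for empty input. Sketch:

from sqlalchemy import sql

def form_value_or_null(raw):
    # '' and '   ' both become sql.null(); other input is kept (stripped)
    return raw.strip() or sql.null()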
Beispiel #49
0
def update_device_stats_jumps(session, date, logger=None):
    """Update device stats jumps."""

    if logger is None:
        logger = app.logger

    (start, end) = date_to_timestamps(date)

    # speed limits in m/s (values above these indicate an implausible position / jump)
    max_horizontal_speed = 1000
    max_vertical_speed = 100
    max_jumps = 10  # threshold for an 'ambiguous' device

    # find consecutive positions for a device
    sq = (session.query(
        AircraftBeacon.device_id,
        AircraftBeacon.timestamp,
        func.lead(AircraftBeacon.timestamp).over(
            partition_by=AircraftBeacon.device_id,
            order_by=AircraftBeacon.timestamp).label("timestamp_next"),
        AircraftBeacon.location_wkt,
        func.lead(AircraftBeacon.location_wkt).over(
            partition_by=AircraftBeacon.device_id,
            order_by=AircraftBeacon.timestamp).label("location_next"),
        AircraftBeacon.altitude,
        func.lead(AircraftBeacon.altitude).over(
            partition_by=AircraftBeacon.device_id,
            order_by=AircraftBeacon.timestamp).label("altitude_next"),
    ).filter(
        and_(between(AircraftBeacon.timestamp, start, end),
             AircraftBeacon.error_count == 0)).subquery())

    # calculate vertical and horizontal speed between points
    sq2 = (session.query(
        sq.c.device_id,
        (func.st_distancesphere(sq.c.location_next, sq.c.location) /
         (func.extract("epoch", sq.c.timestamp_next) -
          func.extract("epoch", sq.c.timestamp))).label("horizontal_speed"),
        ((sq.c.altitude_next - sq.c.altitude) /
         (func.extract("epoch", sq.c.timestamp_next) -
          func.extract("epoch", sq.c.timestamp))).label("vertical_speed"),
    ).filter(
        and_(sq.c.timestamp != null(), sq.c.timestamp_next != null(),
             sq.c.timestamp < sq.c.timestamp_next)).subquery())

    # ... and find and count 'jumps'
    sq3 = (session.query(
        sq2.c.device_id,
        func.sum(
            case([(or_(
                func.abs(sq2.c.horizontal_speed) > max_horizontal_speed,
                func.abs(sq2.c.vertical_speed) > max_vertical_speed), 1)],
                 else_=0)).label("jumps")).group_by(
                     sq2.c.device_id).subquery())

    upd = (update(DeviceStats)
           .where(and_(DeviceStats.date == date,
                       DeviceStats.device_id == sq3.c.device_id))
           .values({
               "ambiguous": sq3.c.jumps > max_jumps,
               "jumps": sq3.c.jumps,
           }))

    result = session.execute(upd)
    update_counter = result.rowcount
    session.commit()
    logger.warn("Updated {} DeviceStats jumps".format(update_counter))

    return "DeviceStats jumps for {}: {} updated".format(date, update_counter)
Beispiel #50
0
def add_jobtype_software_requirement(jobtype_id):
    with db.session.no_autoflush:
        jobtype = JobType.query.filter_by(id=jobtype_id).first()
        if not jobtype:
            return (render_template("pyfarm/error.html",
                                    error="Jobtype %s not found" % jobtype_id),
                    NOT_FOUND)

        previous_version = JobTypeVersion.query.filter_by(
            jobtype=jobtype).order_by(desc(JobTypeVersion.version)).first()
        if not previous_version:
            return (render_template("pyfarm/error.html",
                                    error="Jobtype %s has no versions" %
                                    jobtype_id), INTERNAL_SERVER_ERROR)

        new_version = JobTypeVersion(jobtype=jobtype)
        new_version.max_batch = previous_version.max_batch or sql.null()
        new_version.batch_contiguous = previous_version.batch_contiguous
        new_version.no_automatic_start_time =\
            previous_version.no_automatic_start_time
        new_version.classname = previous_version.classname
        new_version.code = previous_version.code
        new_version.version = previous_version.version + 1

        for requirement in previous_version.software_requirements:
            retained_requirement = JobTypeSoftwareRequirement()
            retained_requirement.jobtype_version = new_version
            retained_requirement.software = requirement.software
            retained_requirement.min_version = requirement.min_version
            retained_requirement.max_version = requirement.max_version
            db.session.add(retained_requirement)

        new_requirement = JobTypeSoftwareRequirement()
        new_requirement.jobtype_version = new_version

        new_requirement_software = Software.query.filter_by(
            id=request.form["software"]).first()
        if not new_requirement_software:
            return (render_template("pyfarm/error.html",
                                    error="Software %s not found" %
                                    request.form["software"]), NOT_FOUND)
        new_requirement.software = new_requirement_software

        if request.form["minimum_version"] != "":
            min_version = SoftwareVersion.query.filter_by(
                id=request.form["minimum_version"]).first()
            if not min_version:
                return (render_template(
                    "pyfarm/error.html",
                    error="Software version %s not "
                    "found" % request.form["minimum_version"]), NOT_FOUND)
            if min_version.software != new_requirement_software:
                return (render_template(
                    "pyfarm/error.html",
                    error="Software version %s does "
                    "not belong to software %s" %
                    (min_version.version, new_requirement_software.software)),
                        BAD_REQUEST)
            new_requirement.min_version = min_version

        if request.form["maximum_version"] != "":
            max_version = SoftwareVersion.query.filter_by(
                id=request.form["maximum_version"]).first()
            if not max_version:
                return (render_template(
                    "pyfarm/error.html",
                    error="Software version %s not "
                    "found" % request.form["maximum_version"]), NOT_FOUND)
            if max_version.software != new_requirement_software:
                return (render_template(
                    "pyfarm/error.html",
                    error="Software version %s does "
                    "not belong to software %s" %
                    (max_version.version, new_requirement_software.software)),
                        BAD_REQUEST)
            new_requirement.max_version = max_version

        db.session.add(new_version)
        db.session.add(new_requirement)
        db.session.commit()

    flash("Software requirement has been added to jobtype %s" % jobtype.name)

    return redirect(url_for("single_jobtype_ui", jobtype_id=jobtype.id),
                    SEE_OTHER)
Beispiel #51
0
def update_devices(session, logger=None):
    """Update devices with stats."""

    if logger is None:
        logger = app.logger

    device_stats = (session.query(
        distinct(DeviceStats.device_id).label("device_id"),
        func.first_value(DeviceStats.name).over(
            partition_by=DeviceStats.device_id,
            order_by=case(
                [(DeviceStats.name == null(), None)],
                else_=DeviceStats.date).desc().nullslast()).label("name"),
        func.first_value(DeviceStats.firstseen).over(
            partition_by=DeviceStats.device_id,
            order_by=case(
                [(DeviceStats.firstseen == null(), None)],
                else_=DeviceStats.date).asc().nullslast()).label("firstseen"),
        func.max(DeviceStats.lastseen).over(
            partition_by=DeviceStats.device_id,
            order_by=case(
                [(DeviceStats.lastseen == null(), None)],
                else_=DeviceStats.date).desc().nullslast()).label("lastseen"),
        func.first_value(DeviceStats.aircraft_type).over(
            partition_by=DeviceStats.device_id,
            order_by=case([(DeviceStats.aircraft_type == null(), None)],
                          else_=DeviceStats.date).desc().nullslast()).label(
                              "aircraft_type"),
        func.first_value(DeviceStats.stealth).over(
            partition_by=DeviceStats.device_id,
            order_by=case(
                [(DeviceStats.stealth == null(), None)],
                else_=DeviceStats.date).desc().nullslast()).label("stealth"),
        func.first_value(DeviceStats.software_version).over(
            partition_by=DeviceStats.device_id,
            order_by=case([(DeviceStats.software_version == null(), None)],
                          else_=DeviceStats.date).desc().nullslast()).label(
                              "software_version"),
        func.first_value(DeviceStats.hardware_version).over(
            partition_by=DeviceStats.device_id,
            order_by=case([(DeviceStats.hardware_version == null(), None)],
                          else_=DeviceStats.date).desc().nullslast()).label(
                              "hardware_version"),
        func.first_value(DeviceStats.real_address).over(
            partition_by=DeviceStats.device_id,
            order_by=case([(DeviceStats.real_address == null(), None)],
                          else_=DeviceStats.date).desc().nullslast()).label(
                              "real_address"),
    ).order_by(DeviceStats.device_id).subquery())

    upd = (update(Device).where(
        and_(Device.id == device_stats.c.device_id)).values({
            "name":
            device_stats.c.name,
            "firstseen":
            device_stats.c.firstseen,
            "lastseen":
            device_stats.c.lastseen,
            "aircraft_type":
            device_stats.c.aircraft_type,
            "stealth":
            device_stats.c.stealth,
            "software_version":
            device_stats.c.software_version,
            "hardware_version":
            device_stats.c.hardware_version,
            "real_address":
            device_stats.c.real_address,
        }))

    result = session.execute(upd)
    update_counter = result.rowcount
    session.commit()
    logger.warning("Updated {} Devices".format(update_counter))

    return "Updated {} Devices".format(update_counter)
Beispiel #52
0
def parse_track_event(self, session, update_task, entry, event_type,
                      track_record, block_timestamp):
    event_args = entry["args"]
    # Just use block_timestamp as integer
    block_datetime = datetime.utcfromtimestamp(block_timestamp)

    if event_type == track_event_types_lookup["new_track"]:
        track_record.created_at = block_datetime

        track_metadata_digest = event_args._multihashDigest.hex()
        track_metadata_hash_fn = event_args._multihashHashFn
        buf = multihash.encode(bytes.fromhex(track_metadata_digest),
                               track_metadata_hash_fn)
        track_metadata_multihash = multihash.to_b58_string(buf)
        logger.info(f"track metadata ipld : {track_metadata_multihash}")

        # If the IPLD is blacklisted, do not keep processing the current entry
        # continue with the next entry in the update_track_events list
        if is_blacklisted_ipld(session, track_metadata_multihash):
            logger.info(
                f"Encountered blacklisted metadata CID {track_metadata_multihash} in indexing new track"
            )
            return None

        owner_id = event_args._trackOwnerId
        handle = (session.query(User.handle).filter(
            User.user_id == owner_id, User.is_current == True).first())[0]
        track_record.owner_id = owner_id

        track_record.is_delete = False

        track_metadata = update_task.ipfs_client.get_metadata(
            track_metadata_multihash, track_metadata_format)

        track_record = populate_track_record_metadata(track_record,
                                                      track_metadata, handle)
        track_record.metadata_multihash = track_metadata_multihash

        # if cover_art CID is of a dir, store under _sizes field instead
        if track_record.cover_art:
            # If CID is in IPLD blacklist table, do not continue with indexing
            if is_blacklisted_ipld(session, track_record.cover_art):
                logger.info(
                    f"Encountered blacklisted cover art CID {track_record.cover_art} in indexing new track"
                )
                return None

            logger.warning(
                f"tracks.py | Processing track cover art {track_record.cover_art}"
            )
            track_record.cover_art_sizes = track_record.cover_art
            track_record.cover_art = None

        update_stems_table(session, track_record, track_metadata)
        update_remixes_table(session, track_record, track_metadata)

    if event_type == track_event_types_lookup["update_track"]:
        upd_track_metadata_digest = event_args._multihashDigest.hex()
        upd_track_metadata_hash_fn = event_args._multihashHashFn
        update_buf = multihash.encode(bytes.fromhex(upd_track_metadata_digest),
                                      upd_track_metadata_hash_fn)
        upd_track_metadata_multihash = multihash.to_b58_string(update_buf)
        logger.info(
            f"update track metadata ipld : {upd_track_metadata_multihash}")

        # If the IPLD is blacklisted, do not keep processing the current entry
        # continue with the next entry in the update_track_events list
        if is_blacklisted_ipld(session, upd_track_metadata_multihash):
            logger.info(
                f"Encountered blacklisted metadata CID {upd_track_metadata_multihash} in indexing update track"
            )
            return None

        owner_id = event_args._trackOwnerId
        handle = (session.query(User.handle).filter(
            User.user_id == owner_id, User.is_current == True).first())[0]
        track_record.owner_id = owner_id
        track_record.is_delete = False

        track_metadata = update_task.ipfs_client.get_metadata(
            upd_track_metadata_multihash, track_metadata_format)

        track_record = populate_track_record_metadata(track_record,
                                                      track_metadata, handle)
        track_record.metadata_multihash = upd_track_metadata_multihash

        # All incoming cover art is intended to be a directory
        # Any write to cover_art field is replaced by cover_art_sizes
        if track_record.cover_art:
            # If CID is in IPLD blacklist table, do not continue with indexing
            if is_blacklisted_ipld(session, track_record.cover_art):
                logger.info(
                    f"Encountered blacklisted cover art CID {track_record.cover_art} in indexing update track"
                )
                return None

            logger.info(
                f"tracks.py | Processing track cover art {track_record.cover_art}"
            )
            track_record.cover_art_sizes = track_record.cover_art
            track_record.cover_art = None

        update_remixes_table(session, track_record, track_metadata)

    if event_type == track_event_types_lookup["delete_track"]:
        track_record.is_delete = True
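        # null() forces a literal SQL NULL into the UPDATE, clearing any previous value.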
        track_record.stem_of = null()
        track_record.remix_of = null()
        logger.info(f"Removing track : {track_record.track_id}")

    track_record.updated_at = block_datetime

    return track_record
Beispiel #53
0
def col(name, table):
    try:
        return colnamemaps[table][name]
    except KeyError:
        return sql.cast(sql.null(), types[name]).label(name)
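The cast(null(), <type>).label(<name>) pattern yields a typed NULL placeholder, so a table that lacks the requested column can still contribute a congruent column list to a UNION. The same construct in isolation, with a hypothetical column name and an assumed Integer type:

from sqlalchemy import sql, Integer

# Typed NULL stand-in for a missing "width" column (hypothetical name).
placeholder = sql.cast(sql.null(), Integer).label("width")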
Beispiel #54
0
def resolve_buildings(self, info):
    query = Building.get_query(info)
    return query.join(CityModel).filter(
        BuildingModel.is_active == 1, CityModel.latitude != null(), CityModel.longitude != null(),
    ).all()
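SQLAlchemy rewrites comparisons against null() into IS NULL / IS NOT NULL, so the filter above avoids the SQL trap of "column <> NULL" (which matches nothing). The two coordinate checks could equivalently be spelled with the identity operator:

# Equivalent spelling of the coordinate filter:
query.filter(CityModel.latitude.isnot(None), CityModel.longitude.isnot(None))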
Beispiel #55
0
def crawl_webpage(url):

    r = requests.get(url)
    rj = r.json()

    logger.debug(rj['rowCount'])

    url_melde = 'https://www.intensivregister.de/api/public/stammdaten/krankenhausstandort/{0}/meldebereiche'

    hospital_entries = []
    hospital_entries_extended = []
    hospital_beds_entries_extended = []

    len_ = len(rj['data'])
    for i, x in enumerate(rj['data']):
        logger.debug(str(i) + ' / ' + str(len_))

        name = x['krankenhausStandort']['bezeichnung']

        address = str(x['krankenhausStandort']['strasse'])
        address += ' '
        address += str(x['krankenhausStandort']['hausnummer'])
        address += ' '
        address += str(x['krankenhausStandort']['plz'])
        address += ' '
        address += str(x['krankenhausStandort']['ort'])

        state = x['krankenhausStandort']['bundesland']

        location = '('
        location += str(x['krankenhausStandort']['position']['longitude'])
        location += ' '
        location += str(x['krankenhausStandort']['position']['latitude'])
        location += ')'

        icu_low_state = legends(x['bettenStatus']['statusLowCare'])

        icu_high_state = legends(x['bettenStatus']['statusHighCare'])

        ecmo_state = legends(x['bettenStatus']['statusECMO'])

        last_update = x['meldezeitpunkt']

        hospital_id = int(x['id'])

        covid_cases = get_attr_if_exists(x, 'faelleCovidAktuell')
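        # Coerce to int when reported; otherwise store an explicit SQL NULL.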
        if covid_cases is not None:
            covid_cases = int(covid_cases)
        else:
            covid_cases = null()

        tmp_url = url_melde.format(str(hospital_id))
        r_melde = requests.get(tmp_url)
        rj_melde = r_melde.json()

        contact = ''
        for y in rj_melde:
            if len(y['ansprechpartner']):
                for c in y['ansprechpartner']:
                    contact += str(
                        c['zustaendigkeit']['bezeichnung']) + ' : ' + str(
                            c['nachname']) + ' : Tel. ' + str(
                                c['telefonnummer'])
                    contact += ', '
            else:
                for c in y['tags']:
                    contact += c
                    contact += ', '

            hospital_id_beds = int(y['krankenhausStandort']['id'])
            name_beds = y['bezeichnung']
            casesecmoyear_beds = int(y['faelleEcmoJahr'])
            available_beds_beds = int(y['bettenPlankapazitaet'])
            description_beds = y['bettenPlankapazitaet']
            last_update_beds = y['letzteMeldung']
            if last_update_beds is None:
                last_update_beds = null()
            beds_beds = y['krankenhausStandort'][
                'intensivmedizinischePlanbetten']
            if beds_beds is not None:
                beds_beds = int(beds_beds)
            else:
                beds_beds = null()

            beds = [
                hospital_id_beds, name_beds, available_beds_beds,
                casesecmoyear_beds, beds_beds, description_beds,
                last_update_beds
            ]

            hospital_beds_entries_extended.append(beds)

        contact = contact[:-2]
        if len(contact) > 255:
            contact = contact[:250]

        hospital_entry = [
            name, address, contact, state, icu_low_state, icu_high_state,
            ecmo_state, last_update, location
        ]

        hospital_entries.append(hospital_entry)

        hospital_entry = [
            hospital_id, name, address, contact, state, icu_low_state,
            icu_high_state, ecmo_state, last_update, location, covid_cases
        ]

        hospital_entries_extended.append(hospital_entry)

    return hospital_entries, hospital_entries_extended, hospital_beds_entries_extended
Beispiel #56
0
            'toc_code': 'control',
            'toi_code': 'academic_institution',
            'med_sch_flag': 'medical_school_flag',
            '01.a': 'federal_government',
            '01.b': 'state_and_local_government',
            '01.c': 'business',
            '01.d': 'nonprofit_organizations',
            '01.e': 'institutional_funds',
            '01.f': 'other_sources',
            '01.g': 'total_rd_expenses',
            '04': 'medical_school_expenses',
            'NA_01': 'arra_funds'
        })

    # replace NaN with database-compliant nulls
    institutions = institutions.fillna(sql.null())
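    # The sql.null() sentinels render as literal NULLs in the bulk insert below.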

    # insert data into dbo.survey_records
    session = Session()

    try:
        print('Attempting to insert {:,} rows for {} into {}.'.format(
            institutions.shape[0], year, NsfHerdInstitution.__tablename__))
        record_deletes = session.query(NsfHerdInstitution).filter(
            NsfHerdInstitution.date_key == date_key).delete(
                synchronize_session=False)
        session.bulk_insert_mappings(
            mapper=NsfHerdInstitution,
            mappings=institutions.to_dict(orient='records'),
            render_nulls=True)
    except Exception as e:
Beispiel #57
0
def filter_query(self, query, view):
    return query.filter(
        (view.model.owner_id == self.get_request_credentials())
        | (view.model.owner_id == sql.null()))
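Here == sql.null() compiles to IS NULL, so ownerless rows remain visible to every caller alongside their own. An equivalent spelling with the identity operator, mirroring the method's names:

from sqlalchemy import or_

# Same predicate using is_(None) instead of == sql.null():
query.filter(or_(view.model.owner_id == self.get_request_credentials(),
                 view.model.owner_id.is_(None)))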
Beispiel #58
0
    keepers = ['inst_id',
            'date_key',
            'funding_type',
            'agency_key',
            'academic_field_key',
            'ncses_inst_id',
            'unitid',
            'expenditure']

    herd = herd[keepers].groupby(['inst_id',
            'date_key',
            'funding_type',
            'agency_key',
            'academic_field_key',
            'ncses_inst_id',
            'unitid']).sum().reset_index()

    herd['expenditure'] = herd.expenditure.fillna(0)
    herd = herd.fillna(sql.null())

    # insert data into dbo.survey_records
    session = Session()

    try:
        print('Attempting to insert {:,} rows for {} into {}.'.format(herd.shape[0], year, NsfHerdDetail.__tablename__))
        record_deletes = session.query(NsfHerdDetail).filter(NsfHerdDetail.date_key==date_key).delete(synchronize_session=False)
        session.bulk_insert_mappings(mapper = NsfHerdDetail,
                                    mappings = herd.to_dict(orient='records'),
                                    render_nulls = True)
    except Exception as e:
        session.rollback()
        print(str(e))
        print('No data were altered due to error.')
    else:
Beispiel #59
0
def bm_node_get_associated(context, service_host=None):
    query = model_query(context, models.BareMetalNode, read_deleted="no").\
                filter(models.BareMetalNode.instance_uuid != null())
    if service_host:
        query = query.filter_by(service_host=service_host)
    return query.all()
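The complementary lookup, nodes not yet associated with an instance, would flip the comparison; a sketch under the same helpers (hypothetical function name, not part of this snippet):

def bm_node_get_unassociated(context, service_host=None):
    # instance_uuid == null() compiles to "instance_uuid IS NULL".
    query = model_query(context, models.BareMetalNode, read_deleted="no").\
                filter(models.BareMetalNode.instance_uuid == null())
    if service_host:
        query = query.filter_by(service_host=service_host)
    return query.all()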
Beispiel #60
0
def update_entries(session, date, logger=None):
    """Create receiver coverage stats for Melissas ognrange."""

    if logger is None:
        logger = app.logger

    logger.info("Compute receiver coverages.")

    (start, end) = date_to_timestamps(date)

    # Filter aircraft beacons
    sq = (session.query(
        AircraftBeacon.location_mgrs_short, AircraftBeacon.receiver_id,
        AircraftBeacon.signal_quality, AircraftBeacon.altitude,
        AircraftBeacon.device_id).filter(
            and_(between(AircraftBeacon.timestamp, start,
                         end), AircraftBeacon.location_mgrs_short != null(),
                 AircraftBeacon.receiver_id != null(),
                 AircraftBeacon.device_id != null())).subquery())

    # ... and group them by reduced MGRS, receiver and date
    query = (session.query(
        sq.c.location_mgrs_short,
        sq.c.receiver_id,
        func.cast(date, Date).label("date"),
        func.max(sq.c.signal_quality).label("max_signal_quality"),
        func.min(sq.c.altitude).label("min_altitude"),
        func.max(sq.c.altitude).label("max_altitude"),
        func.count(sq.c.altitude).label("aircraft_beacon_count"),
        func.count(func.distinct(sq.c.device_id)).label("device_count"),
    ).group_by(sq.c.location_mgrs_short, sq.c.receiver_id).subquery())

    # if a receiver coverage entry exists --> update it
    upd = (update(ReceiverCoverage).where(
        and_(
            ReceiverCoverage.location_mgrs_short ==
            query.c.location_mgrs_short,
            ReceiverCoverage.receiver_id == query.c.receiver_id,
            ReceiverCoverage.date == date)).values({
                "max_signal_quality":
                query.c.max_signal_quality,
                "min_altitude":
                query.c.min_altitude,
                "max_altitude":
                query.c.max_altitude,
                "aircraft_beacon_count":
                query.c.aircraft_beacon_count,
                "device_count":
                query.c.device_count,
            }))

    result = session.execute(upd)
    update_counter = result.rowcount
    session.commit()
    logger.debug(
        "Updated receiver coverage entries: {}".format(update_counter))

    # if a receiver coverage entry doesn't exist --> insert it
    new_coverage_entries = session.query(query).filter(~exists().where(
        and_(
            ReceiverCoverage.location_mgrs_short ==
            query.c.location_mgrs_short, ReceiverCoverage.receiver_id ==
            query.c.receiver_id, ReceiverCoverage.date == date)))

    ins = insert(ReceiverCoverage).from_select(
        (
            ReceiverCoverage.location_mgrs_short,
            ReceiverCoverage.receiver_id,
            ReceiverCoverage.date,
            ReceiverCoverage.max_signal_quality,
            ReceiverCoverage.min_altitude,
            ReceiverCoverage.max_altitude,
            ReceiverCoverage.aircraft_beacon_count,
            ReceiverCoverage.device_count,
        ),
        new_coverage_entries,
    )

    result = session.execute(ins)
    insert_counter = result.rowcount
    session.commit()

    finish_message = "ReceiverCoverage: {} inserted, {} updated".format(
        insert_counter, update_counter)
    logger.debug(finish_message)
    return finish_message
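The update-then-insert pair above is a portable upsert. On PostgreSQL the same effect could plausibly be collapsed into a single statement via the dialect's ON CONFLICT support; a sketch, assuming a unique constraint over the three key columns (not shown in this code):

from sqlalchemy.dialects.postgresql import insert as pg_insert

stmt = pg_insert(ReceiverCoverage).values(rows)  # rows: list of mapping dicts
stmt = stmt.on_conflict_do_update(
    index_elements=["location_mgrs_short", "receiver_id", "date"],
    set_={"max_signal_quality": stmt.excluded.max_signal_quality,
          "min_altitude": stmt.excluded.min_altitude,
          "max_altitude": stmt.excluded.max_altitude,
          "aircraft_beacon_count": stmt.excluded.aircraft_beacon_count,
          "device_count": stmt.excluded.device_count})
session.execute(stmt)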