Example #1
    def list(self, service_filter=None):
        service_filter = service_filter or {}  # avoid a shared mutable default argument
        service_bundle = Bundle('service',
                Service.id, Service.name, Service.description, Service.protocol,
                Service.status, Service.ports, Service.version, Service.owned,
                Service.interface_id,
                func.count(distinct(Vulnerability.id)).label('vuln_count'), EntityMetadata.couchdb_id,
                EntityMetadata.revision, EntityMetadata.update_time, EntityMetadata.update_user,
                EntityMetadata.update_action, EntityMetadata.creator, EntityMetadata.create_time,
                EntityMetadata.update_controller_action, EntityMetadata.owner, EntityMetadata.command_id,
                func.count(distinct(Credential.id)).label("credentials_count"))

        # Use & (not Python's `and`) to combine the two Credential conditions;
        # `and` would silently drop one of them (see the sketch after this example).
        query = self._session.query(service_bundle).\
                group_by(Service.id).\
                outerjoin(EntityMetadata, EntityMetadata.id == Service.entity_metadata_id).\
                outerjoin(Vulnerability, Service.id == Vulnerability.service_id).\
                outerjoin(Interface, Interface.id == Service.interface_id).\
                outerjoin(Credential, (Credential.service_id == Service.id) & (Credential.host_id == None)).\
                outerjoin(Host, Host.id == Interface.host_id)

        query = apply_search_filter(query, self.COLUMNS_MAP, None, service_filter, self.STRICT_FILTERING)

        # LIKE search for services whose couchdb_id starts with the host id ("hostId.%.%")
        if service_filter.get('hostIdCouchdb') is not None:
            query = query.filter(
                EntityMetadata.couchdb_id.like(service_filter.get('hostIdCouchdb') + ".%.%"))

        raw_services = query.all()
        services = [self.__get_service_data(r.service) for r in raw_services]
        result = {'services': services}
        return result
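Note: the Credential join condition above originally used Python's `and`, which cannot combine two SQLAlchemy expressions into one compound clause; depending on the SQLAlchemy version it either raises or silently keeps only one condition. A minimal, self-contained sketch (with a hypothetical table t) of the forms that do work:

from sqlalchemy import Column, Integer, MetaData, Table, and_

metadata = MetaData()
t = Table('t', metadata, Column('a', Integer), Column('b', Integer))

# and_() builds one compound SQL clause from both comparisons.
good = and_(t.c.a == 1, t.c.b == 2)
also_good = (t.c.a == 1) & (t.c.b == 2)  # the operator form of and_()
print(good)       # t.a = :a_1 AND t.b = :b_1
print(also_good)  # same SQL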
Example #2
def my_view(request):
    try:
        # query statistics for the main page
        result_dict = dict()
        result_dict["orphan_msrun_count"] = \
            DBSession.query(func.count(distinct(MsRun.filename))).filter(MsRun.source_source_id == None).filter(MsRun.flag_trash == 0).one()[0]
        result_dict["all_msrun_count"] = DBSession.query(func.count(distinct(MsRun.filename))).one()[0]
        result_dict["sources_count"] = DBSession.query(func.count(distinct(Source.sample_id))).one()[0]
        result_dict["trash_count"] = DBSession.query(func.count(distinct(MsRun.filename))).filter(MsRun.flag_trash == 1).one()[0]


        result_dict["orphan_msrun"] = json.dumps(
        DBSession.query(distinct(MsRun.filename).label("orphan_ms_run")).filter(MsRun.source_source_id == None).filter(MsRun.flag_trash == 0).order_by(MsRun.filename.desc()).limit(10).all())

        # SELECT organ, count(organ) FROM Source GROUP BY organ
        sources = DBSession.query(Source.organ, func.count(Source.organ)).group_by(Source.organ).order_by(func.count(Source.organ).desc()).all()
        merged_sources = dict()
        source_acc = 0
        for i in range(0,len(sources)):
            if i < 6:
                merged_sources[sources[i][0]] = sources[i][1]
            else:
                source_acc += sources[i][1]
        merged_sources["others"] = source_acc
        result_dict["sources"] = json.dumps(merged_sources)

        return result_dict
    except Exception:
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
Example #3
def wemo(request, command=None, device=None):
    if not command:
        raise NoCommandSpecified()
    if device:
        device = urllib.unquote(device)

    if command == 'ls':
        devices = session.query(distinct(WemoTimeSeries.device_name)).all()
        response = '<p>'
        for d in devices:
            response += "'" + d[0] + "'</br>"
        response += '</p>'
    
    elif command == 'on':
        wemo = WemoCC().on(device)
        response = '<p>' + device + ' turned on.</p>'
    
    elif command == 'off':
        wemo = WemoCC().off(device)
        response = '<p>' + device + ' turned off.</p>'

    elif command == 'completion':
        completion = []
        devices = session.query(distinct(WemoTimeSeries.device_name)).all()
        for d in devices:
            completion.append(d[0].lower().replace(' ', '_'))  # rows are 1-tuples, as in the 'ls' branch above
        response = json.dumps(completion)

    return HttpResponse(response)
Example #4
def home(request):

    session.rollback()
    session.commit()

    filter_date = datetime.datetime.utcnow() - datetime.timedelta(seconds=60)
    
    wemo_device_count = session.query(func.count(distinct(WemoTimeSeries.device_name))).first()[0]
    wemo = session.query(WemoTimeSeries).order_by(WemoTimeSeries.datetime.desc()).limit(wemo_device_count).all()

    hue_device_count = session.query(func.count(distinct(HueTimeSeries.device_name))).first()[0]
    hue = session.query(HueTimeSeries).order_by(HueTimeSeries.datetime.desc()).limit(hue_device_count).all()

    nest = session.query(NestTimeSeries).order_by(NestTimeSeries.datetime.desc()).limit(1).first()

    apex = session.query(ApexTimeSeries).filter(ApexTimeSeries.value != None).filter(ApexTimeSeries.datetime>filter_date).all()
   
    roomba_device_count = session.query(func.count(distinct(RoombaTimeSeries.device_name))).first()[0]
    roomba = session.query(RoombaTimeSeries).order_by(RoombaTimeSeries.datetime.desc()).limit(roomba_device_count).all()
    
    f = Flower()
    flower = f.get_data(.001)[-1]
    
    return render(request, template_name='home.html', dictionary={'wemo': wemo, 
                                                                  'hue': hue, 
                                                                  'nest': nest,
                                                                  'apex': apex,
                                                                  'roomba': roomba,
                                                                  'flower': flower,
                                                                 })
Example #5
def populate_debuggerquarters(session):
    # For efficiency, we restrict our search to those debuggers who have touched a bug in some way
    first = lambda tup:tup[0]
    assignee_ids = set(map(first, session.query(distinct(Bug.assignee_id)).all()))
    bugeventful_ids = set(map(first, session.query(distinct(BugEvent.dbid)).all()))

    bugtouchers = set.union(assignee_ids, bugeventful_ids)

    n = 0
    for month in session.query(Month):
        ## XXX testing deleteme
        if month.first <= datetime.date(2008, 6, 4) <= month.last:
            print "asdff 233 should be in here"
        else:
            continue
        quarter = Quarter(first=month)
        graph = MozIRCGraph.load(quarter, session)
        print "Graph with %d vertices" % (len(graph.dbid_to_vertex))
        for (dbid, vertex) in graph.dbid_to_vertex.iteritems():
            if dbid not in bugtouchers:
                continue
            dq = DebuggerQuarter(dbid=dbid, first=quarter.first)

            dq.constraint = vertex.constraint()[0]
            dq.closeness = vertex.closeness()
            dq.clustering = graph.g.transitivity_local_undirected([vertex.index])[0]
            dq.indegree = vertex.indegree()
            dq.outdegree = vertex.outdegree()
            dq.betweenness = vertex.betweenness()
            dq.effective_size = graph.effective_size(vertex)

            session.add(dq)
            n += 1

    print "Added %d dms" % (n)
Example #6
def populate_debuggermonths(session):
    # For efficiency, we restrict our search to those debuggers who have touched a bug in some way
    first = lambda tup:tup[0]
    assignee_ids = set(map(first, session.query(distinct(Bug.assignee_id)).all()))
    bugeventful_ids = set(map(first, session.query(distinct(BugEvent.dbid)).all()))

    bugtouchers = set.union(assignee_ids, bugeventful_ids)
    n = 0
    for month in session.query(Month):
        graph = MozIRCGraph.load(month, session)
        print "Got graph with %d vertices" % (len(graph.dbid_to_vertex))
        for (dbid, vertex) in graph.dbid_to_vertex.iteritems():
            if dbid not in bugtouchers:
                continue
            dm = DebuggerMonth(dbid=dbid, monthid=month.id)

            dm.constraint = vertex.constraint()[0]
            dm.closeness = vertex.closeness()
            dm.clustering = graph.g.transitivity_local_undirected([vertex.index])[0]
            dm.indegree = vertex.indegree()
            dm.outdegree = vertex.outdegree()
            dm.betweenness = vertex.betweenness()
            dm.effective_size = graph.effective_size(vertex)

            session.add(dm)
            n += 1

    print "Added %d dms" % (n)
Example #7
def archive(year=0, month=0):
    """
    Shows archived threads, meaning all threads sorted by year and month.
    If no year is passed, then a list of all the years for which we have archived topics is displayed.
    If a year is passed, a list of all the months for which there are archived topics is displayed.
    If a month is passed, we show all archived topics for that month.

    @todo Need to pass the timezone to the extract() function.

    :param year:
    :type year: int
    :param month:
    :type month: int
    """
    if year > 0 and month > 0:
        elements = Topic.query.filter(func.extract('YEAR', Topic.date_created) == year,
                                      func.extract('MONTH', Topic.date_created) == month).all()
    else:
        if year > 0 and month == 0:
            results = db.session.query(distinct(func.extract('MONTH', Topic.date_created))) \
                .filter(func.extract('YEAR', Topic.date_created) == year)
        if year == 0 and month == 0:
            results = db.session.query(distinct(func.extract('YEAR', Topic.date_created)))
        elements = []
        for result in results.all():
            elements.append(int(result[0]))
    return render_template('archive.html', elements=elements, year=year, month=month)
Example #8
  def counts(self, terms, types=None, contact_id=None,
             extra_params=None, extra_columns=None):
    """Prepare the search query, but return only count for each of
     the requested objects."""
    extra_params = extra_params or {}
    extra_columns = extra_columns or {}
    model_names = self._get_grouped_types(types, extra_params)
    query = db.session.query(
        self.record_type.type, func.count(distinct(
            self.record_type.key)), literal(""))
    query = query.filter(self.get_permissions_query(model_names))
    query = query.filter(self._get_filter_query(terms))
    query = self.search_get_owner_query(query, types, contact_id)
    query = query.group_by(self.record_type.type)
    all_extra_columns = dict(extra_columns.items() +
                             [(p, p) for p in extra_params
                              if p not in extra_columns])
    if not all_extra_columns:
      return query.all()

    # Add extra_params and extra_colums:
    for key, value in all_extra_columns.iteritems():
      extra_q = db.session.query(self.record_type.type,
                                 func.count(distinct(self.record_type.key)),
                                 literal(key))
      extra_q = extra_q.filter(self.get_permissions_query([value]))
      extra_q = extra_q.filter(self._get_filter_query(terms))
      extra_q = self.search_get_owner_query(extra_q, [value], contact_id)
      extra_q = self._add_extra_params_query(extra_q,
                                             value,
                                             extra_params.get(key, None))
      extra_q = extra_q.group_by(self.record_type.type)
      query = query.union(extra_q)
    return query.all()
Example #9
def search_by_tags():
    data = json.loads(request.get_data())
    page = int(request.args.get('page', 1))
    per_page = int(request.args.get('per_page', 10))
    tags_list = data.get('tags')
    if not isinstance(tags_list, list):
        tags_list = tags_list.split(",")
    post_id_select = select([distinct(tags.c.post_id)])\
        .where(tags.c.tag.in_(tags_list))\
        .limit(per_page).offset((page-1) * per_page)
    post_total = select([distinct(tags.c.post_id)])\
        .where(tags.c.tag.in_(tags_list))
    conn = engine.connect()
    result_post_id = conn.execute(post_id_select)
    result_post_total = conn.execute(post_total)
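    # NOTE: DBAPI .rowcount is generally only defined for UPDATE/DELETE; for a
    # portable total, a SELECT count(DISTINCT tags.c.post_id) query would be safer.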
    conn.close()
    if result_post_id.rowcount:
        post_id_list = [dict(r).get('post_id') for r in result_post_id.fetchall()]
        all_matched_posts = posts.select().where(posts.c.id.in_(post_id_list))
        conn = engine.connect()
        res_all_matched_posts = conn.execute(all_matched_posts)
        conn.close()
        res = {
            'total': result_post_total.rowcount,
            'data': [dict(r) for r in res_all_matched_posts.fetchall()]
        }
        return jsonify(res)
    else:
        res = {
            'total': result_post_total.rowcount,
            'data': []
        }
        return jsonify(res)
Example #10
File: host.py Project: lmcthbe/faraday
    def __query_database(self, search=None, page=0, page_size=0, order_by=None, order_dir=None, host_filter=None):
        host_filter = host_filter or {}  # avoid a shared mutable default argument
        host_bundle = Bundle('host', Host.id, Host.name, Host.os, Host.description, Host.owned,
            Host.default_gateway_ip, Host.default_gateway_mac, EntityMetadata.couchdb_id,
            EntityMetadata.revision, EntityMetadata.update_time, EntityMetadata.update_user,
            EntityMetadata.update_action, EntityMetadata.creator, EntityMetadata.create_time,
            EntityMetadata.update_controller_action, EntityMetadata.owner, EntityMetadata.command_id,
            func.group_concat(distinct(Interface.id)).label('interfaces'),
            func.count(distinct(Vulnerability.id)).label('vuln_count'),
            func.count(distinct(Service.id)).label('open_services_count'))

        query = self._session.query(host_bundle)\
                             .outerjoin(EntityMetadata, EntityMetadata.id == Host.entity_metadata_id)\
                             .outerjoin(Interface, Host.id == Interface.host_id)\
                             .outerjoin(Vulnerability, Host.id == Vulnerability.host_id)\
                             .outerjoin(Service, (Host.id == Service.host_id) & (Service.status.in_(('open', 'running', 'opened'))))\
                             .group_by(Host.id)

        # Apply pagination, sorting and filtering options to the query
        query = sort_results(query, self.COLUMNS_MAP, order_by, order_dir, default=Host.id)
        query = apply_search_filter(query, self.COLUMNS_MAP, search, host_filter, self.STRICT_FILTERING)
        count = get_count(query, count_col=Host.id)

        if page_size:
            query = paginate(query, page, page_size)

        results = query.all()

        return results, count
Example #11
File: api.py Project: alsukurr/cs373-idb
    def get(self):
        """
        Gathers all events from the database with their data
        return a json object representing the events
        """
        
        session = db.loadSession()

        # Make the sql query
        result = session.query(
            # What to select
            # distinct because of multiple medals per event
            distinct(db.Event.id),
            db.Event.name,
            db.Sport.name,
            func.array_agg_cust(distinct(array([cast(db.Olympics.id, String), cast(db.Olympics.year, String), db.Olympics.season])))
            )\
            .select_from(db.Event)\
            .join(db.Sport)\
            .join(db.Medal)\
            .join(db.Olympics)\
            .group_by(db.Event.id,
            db.Event.name,
            db.Sport.name)\
            .all() # Actually executes the query and returns a list of tuples
        
        session.close()
        
        keys = ('id', 'name', 'sport', ('olympics', ('id', 'year', 'season')))
        
        all_events_dict = list_of_dict_to_dict_of_dict(add_keys(keys, row) for row in result)
        
        return jsonify(all_events_dict)
Example #12
def json_export(outfile):
    db = Session()
    data = dict()
    data["materials"] = [
        it.to_dict(["locations"])
        for it in db.query(Material).options(subqueryload(Material.locations))
    ]
    data["materialTypes"] = [
        dict(label=it[0], value=it[0])
        for it in db.query(distinct(Material.type)).order_by(Material.type.asc())
        if it[0]
    ]
    data["blueprints"] = [
        it.to_dict(["ingredients"])
        for it in db.query(Blueprint)\
                .options(subqueryload(Blueprint.ingredients))\
                .options(subqueryload("ingredients.material"))
    ]
    data["blueprintTypes"] = [
        dict(label=it[0], value=it[0])
        for it in db.query(distinct(Blueprint.type)).order_by(Blueprint.type.asc())
        if it[0]
    ]
    with open(outfile, "w") as fp:
        fp.write('CollectorDroneData=')
        json.dump(data, fp)

    db.close()
Example #13
    def getSamplingFeatures(self, ids=None, codes=None, uuids=None, type=None, wkt=None, results=False):
        """Retrieve a list of Sampling Feature objects.

        If no arguments are passed to the function, or their values are None,
        all Sampling Feature objects in the database will be returned.

        Args:
            ids (list, optional): List of SamplingFeatureIDs.
            codes (list, optional): List of SamplingFeature Codes.
            uuids (list, optional): List of UUIDs string.
            type (str, optional): Type of Sampling Feature from
                `controlled vocabulary name <http://vocabulary.odm2.org/samplingfeaturetype/>`_.
            wkt (str, optional): SamplingFeature Well Known Text.
            results (bool, optional): Whether or not you want to return only the
                sampling features that have results associated with them.

        Returns:
            list: List of Sampling Feature objects

        Examples:
            >>> READ = ReadODM2(SESSION_FACTORY)
            >>> READ.getSamplingFeatures(ids=[39, 40])
            >>> READ.getSamplingFeatures(codes=['HOME', 'FIELD'])
            >>> READ.getSamplingFeatures(uuids=['a6f114f1-5416-4606-ae10-23be32dbc202',
            ...                                 '5396fdf3-ceb3-46b6-aaf9-454a37278bb4'])
            >>> READ.getSamplingFeatures(type='Site')
            >>> READ.getSamplingFeatures(wkt='POINT (30 10)')
            >>> READ.getSamplingFeatures(results=True)
            >>> READ.getSamplingFeatures(type='Site', results=True)

        """
        if results:
            try:
                fas = [x[0] for x in self._session.query(distinct(Results.FeatureActionID)).all()]
            except Exception:
                return None
            sf = [x[0] for x in self._session.query(distinct(FeatureActions.SamplingFeatureID))
                                    .filter(FeatureActions.FeatureActionID.in_(fas)).all()]
            if ids:
                ids = list(set(ids).intersection(sf))
            else:
                ids = sf

        q = self._session.query(SamplingFeatures)

        if type:
            q = q.filter_by(SamplingFeatureTypeCV=type)
        if ids:
            q = q.filter(SamplingFeatures.SamplingFeatureID.in_(ids))
        if codes:
            q = q.filter(SamplingFeatures.SamplingFeatureCode.in_(codes))
        if uuids:
            q = q.filter(SamplingFeatures.SamplingFeatureUUID.in_(uuids))
        if wkt:
            q = q.filter_by(FeatureGeometryWKT=wkt)
        try:
            return q.all()
        except Exception as e:
            print('Error running Query: {}'.format(e))
            return None
Example #14
def index(page=None, id=None):
    if 'application/json' not in request.headers.get('Accept', ''):
        return render_template('ember-page.jinja', active_page='statistics')

    name = None

    query = db.session.query(Flight.year.label('year'),
                             func.count('*').label('flights'),
                             func.count(distinct(Flight.pilot_id)).label('pilots'),
                             func.sum(Flight.olc_classic_distance).label('distance'),
                             func.sum(Flight.duration).label('duration'))

    pilots_query = db.session.query(func.count(distinct(Flight.pilot_id)))

    if page == 'pilot':
        pilot = get_requested_record(User, id)
        name = unicode(pilot)
        query = query.filter(Flight.pilot_id == pilot.id)

    elif page == 'club':
        club = get_requested_record(Club, id)
        name = unicode(club)
        query = query.filter(Flight.club_id == club.id)
        pilots_query = pilots_query.filter(Flight.club_id == club.id)

    elif page == 'airport':
        airport = get_requested_record(Airport, id)
        name = unicode(airport)
        query = query.filter(Flight.takeoff_airport_id == airport.id)
        pilots_query = pilots_query.filter(Flight.takeoff_airport_id == airport.id)

    elif page is not None:
        abort(404)

    query = query.filter(Flight.is_rankable())

    query = query.group_by(Flight.year).order_by(Flight.year.desc())

    if page == 'pilot':
        sum_pilots = 0
    else:
        sum_pilots = pilots_query.scalar()

    years = []
    for row in query:
        years.append({
            'year': row.year,
            'flights': row.flights,
            'distance': row.distance,
            'duration': row.duration.total_seconds(),
            'pilots': row.pilots,
            'average_distance': row.distance / row.flights,
            'average_duration': row.duration.total_seconds() / row.flights,
        })

    return jsonify(name=name, years=years, sumPilots=sum_pilots)
Example #15
    def generateSummaryJs(self, target_fn):
        n_primers = self.session.query(func.count(PrimerSet.id)).one()[0]
        n_markers = (self.session.query(
            func.count(distinct(Ortholog.id)))
            .join(PrimerSet)
            .one()
        )[0]
        n_species = (self.session.query(
            PrimerSet.num_species,
            func.count(distinct(Ortholog.id)),
            func.count(PrimerSet.id))
            .join(Ortholog)
            .group_by(PrimerSet.num_species)
            .all()
        )
        categories = (self.session.query(
            Category.name,
            func.count(distinct(Ortholog.id)))
            .join(Function)
            .join(Ortholog, Function.orthologs)
            .join(PrimerSet)
            .group_by(Category.name)
            .all()
        )
        functions = (self.session.query(
            Category.name,
            Function.shortcode,
            func.count(distinct(Ortholog.id)))
            .join(Function)
            .join(Ortholog, Function.orthologs)
            .join(PrimerSet)
            .group_by(Function.shortcode)
            .order_by(Category.name)
            .all()
        )

        out_str = '''var summary = [{
    'n_primers': %i,
    'n_markers': %i
}];

var species = [
    %s
];

var categories = [
    %s
];

var subcats = [
    %s
];''' % (n_primers, n_markers,
             ',\n\t'.join(["[%d, %d, %d]" % x for x in n_species]),
             ',\n\t'.join(["['%s', %i]" % x for x in categories]),
             ',\n\t'.join(["['%s', %i]" % (x[1], x[2]) for x in functions]),)

        with open(target_fn, 'wt') as outfile:
            print(outfile.name)
            outfile.write(out_str)
Example #16
 def info(*args, **kwargs):
     DB = db_op.rota
     g.main_infos = None
     date = time.strftime('%Y-%m-%d',time.localtime())
     ym = time.strftime('%Y',time.localtime())
     new_date = datetime.date.today()+datetime.timedelta(1)
     user = request.cookies.get('user')
     db = db_op.idc_users
     val = db.query.with_entities(db.grade).filter(db.name == user).all()
     grade = int(val[0][0]) if val else 10
     if user and '@' in user:
         user = user.split('@')[0]
     data=[user]
     try:
         # Generate today's and tomorrow's ops on-call rota
         users = []
         duty = u'运维值班'
         pools = [u'李晓辉',u'周福成']
         for t in (date,new_date):
             VAL = DB.query.with_entities(DB.name).filter(and_(DB.date == t,DB.duty == duty)).all()
             if VAL:
                 user = VAL[0][0]
             else:
                 user = random.choice(pools)
                 c = DB(name = user,duty = duty,date = t)
                 db_op.DB.session.add(c)
                 db_op.DB.session.commit()
             pools.remove(user)
             users.append(user)
         data.extend(users)
         ip=request.headers.get('X-Forwarded-For')
         if not ip :
             ip = request.remote_addr
         if ',' in ip:
             ip = ip.split(',')[0]
         # Fetch the page menus
         DB = db_op.op_menu
         nav_val = {}
         sub_val = {}
         Menu_val = DB.query.with_entities(DB.Menu_name, DB.id_name, DB.module_name, DB.action_name).filter(DB.grade >= grade).all()
         navMenu = DB.query.with_entities(distinct(DB.Menu_name)).filter(and_(DB.Menu == 'navMenu',DB.grade >= grade)).order_by(DB.Menu_id).all()
         navMenu = [Menu[0] for Menu in navMenu]
         for Menu in navMenu:
             val = [val[1:] for val in Menu_val if Menu in val]
             if val:
                 nav_val[Menu] = val
         submenu = DB.query.with_entities(distinct(DB.Menu_name)).filter(and_(DB.Menu == 'submenu',DB.grade >= grade)).order_by(DB.Menu_id).all()
         submenu = [menu[0] for menu in submenu]
         for menu in submenu:
             val = [val[2:] for val in Menu_val if menu in val]
             if val:
                 sub_val[menu] = val
         g.main_infos = {'datas':data,'navMenu':navMenu,'nav_val':nav_val,'submenu':submenu,'sub_val':sub_val,'ip':ip,'ym':ym}
         return func(*args, **kwargs)
     except Exception as e:
         loging.write(e)
         return func(*args, **kwargs)
     finally:
         db_op.DB.session.remove()
Example #17
	def get_sites(self, site_code = ""):
		result = None
		if (site_code):
			result = self._edit_session.query(distinct(Series.site_id), Series.site_code, Series.site_name).filter_by(site_code=site_code).one()
		else:
			result = self._edit_session.query(distinct(Series.site_id), Series.site_code, Series.site_name).order_by(Series.site_code).all()

		return result
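A side note on the pattern above: wrapping only the first column in distinct() emits the SQL DISTINCT keyword, which the database applies to the whole selected row, not just site_id. A self-contained sketch (1.x-style select([...]), with a stand-in series table):

from sqlalchemy import Column, Integer, MetaData, String, Table, distinct, select

metadata = MetaData()
series = Table('series', metadata,
               Column('site_id', Integer),
               Column('site_code', String),
               Column('site_name', String))

# Renders: SELECT DISTINCT series.site_id, series.site_code, series.site_name
# FROM series -- row-level DISTINCT, despite being written on a single column.
print(select([distinct(series.c.site_id), series.c.site_code, series.c.site_name]))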
Example #18
 def delete_orphans(self):
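     # delete(False) passes synchronize_session=False as the first positional
     # argument: bulk-delete without trying to sync objects already loaded
     # in the session.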
     self.session.query(Genre).filter(~Genre.id.in_(self.session.query(distinct(TrackInfo.genre_id)))).delete(False)
     self.session.commit()
     self.session.query(Album).filter(~Album.id.in_(self.session.query(distinct(TrackInfo.album_id)))).delete(False)
     self.session.commit()
     self.session.query(Cover).filter(~Cover.id.in_(self.session.query(distinct(Album.cover_id)))).delete(False)
     self.session.commit()
     self.session.query(Artist).filter(~Artist.id.in_(self.session.query(distinct(Album.artist_id)))).delete(False)
     self.session.commit()
Example #19
    def deferred_customer_widget(node, kw):

        if default_option:
            values = [default_option]
        else:
            values = []
        if is_admin:
            query = Customer.query().join(Customer.company)
            query = query.options(
                contains_eager(Customer.company).load_only('name')
            )
            query = query.options(load_only('id', 'label'))

            if with_invoice:
                query = query.filter(
                    Customer.id.in_(
                        DBSESSION().query(distinct(Task.customer_id)).filter(
                            Task.type_.in_(['invoice', 'cancelinvoice'])
                        )
                    )
                )
            elif with_estimation:
                query = query.filter(
                    Customer.id.in_(
                        DBSESSION().query(distinct(Task.customer_id)).filter(
                            Task.type_ == 'estimation'
                        )
                    )
                )

            datas = OrderedDict()

            for item in query:
                datas.setdefault(item.company.name, []).append(
                    (item.id, item.label)
                )

            # All customers, grouped by Company
            for company_name, customers in datas.items():
                values.append(
                    deform.widget.OptGroup(
                        company_name,
                        *customers
                    )
                )
        else:
            # Company customers only
            company = kw['request'].context
            for cust in company.customers:
                values.append(
                    (cust.id, u"%s (%s)" % (cust.name, cust.code))
                )

        return deform.widget.Select2Widget(
            values=values,
            **(widget_options or {})
        )
Example #20
def index(page=None, id=None):
    name = None

    query = db.session.query(
        Flight.year.label("year"),
        func.count("*").label("flights"),
        func.count(distinct(Flight.pilot_id)).label("pilots"),
        func.sum(Flight.olc_classic_distance).label("distance"),
        func.sum(Flight.duration).label("duration"),
    )

    pilots_query = db.session.query(func.count(distinct(Flight.pilot_id)))

    if page == "pilot":
        pilot = get_requested_record(User, id)
        name = pilot.name
        query = query.filter(Flight.pilot_id == pilot.id)

    elif page == "club":
        club = get_requested_record(Club, id)
        name = club.name
        query = query.filter(Flight.club_id == club.id)
        pilots_query = pilots_query.filter(Flight.club_id == club.id)

    elif page == "airport":
        airport = get_requested_record(Airport, id)
        name = airport.name
        query = query.filter(Flight.takeoff_airport_id == airport.id)
        pilots_query = pilots_query.filter(Flight.takeoff_airport_id == airport.id)

    elif page is not None:
        abort(404)

    query = query.filter(Flight.is_rankable())

    query = query.group_by(Flight.year).order_by(Flight.year.desc())

    if page == "pilot":
        sum_pilots = 0
    else:
        sum_pilots = pilots_query.scalar()

    years = []
    for row in query:
        years.append(
            {
                "year": row.year,
                "flights": row.flights,
                "distance": row.distance,
                "duration": row.duration.total_seconds(),
                "pilots": row.pilots,
                "average_distance": row.distance / row.flights,
                "average_duration": row.duration.total_seconds() / row.flights,
            }
        )

    return jsonify(name=name, years=years, sumPilots=sum_pilots)
Example #21
    def get_top_policies(self, cutoff=0.05):
        count_map = defaultdict(int)

        question_query = (
            db.session.query(
                PolicyDomain.id,
                func.count(distinct(Question.id)),
            )
            .select_from(Question)
            .join(Question.asked)
            .join(Ask.mandate)
            .join(Mandate.group_memberships)
            .filter(MpGroupMembership.mp_group == self.party)
            .filter(MpGroupMembership.interval.contains(Question.date))
            .join(Question.policy_domain)
            .group_by(PolicyDomain.id)
        )
        for policy_domain_id, count in question_query:
            count_map[policy_domain_id] += count

        proposal_query = (
            db.session.query(
                PolicyDomain.id,
                func.count(distinct(Proposal.id)),
            )
            .select_from(Proposal)
            .filter(Proposal.date >= LEGISLATURE_2012_START)
            .join(Proposal.sponsorships)
            .join(Sponsorship.mandate)
            .join(Mandate.group_memberships)
            .filter(MpGroupMembership.mp_group == self.party)
            .filter(MpGroupMembership.interval.contains(Proposal.date))
            .join(Proposal.policy_domain)
            .group_by(PolicyDomain.id)
        )
        for policy_domain_id, count in proposal_query:
            count_map[policy_domain_id] += count

        total = sum(count_map.values())

        policy_list = []
        if total:
            for policy_domain in PolicyDomain.query:
                interest = count_map.get(policy_domain.id, 0) / total
                if interest > cutoff:
                    policy_list.append({
                        'slug': policy_domain.slug,
                        'name': policy_domain.name,
                        'interest': interest,
                    })

        return sorted(
            policy_list,
            reverse=True,
            key=lambda p: p['interest'],
        )
Example #22
	def get_variables(self, site_code = ""):	# covers NoDV, VarUnits, TimeUnits
		result = None
		if (site_code):
			result = self._edit_session.query(
				distinct(Series.variable_id), Series.variable_code, Series.variable_name).filter_by(site_code=site_code).order_by(Series.variable_code
			).all()
		else:
			result = self._edit_session.query(distinct(Series.variable_id), Series.variable_code, Series.variable_name).order_by(Series.variable_code).all()

		return result
Example #23
def heroes():
    _heroes_and_replay_counts = db.session.query(db.func.count(distinct(ReplayPlayer.replay_id)), Hero)\
        .join(ReplayPlayer.hero)\
        .group_by(ReplayPlayer.hero_id)\
        .order_by(db.func.count(distinct(ReplayPlayer.replay_id)).desc())\
        .all()

    return render_template("dota/heroes.html",
                           title="Heroes - Dotabank",
                           heroes_and_replay_counts=_heroes_and_replay_counts)
Example #24
def test_event_assign_recipients():
    """Test `Event.assign_recipients()`."""
    user1 = factories.User()
    user2 = factories.User()
    user3 = factories.User()
    user4 = factories.User()

    event = factories.Event(number_of_recipients=2)

    event.users.extend((user1, user2, user3, user4))

    event.assign_recipients()

    # After assigning recipients, the event should be locked.
    assert event.locked

    # Check the number of records.
    expected = len(event.users) * event.number_of_recipients
    actual = models.EventRecipient.query.count()
    assert actual == expected

    # Check the number of users.
    expected = len(event.users)
    actual = db.session.query(
        func.count(distinct(models.EventRecipient.user_id))
    ).first()[0]
    assert actual == expected

    # Check the number of recipients.
    expected = len(event.users)
    actual = db.session.query(
        func.count(distinct(models.EventRecipient.recipient_id))
    ).first()[0]
    assert actual == expected

    # Check that each user has the correct number of recipients.
    expected = event.number_of_recipients
    users = db.session.query(
        func.count(models.EventRecipient.recipient_id)
    ).group_by(models.EventRecipient.user_id).all()
    for actual, *_ in users:
        assert actual == expected

    # Check that each recipient has the correct number of users.
    expected = event.number_of_recipients
    recipients = db.session.query(
        func.count(models.EventRecipient.user_id)
    ).group_by(models.EventRecipient.recipient_id).all()
    for actual, *_ in recipients:  # NOQA
        assert actual == expected
Example #25
def calc_report(period, period_moments):
    n = 2

    moment_res = db.session.query(
        sqla.distinct(models.TopicMomentModel.moment)).order_by(
            models.TopicMomentModel.moment.desc()).limit(n).all()

    # Skip the current moment because it is presumably incomplete
    moment_res = moment_res[1:]

    for m_row in moment_res:
        moment = m_row[0]

        # Get all of the moments leading up to this moment
        moment_res = db.session.query(
            sqla.distinct(models.TopicMomentModel.moment)).order_by(
                models.TopicMomentModel.moment.desc()).filter(
                    models.TopicMomentModel.moment <= moment).limit(period_moments).all()
        moments = [m[0] for m in moment_res]

        for site_id in sites.site_array:
            print "Calculating", site_id, moment, period
            res = db.session.query(models.AvgMomentumPeriodReportModel).filter_by(
                site_id=site_id,
                period=period,
                moment=moment).first()
            if res:
                print "Skipping", site_id, moment, period
                continue  # a report for this (site, period, moment) already exists

            res = db.session.query(models.TopicMomentDerivModel.topic_id, sql.func.avg(models.TopicMomentDerivModel.value)).filter(
                models.TopicMomentDerivModel.site_id == site_id,
                models.TopicMomentDerivModel.moment_to.in_(moments)).group_by(
                    models.TopicMomentDerivModel.topic_id).all()

            print "Got results", len(res)

            for topic_id, avg_val in res:
                m = models.AvgMomentumPeriodReportModel(
                    site_id=site_id,
                    period=period,
                    moment=moment,
                    topic_id=topic_id,
                    value=avg_val
                )

                db.session.add(m)

            print "Committing"
            db.session.commit()
Example #26
def get_results(runtime, package_id, agg_type):
    print "results: lookup by aggtype"
    print agg_type
    print agg_type.test_id

    if agg_type.test_id is not None:
        results = db.session.query(distinct(models.Result.result_identifier)).filter(
            models.Result.test_id == agg_type.test_id
            ).filter(
            models.Result.result_data == '1'
            )
    else:
        results = db.session.query(distinct(models.Result.result_identifier))

    print "results: filter by runtime,packageid"

    results = results.filter(
        models.Result.runtime_id == runtime
        ).filter(
        models.Result.package_id == package_id
        ).all()

    print "doing distinct on result ids"

    result_identifiers = [r[0] for r in results]  # unwrap the 1-tuples returned by the distinct() query

    print "finished doing distinct"
    print len(result_identifiers)

    if result_identifiers:
        results = models.Result.query.filter(
            models.Result.runtime_id == runtime,
            models.Result.package_id == package_id,
            models.Result.result_identifier.in_(result_identifiers)
            ).all()
    else:
        print "short cut"
        results = []

    print "doing res2"

    results2 = models.Result.query.filter(
        models.Result.runtime_id == runtime,
        models.Result.package_id == package_id,
        models.Result.result_identifier == None
        ).all()

    results += results2
    return set([r.id for r in results])
Example #27
File: api.py Project: wonderpl/dolly-web
def get_users_categories(user_ids=None, start=None, stop=None):
    from rockpack.mainsite.services.video import models
    from rockpack.mainsite.services.user.models import User

    query = db.session.query(User, models.Channel, func.count(models.VideoInstance.id)).outerjoin(
        models.Channel,
        (models.Channel.owner == User.id) &
        (models.Channel.deleted == False) &
        (models.Channel.visible == True) &
        (models.Channel.public == True)
    ).outerjoin(
        models.VideoInstance,
        (models.VideoInstance.channel == models.Channel.id) &
        (models.VideoInstance.deleted == False)
    ).options(
        lazyload(models.Channel.category_rel),
        contains_eager(models.Channel.owner_rel)
    )

    if start:
        updated_channels = readonly_session.query(distinct(models.Channel.id))\
            .filter(models.Channel.date_updated.between(start, stop))

        updated_instances = readonly_session.query(distinct(models.VideoInstance.channel))\
            .filter(models.VideoInstance.date_updated.between(start, stop))

        updated_users = readonly_session.query(distinct(models.Channel.id))\
            .join(User, User.id == models.Channel.owner)\
            .filter(User.date_updated.between(start, stop))

        unioned = updated_channels.union_all(updated_instances, updated_users).subquery()
        query = query.filter(models.Channel.id.in_(unioned))

    query = query.group_by(User.id, models.Channel.id).order_by(User.id)

    if user_ids:
        query = query.filter(User.id.in_(user_ids))

    category_map = {}

    for user, channel, video_count in query:
        if channel and channel.category and condition_for_category(user, channel, video_count):
            category_map.setdefault(user, []).append(channel.category)
        else:
            category_map.setdefault(user, [])

    app.logger.info('User category map size: %d', len(category_map))

    return category_map
Example #28
    def query(self):
        """
        Return the main query used to find objects

        e.g:

            query = DBSESSION().query(distinct(UserDatas.id), UserDatas)
            query = query.filter(UserDatas.name.startswith('test'))
            query = query.outerjoin(UserDatas.conseiller)
            query = query.filter(User.lastname=='A manager')
            query = query.filter(
                UserDatas.id.in_(
                    [list of ids retrieved from independant queries]
                )
            )

        """
        self.already_joined = []
        if self.root:
            main_query = DBSESSION().query(distinct(self.model.id), self.model)
        else:
            main_query = DBSESSION().query(distinct(self.model.id))

        # For every criterion we will filter on, we must make sure the class
        # involved is actually part of the query, so we add an outerjoin for
        # each related class.

        # NOTE: aliases are not handled (joins on the same table for two
        # different relationships)
        for criterion in self.query_helpers:
            # Build the filter
            filter_ = criterion.gen_filter()
            having = criterion.gen_having_clause()

            # if there is a filter ...
            if filter_ is not None:
                main_query = self.join(main_query, criterion)
                main_query = main_query.filter(filter_)

            elif having is not None:
                main_query = self.join(main_query, criterion)
                main_query = main_query.group_by(self.model.id)
                main_query = main_query.having(having)

        if self.query_factories:
            ids = list(self._get_ids_from_factories())
            main_query = main_query.filter(self.model.id.in_(ids))

        return main_query
Example #29
def update_channel_view_counts(time_from=None, time_to=None):
    """Update view counts for channel."""
    # For each channel, select the total number of users
    # who've made 1 or more actions on a channel per hour
    session = ChannelLocaleMeta.query.session
    for object_type in ('video', 'channel', ):
        ua = None
        ids = {}

        if object_type == 'video':

            ua = UserActivity.query.session.query(
                UserActivity.locale,
                VideoInstance.channel,
                func.count(distinct(UserActivity.user))
            ).filter(
                VideoInstance.id == UserActivity.object_id
            ).group_by(
                UserActivity.locale,
                VideoInstance.channel
            )

        else:
            ua = UserActivity.query.session.query(
                UserActivity.locale,
                UserActivity.object_id,
                func.count(distinct(UserActivity.user))
            ).group_by(
                UserActivity.locale,
                UserActivity.object_id
            )

        ua = ua.filter(
            UserActivity.date_actioned.between(time_from, time_to)
        )

        if not ua.count():
            continue

        for u in ua:
            loc, channel, val = u
            ids.setdefault(loc, {}).setdefault(channel, 0)
            ids[loc][channel] += val

        for locale in ids.keys():
            channel_metas = ChannelLocaleMeta.query.filter(ChannelLocaleMeta.locale == locale, ChannelLocaleMeta.channel.in_(ids[locale].keys()))
            for meta in channel_metas:
                meta.view_count += ids[locale][meta.channel]
                session.add(meta)
Example #30
File: dal.py Project: wenbs/mptracker
    def get_policy_top_parties(self, policy_slug, cutoff=0.05):
        count_map = defaultdict(int)

        question_query = (
            db.session.query(MpGroupMembership.mp_group_id, func.count(distinct(Question.id)))
            .select_from(Question)
            .join(Question.asked)
            .join(Ask.mandate)
            .join(Mandate.group_memberships)
            .filter(MpGroupMembership.interval.contains(Question.date))
            .join(Question.policy_domain)
            .filter(PolicyDomain.slug == policy_slug)
            .join(MpGroupMembership.mp_group)
            .filter(MpGroup.year == 2012)
            .group_by(MpGroupMembership.mp_group_id)
        )

        for mp_group_id, count in question_query:
            count_map[mp_group_id] += count

        proposal_query = (
            db.session.query(MpGroupMembership.mp_group_id, func.count(distinct(Proposal.id)))
            .select_from(Proposal)
            .filter(Proposal.date >= LEGISLATURE_2012_START)
            .join(Proposal.sponsorships)
            .join(Sponsorship.mandate)
            .join(Mandate.group_memberships)
            .filter(MpGroupMembership.interval.contains(Proposal.date))
            .join(Proposal.policy_domain)
            .filter(PolicyDomain.slug == policy_slug)
            .join(MpGroupMembership.mp_group)
            .filter(MpGroup.year == 2012)
            .group_by(MpGroupMembership.mp_group_id)
        )

        for mp_group_id, count in proposal_query:
            count_map[mp_group_id] += count

        total = sum(count_map.values())

        group_list = []
        if total:
            for party in MpGroup.query:
                interest = count_map.get(party.id, 0) / total
                if interest > cutoff:
                    group_list.append({"short_name": party.short_name, "name": party.name, "interest": interest})

        return sorted(group_list, reverse=True, key=lambda p: p["interest"])
Example #31
def getMonths(db, V):
    res = pd.DataFrame(db.session.query(distinct(V.Month)).all(),
                       columns=['Month'])
    res['Month'] = res['Month'].astype(str)
    return res
Example #32
def getVioCat(db, V):
    res = pd.DataFrame(db.session.query(distinct(V.ViolationCategory)).all(),
                       columns=['ViolationCategory'])
    return res
Example #33
def getQtrs(db, V):
    res = pd.DataFrame(db.session.query(distinct(V.Qtr)).all(),
                       columns=['Qtr'])
    res['Qtr'] = res['Qtr'].astype(str)
    return res
Example #34
def index():
    statement = select([distinct(events.c.category)])
    categories = [
        str(entry[0]) for entry in db.engine.execute(statement).fetchall()
    ]
    return render_template('index.html', categories=categories)
Example #35
def query_overrides(request):
    """
    Search for overrides by various criteria.

    The following optional parameters may be used when searching for overrides:
        builds (list): A list of NVRs to search overrides by.
        expired (bool): If True, limit search to expired overrides. If False, limit search to active
            overrides.
        like (basestring): Perform an SQL "like" query against build NVRs with the given string.
        packages (list): A list of package names to search overrides by.
        releases (list): A list of release names to limit the overrides search by.
        search (basestring): Perform an SQL "ilike" query against build NVRs with the given string.
        submitter (basestring): Search for overrides submitted by the given username.

    Returns:
        dict: A dictionary with the following keys:
            overrides: An iterable containing the matched overrides.
            page: The current page number in the results.
            pages: The number of pages of results that match the query.
            rows_per_page: The number of rows on the page.
            total: The total number of overrides that match the criteria.
            chrome: The caller supplied chrome.
            display_user: The current username.
    """
    db = request.db
    data = request.validated
    query = db.query(BuildrootOverride)

    expired = data.get('expired')
    if expired is not None:
        if expired:
            query = query.filter(BuildrootOverride.expired_date.isnot(None))
        else:
            query = query.filter(BuildrootOverride.expired_date.is_(None))

    builds = data.get('builds')
    if builds is not None:
        query = query.join(BuildrootOverride.build)
        query = query.filter(or_(*[Build.nvr == bld for bld in builds]))

    packages = data.get('packages')
    if packages is not None:
        query = query.join(BuildrootOverride.build).join(Build.package)
        query = query.filter(or_(*[Package.name == pkg.name for pkg in packages]))

    releases = data.get('releases')
    if releases is not None:
        query = query.join(BuildrootOverride.build).join(Build.release)
        query = query.filter(or_(*[Release.name == r.name for r in releases]))

    like = data.get('like')
    if like is not None:
        query = query.join(BuildrootOverride.build)
        query = query.filter(or_(*[
            Build.nvr.like('%%%s%%' % like)
        ]))

    search = data.get('search')
    if search is not None:
        query = query.join(BuildrootOverride.build)
        query = query.filter(Build.nvr.ilike('%%%s%%' % search))

    submitter = data.get('user')
    if submitter is not None:
        query = query.filter(BuildrootOverride.submitter == submitter)

    query = query.order_by(BuildrootOverride.submission_date.desc())

    # We can't use ``query.count()`` here because it is naive with respect to
    # all the joins that we're doing above.
    count_query = query.with_labels().statement\
        .with_only_columns([func.count(distinct(BuildrootOverride.id))])\
        .order_by(None)
    total = db.execute(count_query).scalar()

    page = data.get('page')
    rows_per_page = data.get('rows_per_page')
    pages = int(math.ceil(total / float(rows_per_page)))
    query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page)

    return dict(
        overrides=query.all(),
        page=page,
        pages=pages,
        rows_per_page=rows_per_page,
        total=total,
        chrome=data.get('chrome'),
        display_user=data.get('display_user'),
    )
Example #36
    user_count = session.query(User.id).count()  # count all users
    user_count = session.query(sqlalchemy.func.count(
        User.id)).scalar()  # scalar() returns the first column of the first row
    session.query(sqlalchemy.func.count("*")).select_from(
        User).scalar()  # scalar() returns the first column of the first row
    session.query(sqlalchemy.func.count(1)).select_from(
        User).scalar()  # scalar() returns the first column of the first row
    session.query(sqlalchemy.func.count(
        User.id)).filter(User.id > 0).scalar()  # filter() references User, so no select_from() is needed

    session.query(sqlalchemy.func.sum(User.age)).scalar()  # sum, fetched with scalar()
    session.query(sqlalchemy.func.avg(User.age)).scalar()  # average, fetched with scalar()
    session.query(sqlalchemy.func.md5(
        User.name)).filter(User.id == 1).scalar()  # apply the md5 function

    users = session.query(sqlalchemy.distinct(User.name))  # deduplicate rows on name
    users = session.query(User).order_by(User.name)  # order ascending
    users = session.query(User).order_by(User.name.desc())  # order descending
    users = session.query(User).order_by(sqlalchemy.desc(
        User.name))  # another way to order descending

    users = session.query(User.id, User.name)  # select only some columns
    users = session.query(User.name.label("user_name"))  # alias a result column
    for user in users:
        print("label test:", user.user_name)  # use the alias here

    users = session.query(
        sqlalchemy.func.count(User.name).label("count"),
        User.age).group_by(User.age)  # group-by query
    for user in users:
        print("age:{0}, count:{1}".format(user.age, user.count))
Example #37
def subscription_get_all_subscriber_ids(resource, resource_id, session=None):
    '''Resolve all subscriber ids for a resource. The algorithm is as follows:

    If you're subscribed to a project_group, you will be notified about
    project_group, project, story, and task changes.

    If you are subscribed to a project, you will be notified about project,
    story, and task changes.

    If you are subscribed to a task, you will be notified about changes to
    that task.

    If you are subscribed to a story, you will be notified about changes to
    that story and its tasks.

    :param resource: The name of the resource.
    :param resource_id: The ID of the resource.
    :return: A list of user id's.
    '''
    affected = {
        'project_group': set(),
        'project': set(),
        'story': set(),
        'task': set(),
        'worklist': set()
    }

    # If we accidentally pass a timeline_event, we're actually going to treat
    # it as a story.
    if resource == 'timeline_event':
        event = api_base.entity_get(TimeLineEvent,
                                    resource_id,
                                    session=session)
        if event:
            if event.story_id is not None:
                resource = 'story'
                resource_id = event.story_id
            elif event.worklist_id is not None:
                resource = 'worklist'
                resource_id = event.worklist_id
            else:
                return set()
        else:
            return set()

    # Sanity check exit.
    if resource not in affected.keys():
        return set()

    # Make sure the requested resource is going to be handled.
    affected[resource].add(resource_id)

    users = None

    # Resolve either from story->task or from task->story, so the root
    # resource id remains pristine.
    if resource == 'story':
        # If the story is private, make a whitelist of users to notify.
        story = api_base.model_query(models.Story, session) \
            .options(subqueryload(models.Story.permissions)) \
            .filter_by(id=resource_id).first()

        if story.private:
            users = [user.id for user in story.permissions[0].users]

        # Get this story's tasks
        query = api_base.model_query(models.Task.id, session=session) \
            .filter(models.Task.story_id.in_(affected['story']))

        affected['task'] = affected['task'] \
            .union(r for (r,) in query.all())
    elif resource == 'task':
        # Get this tasks's story
        query = api_base.model_query(models.Task.story_id, session=session) \
            .filter(models.Task.id == resource_id)

        affected['story'].add(query.first().story_id)

        story = api_base.model_query(models.Story, session) \
            .options(subqueryload(models.Story.permissions)) \
            .filter_by(id=query.first().story_id).first()

        if story.private:
            users = [user.id for user in story.permissions[0].users]

    # If there are tasks, there will also be projects.
    if affected['task']:
        # Get all the tasks's projects
        query = api_base.model_query(distinct(models.Task.project_id),
                                     session=session) \
            .filter(models.Task.id.in_(affected['task']))

        affected['project'] = affected['project'] \
            .union(r for (r,) in query.all())

    # If there are projects, there will also be project groups.
    if affected['project']:
        # Get all the projects' groups.
        query = api_base.model_query(
            distinct(models.project_group_mapping.c.project_group_id),
            session=session) \
            .filter(models.project_group_mapping.c.project_id
                    .in_(affected['project']))

        affected['project_group'] = affected['project_group'] \
            .union(r for (r,) in query.all())

    # Load all subscribers.
    subscribers = set()
    for affected_type in affected:
        query = api_base.model_query(distinct(
            models.Subscription.user_id), session=session) \
            .filter(models.Subscription.target_type == affected_type) \
            .filter(models.Subscription.target_id.in_(affected[affected_type]))

        if users is not None:
            query = query.filter(models.Subscription.user_id.in_(users))

        results = query.all()
        subscribers = subscribers.union(r for (r, ) in results)

    return subscribers
Example #38
def email_send(
    stone,
    table,
):
    """
    Send emails: calls email_draw once per recipient group.
    :param stone: database session
    :param table: table to read from
    :return: None
    """
    cols = [
        'director', 'director1', 'manager', 'manager1', 'majordomo',
        'principal'
    ]
    # Delete all files generated by the previous run
    dirlist = sys.path[0] + os.sep + 'temp' + os.sep
    for dirpath, dirnames, filenames in os.walk(dirlist):
        for file in filenames:
            os.remove(os.path.join(dirpath, file))
    # Generate content for staff below director level (starting from regular
    # employees), and for each department's primary head (starting from supervisors)
    for col in cols:
        result = stone.query(distinct(getattr(
            table, col))).filter(getattr(table, col) != None).all()
        # print(result)
        for one in result:
            # print(one)
            # print(stone.query(table).filter(getattr(table,col)==one[0]).all())
            if col == 'principal':
                gather = stone.query(table).filter(
                    and_(
                        getattr(table, col) == one[0],
                        getattr(table, 'job') != '员工')).all()
            else:
                gather = stone.query(table).filter(
                    getattr(table, col) == one[0]).all()
            email_draw(stone, gather, one[0])
    # Send to recipients explicitly designated in the config ('对接人' sections)
    for one in conf.keys():
        if '对接人' in one:
            # print(conf.items(section=one))
            assert len(conf.items(section=one)) == 2, 'Malformed recipient specification in the config file'
            result = stone.query(WeekMapping).filter(
                text("name in ('{name}')".format(
                    name="','".join(conf.items(
                        section=one)[1][1].split(',')))))
            for emp in result:
                print(emp)
            email_draw(stone, result, conf.items(section=one)[0][1])
    # Build the body for entries not covered above:
    # rows for which no message was ever sent (count == 0)
    result = stone.query(table).filter(table.count == 0)
    email_draw(stone, result,
               '{today}未发送人员'.format(today=datetime.date.today()))
    # Build the zip archive
    if os.path.exists(sys.path[0] + os.sep + r'发送情况.zip'):
        os.remove(sys.path[0] + os.sep + r'发送情况.zip')
    f = zipfile.ZipFile(sys.path[0] + os.sep + r'发送情况.zip', 'w',
                        zipfile.ZIP_DEFLATED)
    files = glob.glob(sys.path[0] + os.sep + 'temp' + os.sep + '*')
    for file in files:
        f.write(file, os.path.basename(file))
    f.close()
    # Send the report email with attachments
    sendMultimedia(smtp_server=conf.get(section='email', option='smtp_server'),
                   smtp_port=conf.get(section='email', option='smtp_port'),
                   from_addr=conf.get(section='email', option='from_addr'),
                   from_addr_str=conf.get(section='email',
                                          option='from_addr_str'),
                   password=conf.get(section='email', option='password'),
                   to_address=conf.get(section='email', option='error_email'),
                   header='{today} 生日预警情况'.format(today=datetime.date.today()),
                   body='邮件发送详情见附件',
                   file=sys.path[0] + os.sep + u'发送情况.zip')
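
The archive step at the end is the stock glob + zipfile idiom. A standalone sketch, where 'report.zip' and the temp/ directory are hypothetical names:

# Standalone sketch of the glob + zipfile step above.
import glob
import os
import zipfile

archive = 'report.zip'
if os.path.exists(archive):
    os.remove(archive)
with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as zf:
    for path in glob.glob(os.path.join('temp', '*')):
        zf.write(path, os.path.basename(path))  # store under the base name only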
Example #39
0
    def get_resources(self, user=None, project=None, source=None,
                      start_timestamp=None, start_timestamp_op=None,
                      end_timestamp=None, end_timestamp_op=None,
                      metaquery=None, resource=None, pagination=None):
        """Return an iterable of api_models.Resource instances

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param source: Optional source filter.
        :param start_timestamp: Optional modified timestamp start range.
        :param start_timestamp_op: Optional start time operator, like gt, ge.
        :param end_timestamp: Optional modified timestamp end range.
        :param end_timestamp_op: Optional end time operator, like lt, le.
        :param metaquery: Optional dict with metadata to match on.
        :param resource: Optional resource filter.
        :param pagination: Optional pagination query.
        """
        if pagination:
            raise ceilometer.NotImplementedError('Pagination not implemented')

        s_filter = storage.SampleFilter(user=user,
                                        project=project,
                                        source=source,
                                        start_timestamp=start_timestamp,
                                        start_timestamp_op=start_timestamp_op,
                                        end_timestamp=end_timestamp,
                                        end_timestamp_op=end_timestamp_op,
                                        metaquery=metaquery,
                                        resource=resource)

        session = self._engine_facade.get_session()
        # get list of resource_ids
        res_q = session.query(distinct(models.Resource.resource_id)).join(
            models.Sample,
            models.Sample.resource_id == models.Resource.internal_id)
        res_q = make_query_from_filter(session, res_q, s_filter,
                                       require_meter=False)

        for res_id in res_q.all():
            # get latest Sample
            max_q = (session.query(models.Sample)
                     .join(models.Resource,
                           models.Resource.internal_id ==
                           models.Sample.resource_id)
                     .filter(models.Resource.resource_id == res_id[0]))
            max_q = make_query_from_filter(session, max_q, s_filter,
                                           require_meter=False)
            max_q = max_q.order_by(models.Sample.timestamp.desc(),
                                   models.Sample.id.desc()).limit(1)

            # get the min timestamp value.
            min_q = (session.query(models.Sample.timestamp)
                     .join(models.Resource,
                           models.Resource.internal_id ==
                           models.Sample.resource_id)
                     .filter(models.Resource.resource_id == res_id[0]))
            min_q = make_query_from_filter(session, min_q, s_filter,
                                           require_meter=False)
            min_q = min_q.order_by(models.Sample.timestamp.asc()).limit(1)

            sample = max_q.first()
            if sample:
                yield api_models.Resource(
                    resource_id=sample.resource.resource_id,
                    project_id=sample.resource.project_id,
                    first_sample_timestamp=min_q.first().timestamp,
                    last_sample_timestamp=sample.timestamp,
                    source=sample.resource.source_id,
                    user_id=sample.resource.user_id,
                    metadata=sample.resource.resource_metadata
                )
Example #40
0
def num_blacklist_filter_chats():
    try:
        return SESSION.query(func.count(distinct(
            BlackListFilters.chat_id))).scalar()
    finally:
        SESSION.close()
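
func.count(distinct(...)) renders as COUNT(DISTINCT ...) in SQL; a quick sketch with a hypothetical column name:

# What the expression above compiles to.
from sqlalchemy import distinct, func
from sqlalchemy.sql import column

expr = func.count(distinct(column("chat_id")))
print(expr)  # count(DISTINCT chat_id)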
Example #41
0
    def copy_versions(self):
        """copies versions from one task to another
        """
        # get from task
        from_task = self.get_task_from_tree_view(self.from_task_tree_view)

        # get logged in user
        logged_in_user = self.get_logged_in_user()

        if not from_task:
            QtWidgets.QMessageBox.critical(
                self, 'Error',
                'Please select a task from <b>From Task</b> list')
            return

        # get to task
        to_task = self.get_task_from_tree_view(self.to_task_tree_view)

        if not to_task:
            QtWidgets.QMessageBox.critical(
                self, 'Error', 'Please select a task from <b>To Task</b> list')
            return

        # check if tasks are the same
        if from_task == to_task:
            QtWidgets.QMessageBox.critical(
                self, 'Error', 'Please select two different tasks')
            return

        # get take names and related versions
        # get distinct take names
        # list(...) so len() and re-iteration below also work on Python 3,
        # where map() returns a lazy iterator
        from_take_names = list(map(
            lambda x: x[0],
            db.DBSession.query(distinct(Version.take_name)).filter(
                Version.task == from_task).order_by(Version.take_name).all()))

        # create versions for each take
        answer = QtWidgets.QMessageBox.question(
            self, 'Info', "Will copy %s versions from take names:<br><br>"
            "%s"
            "<br><br>"
            "Is that Ok?" %
            (len(from_take_names), '<br>'.join(from_take_names)),
            QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)

        if answer == QtWidgets.QMessageBox.Yes:
            for take_name in from_take_names:
                latest_version = Version.query\
                    .filter_by(task=from_task)\
                    .filter_by(take_name=take_name)\
                    .order_by(Version.version_number.desc())\
                    .first()

                # create a new version
                new_version = Version(task=to_task, take_name=take_name)
                new_version.created_by = logged_in_user
                new_version.extension = latest_version.extension
                new_version.description = \
                    'Moved from another task (id=%s) with Version Mover' % \
                    latest_version.task.id
                new_version.created_with = latest_version.created_with
                db.DBSession.add(new_version)
                db.DBSession.commit()

                # update path
                new_version.update_paths()
                db.DBSession.add(new_version)
                db.DBSession.commit()

                # now copy the last_version file to the new_version path
                try:
                    os.makedirs(new_version.absolute_path)
                except OSError:  # path exists
                    pass

                # move the file there
                shutil.copyfile(latest_version.absolute_full_path,
                                new_version.absolute_full_path)

            # inform the user
            QtWidgets.QMessageBox.information(
                self, 'Success',
                'Successfully copied %s versions' % len(from_take_names))
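
A query over a single distinct column returns one-element tuples, which is what the map(lambda x: x[0], ...) above unpacks; a tiny illustration with literal stand-in data:

# .all() on a single-column query yields one-element tuples; the list
# below stands in for a real result set.
rows = [(u'Main',), (u'Take1',), (u'Take2',)]
take_names = [row[0] for row in rows]
print(take_names)  # ['Main', 'Take1', 'Take2']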
Example #42
0
def num_stickers_filter_chats():
    try:
        return SESSION.query(func.count(distinct(
            StickersFilters.chat_id))).scalar()
    finally:
        SESSION.close()
Example #43
0
def num_chats():
    try:
        return SESSION.query(func.count(distinct(
            CustomFilters.chat_id))).scalar()
    finally:
        SESSION.close()
Example #44
0
class SqlaTable(Model, BaseDatasource):
    """An ORM object for SqlAlchemy table references"""

    type = "table"
    query_language = "sql"
    metric_class = SqlMetric
    column_class = TableColumn
    owner_class = security_manager.user_model

    __tablename__ = "tables"
    __table_args__ = (UniqueConstraint("database_id", "table_name"), )

    table_name = Column(String(250))
    main_dttm_col = Column(String(250))
    database_id = Column(Integer, ForeignKey("dbs.id"), nullable=False)
    fetch_values_predicate = Column(String(1000))
    owners = relationship(owner_class,
                          secondary=sqlatable_user,
                          backref="tables")
    database = relationship(
        "Database",
        backref=backref("tables", cascade="all, delete-orphan"),
        foreign_keys=[database_id],
    )
    schema = Column(String(255))
    sql = Column(Text)
    is_sqllab_view = Column(Boolean, default=False)
    template_params = Column(Text)

    baselink = "tablemodelview"

    export_fields = (
        "table_name",
        "main_dttm_col",
        "description",
        "default_endpoint",
        "database_id",
        "offset",
        "cache_timeout",
        "schema",
        "sql",
        "params",
        "template_params",
        "filter_select_enabled",
        "fetch_values_predicate",
    )
    update_from_object_fields = [
        f for f in export_fields if f not in ("table_name", "database_id")
    ]
    export_parent = "database"
    export_children = ["metrics", "columns"]

    sqla_aggregations = {
        "COUNT_DISTINCT":
        lambda column_name: sa.func.COUNT(sa.distinct(column_name)),
        "COUNT":
        sa.func.COUNT,
        "SUM":
        sa.func.SUM,
        "AVG":
        sa.func.AVG,
        "MIN":
        sa.func.MIN,
        "MAX":
        sa.func.MAX,
    }

    def make_sqla_column_compatible(self, sqla_col, label=None):
        """Takes a sql alchemy column object and adds label info if supported by engine.
        :param sqla_col: sql alchemy column instance
        :param label: alias/label that column is expected to have
        :return: either a sql alchemy column or label instance if supported by engine
        """
        label_expected = label or sqla_col.name
        db_engine_spec = self.database.db_engine_spec
        if db_engine_spec.supports_column_aliases:
            label = db_engine_spec.make_label_compatible(label_expected)
            sqla_col = sqla_col.label(label)
        sqla_col._df_label_expected = label_expected
        return sqla_col

    def __repr__(self):
        return self.name

    @property
    def connection(self):
        return str(self.database)

    @property
    def description_markeddown(self):
        return utils.markdown(self.description)

    @property
    def datasource_name(self):
        return self.table_name

    @property
    def database_name(self):
        return self.database.name

    @property
    def link(self):
        name = escape(self.name)
        anchor = f'<a target="_blank" href="{self.explore_url}">{name}</a>'
        return Markup(anchor)

    @property
    def schema_perm(self):
        """Returns schema permission if present, database one otherwise."""
        return security_manager.get_schema_perm(self.database, self.schema)

    def get_perm(self):
        return ("[{obj.database}].[{obj.table_name}]"
                "(id:{obj.id})").format(obj=self)

    @property
    def name(self):
        if not self.schema:
            return self.table_name
        return "{}.{}".format(self.schema, self.table_name)

    @property
    def full_name(self):
        return utils.get_datasource_full_name(self.database,
                                              self.table_name,
                                              schema=self.schema)

    @property
    def dttm_cols(self):
        l = [c.column_name for c in self.columns if c.is_dttm]  # noqa: E741
        if self.main_dttm_col and self.main_dttm_col not in l:
            l.append(self.main_dttm_col)
        return l

    @property
    def num_cols(self):
        return [c.column_name for c in self.columns if c.is_num]

    @property
    def any_dttm_col(self):
        cols = self.dttm_cols
        if cols:
            return cols[0]

    @property
    def html(self):
        t = ((c.column_name, c.type) for c in self.columns)
        df = pd.DataFrame(t)
        df.columns = ["field", "type"]
        return df.to_html(
            index=False,
            classes=("dataframe table table-striped table-bordered "
                     "table-condensed"),
        )

    @property
    def sql_url(self):
        return self.database.sql_url + "?table_name=" + str(self.table_name)

    def external_metadata(self):
        cols = self.database.get_columns(self.table_name, schema=self.schema)
        for col in cols:
            try:
                col["type"] = str(col["type"])
            except CompileError:
                col["type"] = "UNKNOWN"
        return cols

    @property
    def time_column_grains(self):
        return {
            "time_columns": self.dttm_cols,
            "time_grains": [grain.name for grain in self.database.grains()],
        }

    @property
    def select_star(self):
        # show_cols and latest_partition set to false to avoid
        # the expensive cost of inspecting the DB
        return self.database.select_star(self.name,
                                         show_cols=False,
                                         latest_partition=False)

    def get_col(self, col_name):
        columns = self.columns
        for col in columns:
            if col_name == col.column_name:
                return col

    @property
    def data(self):
        d = super(SqlaTable, self).data
        if self.type == "table":
            grains = self.database.grains() or []
            if grains:
                grains = [(g.duration, g.name) for g in grains]
            d["granularity_sqla"] = utils.choicify(self.dttm_cols)
            d["time_grain_sqla"] = grains
            d["main_dttm_col"] = self.main_dttm_col
            d["fetch_values_predicate"] = self.fetch_values_predicate
            d["template_params"] = self.template_params
        return d

    def values_for_column(self, column_name, limit=10000):
        """Runs query against sqla to retrieve some
        sample values for the given column.
        """
        cols = {col.column_name: col for col in self.columns}
        target_col = cols[column_name]
        tp = self.get_template_processor()

        qry = (select([target_col.get_sqla_col()
                       ]).select_from(self.get_from_clause(tp)).distinct())
        if limit:
            qry = qry.limit(limit)

        if self.fetch_values_predicate:
            tp = self.get_template_processor()
            qry = qry.where(tp.process_template(self.fetch_values_predicate))

        engine = self.database.get_sqla_engine()
        sql = "{}".format(
            qry.compile(engine, compile_kwargs={"literal_binds": True}))
        sql = self.mutate_query_from_config(sql)

        df = pd.read_sql_query(sql=sql, con=engine)
        return [row[0] for row in df.to_records(index=False)]

    def mutate_query_from_config(self, sql):
        """Apply config's SQL_QUERY_MUTATOR

        Typically adds comments to the query with context"""
        SQL_QUERY_MUTATOR = config.get("SQL_QUERY_MUTATOR")
        if SQL_QUERY_MUTATOR:
            username = utils.get_username()
            sql = SQL_QUERY_MUTATOR(sql, username, security_manager,
                                    self.database)
        return sql

    def get_template_processor(self, **kwargs):
        return get_template_processor(table=self,
                                      database=self.database,
                                      **kwargs)

    def get_query_str_extended(self, query_obj):
        sqlaq = self.get_sqla_query(**query_obj)
        sql = self.database.compile_sqla_query(sqlaq.sqla_query)
        logging.info(sql)
        sql = sqlparse.format(sql, reindent=True)
        if query_obj["is_prequery"]:
            query_obj["prequeries"].append(sql)
        sql = self.mutate_query_from_config(sql)
        return QueryStringExtended(labels_expected=sqlaq.labels_expected,
                                   sql=sql)

    def get_query_str(self, query_obj):
        return self.get_query_str_extended(query_obj).sql

    def get_sqla_table(self):
        tbl = table(self.table_name)
        if self.schema:
            tbl.schema = self.schema
        return tbl

    def get_from_clause(self, template_processor=None):
        # Supporting arbitrary SQL statements in place of tables
        if self.sql:
            from_sql = self.sql
            if template_processor:
                from_sql = template_processor.process_template(from_sql)
            from_sql = sqlparse.format(from_sql, strip_comments=True)
            return TextAsFrom(sa.text(from_sql), []).alias("expr_qry")
        return self.get_sqla_table()

    def adhoc_metric_to_sqla(self, metric, cols):
        """
        Turn an adhoc metric into a sqlalchemy column.

        :param dict metric: Adhoc metric definition
        :param dict cols: Columns for the current table
        :returns: The metric defined as a sqlalchemy column
        :rtype: sqlalchemy.sql.column
        """
        expression_type = metric.get("expressionType")
        label = utils.get_metric_name(metric)

        if expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES["SIMPLE"]:
            column_name = metric.get("column").get("column_name")
            table_column = cols.get(column_name)
            if table_column:
                sqla_column = table_column.get_sqla_col()
            else:
                sqla_column = column(column_name)
            sqla_metric = self.sqla_aggregations[metric.get("aggregate")](
                sqla_column)
        elif expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES["SQL"]:
            sqla_metric = literal_column(metric.get("sqlExpression"))
        else:
            return None

        return self.make_sqla_column_compatible(sqla_metric, label)

    def get_sqla_query(  # sqla
        self,
        groupby,
        metrics,
        granularity,
        from_dttm,
        to_dttm,
        filter=None,  # noqa
        is_timeseries=True,
        timeseries_limit=15,
        timeseries_limit_metric=None,
        row_limit=None,
        inner_from_dttm=None,
        inner_to_dttm=None,
        orderby=None,
        extras=None,
        columns=None,
        order_desc=True,
        prequeries=None,
        is_prequery=False,
    ):
        """Querying any sqla table from this common interface"""
        template_kwargs = {
            "from_dttm": from_dttm,
            "groupby": groupby,
            "metrics": metrics,
            "row_limit": row_limit,
            "to_dttm": to_dttm,
            "filter": filter,
            "columns": {col.column_name: col
                        for col in self.columns},
        }
        template_kwargs.update(self.template_params_dict)
        template_processor = self.get_template_processor(**template_kwargs)
        db_engine_spec = self.database.db_engine_spec

        orderby = orderby or []

        # For backward compatibility
        if granularity not in self.dttm_cols:
            granularity = self.main_dttm_col

        # Database spec supports join-free timeslot grouping
        time_groupby_inline = db_engine_spec.time_groupby_inline

        cols = {col.column_name: col for col in self.columns}
        metrics_dict = {m.metric_name: m for m in self.metrics}

        if not granularity and is_timeseries:
            raise Exception(
                _("Datetime column not provided as part of table "
                  "configuration and is required by this type of chart"))
        if not groupby and not metrics and not columns:
            raise Exception(_("Empty query?"))
        metrics_exprs = []
        for m in metrics:
            if utils.is_adhoc_metric(m):
                metrics_exprs.append(self.adhoc_metric_to_sqla(m, cols))
            elif m in metrics_dict:
                metrics_exprs.append(metrics_dict.get(m).get_sqla_col())
            else:
                raise Exception(_("Metric '{}' is not valid".format(m)))
        if metrics_exprs:
            main_metric_expr = metrics_exprs[0]
        else:
            main_metric_expr, label = literal_column("COUNT(*)"), "ccount"
            main_metric_expr = self.make_sqla_column_compatible(
                main_metric_expr, label)

        select_exprs = []
        groupby_exprs_sans_timestamp = OrderedDict()

        if groupby:
            select_exprs = []
            for s in groupby:
                if s in cols:
                    outer = cols[s].get_sqla_col()
                else:
                    outer = literal_column(f"({s})")
                    outer = self.make_sqla_column_compatible(outer, s)

                groupby_exprs_sans_timestamp[outer.name] = outer
                select_exprs.append(outer)
        elif columns:
            for s in columns:
                select_exprs.append(
                    cols[s].get_sqla_col() if s in cols else self.
                    make_sqla_column_compatible(literal_column(s)))
            metrics_exprs = []

        groupby_exprs_with_timestamp = OrderedDict(
            groupby_exprs_sans_timestamp.items())
        if granularity:
            dttm_col = cols[granularity]
            time_grain = extras.get("time_grain_sqla")
            time_filters = []

            if is_timeseries:
                timestamp = dttm_col.get_timestamp_expression(time_grain)
                select_exprs += [timestamp]
                groupby_exprs_with_timestamp[timestamp.name] = timestamp

            # Use main dttm column to support index with secondary dttm columns
            if (db_engine_spec.time_secondary_columns
                    and self.main_dttm_col in self.dttm_cols
                    and self.main_dttm_col != dttm_col.column_name):
                time_filters.append(cols[self.main_dttm_col].get_time_filter(
                    from_dttm, to_dttm))
            time_filters.append(dttm_col.get_time_filter(from_dttm, to_dttm))

        select_exprs += metrics_exprs

        labels_expected = [c._df_label_expected for c in select_exprs]

        select_exprs = db_engine_spec.make_select_compatible(
            groupby_exprs_with_timestamp.values(), select_exprs)
        qry = sa.select(select_exprs)

        tbl = self.get_from_clause(template_processor)

        if not columns:
            qry = qry.group_by(*groupby_exprs_with_timestamp.values())

        where_clause_and = []
        having_clause_and = []
        for flt in filter:
            if not all([flt.get(s) for s in ["col", "op"]]):
                continue
            col = flt["col"]
            op = flt["op"]
            col_obj = cols.get(col)
            if col_obj:
                is_list_target = op in ("in", "not in")
                eq = self.filter_values_handler(
                    flt.get("val"),
                    target_column_is_numeric=col_obj.is_num,
                    is_list_target=is_list_target,
                )
                if op in ("in", "not in"):
                    cond = col_obj.get_sqla_col().in_(eq)
                    if "<NULL>" in eq:
                        cond = or_(cond,
                                   col_obj.get_sqla_col() == None)  # noqa
                    if op == "not in":
                        cond = ~cond
                    where_clause_and.append(cond)
                else:
                    if col_obj.is_num:
                        eq = utils.string_to_num(flt["val"])
                    if op == "==":
                        where_clause_and.append(col_obj.get_sqla_col() == eq)
                    elif op == "!=":
                        where_clause_and.append(col_obj.get_sqla_col() != eq)
                    elif op == ">":
                        where_clause_and.append(col_obj.get_sqla_col() > eq)
                    elif op == "<":
                        where_clause_and.append(col_obj.get_sqla_col() < eq)
                    elif op == ">=":
                        where_clause_and.append(col_obj.get_sqla_col() >= eq)
                    elif op == "<=":
                        where_clause_and.append(col_obj.get_sqla_col() <= eq)
                    elif op == "LIKE":
                        where_clause_and.append(
                            col_obj.get_sqla_col().like(eq))
                    elif op == "IS NULL":
                        where_clause_and.append(
                            col_obj.get_sqla_col() == None)  # noqa
                    elif op == "IS NOT NULL":
                        where_clause_and.append(
                            col_obj.get_sqla_col() != None)  # noqa
        if extras:
            where = extras.get("where")
            if where:
                where = template_processor.process_template(where)
                where_clause_and += [sa.text("({})".format(where))]
            having = extras.get("having")
            if having:
                having = template_processor.process_template(having)
                having_clause_and += [sa.text("({})".format(having))]
        if granularity:
            qry = qry.where(and_(*(time_filters + where_clause_and)))
        else:
            qry = qry.where(and_(*where_clause_and))
        qry = qry.having(and_(*having_clause_and))

        if not orderby and not columns:
            orderby = [(main_metric_expr, not order_desc)]

        for col, ascending in orderby:
            direction = asc if ascending else desc
            if utils.is_adhoc_metric(col):
                col = self.adhoc_metric_to_sqla(col, cols)
            qry = qry.order_by(direction(col))

        if row_limit:
            qry = qry.limit(row_limit)

        if is_timeseries and timeseries_limit and groupby and not time_groupby_inline:
            if self.database.db_engine_spec.inner_joins:
                # some sql dialects require for order by expressions
                # to also be in the select clause -- others, e.g. vertica,
                # require a unique inner alias
                inner_main_metric_expr = self.make_sqla_column_compatible(
                    main_metric_expr, "mme_inner__")
                inner_groupby_exprs = []
                inner_select_exprs = []
                for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
                    inner = self.make_sqla_column_compatible(
                        gby_obj, gby_name + "__")
                    inner_groupby_exprs.append(inner)
                    inner_select_exprs.append(inner)

                inner_select_exprs += [inner_main_metric_expr]
                subq = select(inner_select_exprs).select_from(tbl)
                inner_time_filter = dttm_col.get_time_filter(
                    inner_from_dttm or from_dttm, inner_to_dttm or to_dttm)
                subq = subq.where(
                    and_(*(where_clause_and + [inner_time_filter])))
                subq = subq.group_by(*inner_groupby_exprs)

                ob = inner_main_metric_expr
                if timeseries_limit_metric:
                    ob = self._get_timeseries_orderby(timeseries_limit_metric,
                                                      metrics_dict, cols)
                direction = desc if order_desc else asc
                subq = subq.order_by(direction(ob))
                subq = subq.limit(timeseries_limit)

                on_clause = []
                for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
                    # in this case the column name, not the alias, needs to be
                    # conditionally mutated, as it refers to the column alias in
                    # the inner query
                    col_name = db_engine_spec.make_label_compatible(gby_name +
                                                                    "__")
                    on_clause.append(gby_obj == column(col_name))

                tbl = tbl.join(subq.alias(), and_(*on_clause))
            else:
                if timeseries_limit_metric:
                    orderby = [(
                        self._get_timeseries_orderby(timeseries_limit_metric,
                                                     metrics_dict, cols),
                        False,
                    )]

                # run subquery to get top groups
                subquery_obj = {
                    "prequeries": prequeries,
                    "is_prequery": True,
                    "is_timeseries": False,
                    "row_limit": timeseries_limit,
                    "groupby": groupby,
                    "metrics": metrics,
                    "granularity": granularity,
                    "from_dttm": inner_from_dttm or from_dttm,
                    "to_dttm": inner_to_dttm or to_dttm,
                    "filter": filter,
                    "orderby": orderby,
                    "extras": extras,
                    "columns": columns,
                    "order_desc": True,
                }
                result = self.query(subquery_obj)
                dimensions = [
                    c for c in result.df.columns
                    if c not in metrics and c in groupby_exprs_sans_timestamp
                ]
                top_groups = self._get_top_groups(
                    result.df, dimensions, groupby_exprs_sans_timestamp)
                qry = qry.where(top_groups)

        return SqlaQuery(sqla_query=qry.select_from(tbl),
                         labels_expected=labels_expected)

    def _get_timeseries_orderby(self, timeseries_limit_metric, metrics_dict,
                                cols):
        if utils.is_adhoc_metric(timeseries_limit_metric):
            ob = self.adhoc_metric_to_sqla(timeseries_limit_metric, cols)
        elif timeseries_limit_metric in metrics_dict:
            timeseries_limit_metric = metrics_dict.get(timeseries_limit_metric)
            ob = timeseries_limit_metric.get_sqla_col()
        else:
            raise Exception(
                _("Metric '{}' is not valid".format(timeseries_limit_metric)))

        return ob

    def _get_top_groups(self, df, dimensions, groupby_exprs):
        groups = []
        for unused, row in df.iterrows():
            group = []
            for dimension in dimensions:
                group.append(groupby_exprs[dimension] == row[dimension])
            groups.append(and_(*group))

        return or_(*groups)

    def query(self, query_obj):
        qry_start_dttm = datetime.now()
        query_str_ext = self.get_query_str_extended(query_obj)
        sql = query_str_ext.sql
        status = utils.QueryStatus.SUCCESS
        error_message = None

        def mutator(df):
            labels_expected = query_str_ext.labels_expected
            if df is not None and not df.empty:
                if len(df.columns) != len(labels_expected):
                    raise Exception(f"For {sql}, df.columns: {df.columns}"
                                    f" differs from {labels_expected}")
                else:
                    df.columns = labels_expected
            return df

        try:
            df = self.database.get_df(sql, self.schema, mutator)
        except Exception as e:
            df = None
            status = utils.QueryStatus.FAILED
            logging.exception(f"Query {sql} on schema {self.schema} failed")
            db_engine_spec = self.database.db_engine_spec
            error_message = db_engine_spec.extract_error_message(e)

        # if this is a main query with prequeries, combine them together
        if not query_obj["is_prequery"]:
            query_obj["prequeries"].append(sql)
            sql = ";\n\n".join(query_obj["prequeries"])
        sql += ";"

        return QueryResult(
            status=status,
            df=df,
            duration=datetime.now() - qry_start_dttm,
            query=sql,
            error_message=error_message,
        )

    def get_sqla_table_object(self):
        return self.database.get_table(self.table_name, schema=self.schema)

    def fetch_metadata(self):
        """Fetches the metadata for the table and merges it in"""
        try:
            table = self.get_sqla_table_object()
        except Exception as e:
            logging.exception(e)
            raise Exception(
                _("Table [{}] doesn't seem to exist in the specified database, "
                  "couldn't fetch column information").format(self.table_name))

        M = SqlMetric  # noqa
        metrics = []
        any_date_col = None
        db_engine_spec = self.database.db_engine_spec
        db_dialect = self.database.get_dialect()
        dbcols = (db.session.query(TableColumn).filter(
            TableColumn.table == self).filter(
                or_(TableColumn.column_name == col.name
                    for col in table.columns)))
        dbcols = {dbcol.column_name: dbcol for dbcol in dbcols}

        for col in table.columns:
            try:
                datatype = db_engine_spec.column_datatype_to_string(
                    col.type, db_dialect)
            except Exception as e:
                datatype = "UNKNOWN"
                logging.error("Unrecognized data type in {}.{}".format(
                    table, col.name))
                logging.exception(e)
            dbcol = dbcols.get(col.name, None)
            if not dbcol:
                dbcol = TableColumn(column_name=col.name, type=datatype)
                dbcol.sum = dbcol.is_num
                dbcol.avg = dbcol.is_num
                dbcol.is_dttm = dbcol.is_time
                db_engine_spec.alter_new_orm_column(dbcol)
            else:
                dbcol.type = datatype
            dbcol.groupby = True
            dbcol.filterable = True
            self.columns.append(dbcol)
            if not any_date_col and dbcol.is_time:
                any_date_col = col.name

        metrics.append(
            M(
                metric_name="count",
                verbose_name="COUNT(*)",
                metric_type="count",
                expression="COUNT(*)",
            ))
        if not self.main_dttm_col:
            self.main_dttm_col = any_date_col
        self.add_missing_metrics(metrics)
        db.session.merge(self)
        db.session.commit()

    @classmethod
    def import_obj(cls, i_datasource, import_time=None):
        """Imports the datasource from the object to the database.

         Metrics and columns and datasource will be overrided if exists.
         This function can be used to import/export dashboards between multiple
         superset instances. Audit metadata isn't copies over.
        """
        def lookup_sqlatable(table):
            return (db.session.query(SqlaTable).join(Database).filter(
                SqlaTable.table_name == table.table_name,
                SqlaTable.schema == table.schema,
                Database.id == table.database_id,
            ).first())

        def lookup_database(table):
            try:
                return (db.session.query(Database).filter_by(
                    database_name=table.params_dict["database_name"]).one())
            except NoResultFound:
                raise DatabaseNotFound(
                    _(
                        "Database '%(name)s' is not found",
                        name=table.params_dict["database_name"],
                    ))

        return import_datasource.import_datasource(db.session, i_datasource,
                                                   lookup_database,
                                                   lookup_sqlatable,
                                                   import_time)

    @classmethod
    def query_datasources_by_name(cls,
                                  session,
                                  database,
                                  datasource_name,
                                  schema=None):
        query = (session.query(cls).filter_by(
            database_id=database.id).filter_by(table_name=datasource_name))
        if schema:
            query = query.filter_by(schema=schema)
        return query.all()

    @staticmethod
    def default_query(qry):
        return qry.filter_by(is_sqllab_view=False)
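
Of the sqla_aggregations above, COUNT_DISTINCT is the only entry that is a lambda rather than a bare function: given a column it builds a COUNT(DISTINCT ...) expression. A sketch with a hypothetical column:

# How the COUNT_DISTINCT entry above is applied; column name hypothetical.
import sqlalchemy as sa
from sqlalchemy.sql import column

count_distinct = lambda column_name: sa.func.COUNT(sa.distinct(column_name))
expr = count_distinct(column("user_id"))  # a COUNT(DISTINCT user_id) expression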
Example #45
0
def getVehGrp(db, V):
    res = pd.DataFrame(db.session.query(distinct(V.VehicleGroup)).all(),
                       columns=['VehicleGroup'])
    return res
Example #46
0
File: compiler.py  Project: martint/ibis
def _hll_cardinality(t, expr):
    # postgres doesn't have a builtin HLL algorithm, so we default to standard
    # count distinct for now
    arg, _ = expr.op().args
    sa_arg = t.translate(arg)
    return sa.func.count(sa.distinct(sa_arg))
Example #47
0
def num_logchannels():
    try:
        return SESSION.query(func.count(distinct(GroupLogs.chat_id))).scalar()
    finally:
        SESSION.close()
Example #48
0
def get_labels():
    labels_table = db_tables.sketch_temp_labels_table

    return sql.select([sql.distinct(labels_table.c.label)])
Example #49
0
    min=func.min(models.Sample.volume).label('min'),
    max=func.max(models.Sample.volume).label('max'),
    count=func.count(models.Sample.volume).label('count')
)

UNPARAMETERIZED_AGGREGATES = dict(
    stddev=func.stddev_pop(models.Sample.volume).label('stddev')
)

PARAMETERIZED_AGGREGATES = dict(
    validate=dict(
        cardinality=lambda p: p in ['resource_id', 'user_id', 'project_id']
    ),
    compute=dict(
        cardinality=lambda p: func.count(
            distinct(getattr(models.Resource, p))
        ).label('cardinality/%s' % p)
    )
)

AVAILABLE_CAPABILITIES = {
    'meters': {'query': {'simple': True,
                         'metadata': True}},
    'resources': {'query': {'simple': True,
                            'metadata': True}},
    'samples': {'pagination': True,
                'groupby': True,
                'query': {'simple': True,
                          'metadata': True,
                          'complex': True}},
    'statistics': {'groupby': True,
Example #50
0
        In [124]: users.all()[1].name
        Out[124]: u'tobi'
        
        In [127]: users.all()[1].id
        AttributeError: 'result' object has no attribute 'id'       # i.e. the returned User-like result objects only carry the name attribute.
        """

        # 5.7 Aliasing result columns (label)
        users_7 = db.query(User.name.label(
            'user_name')).all()  # label creates a new attribute on the result objects; the old name attribute is not kept.
        for user in users_7:
            print user.user_name

        # 5.8 Distinct queries (distinct)
        from sqlalchemy import distinct
        users_8 = db.query(distinct(User.name).label('name')).all()

        # 5.9 Aggregate queries (func: count, avg, sum)
        from sqlalchemy import func
        user_count = db.query(User.name).order_by(User.name).count()
        age_avg = db.query(func.avg(
            User.age)).first()  # only one result object, yet first() is used instead of all(), because all() would wrap it in an extra [].
        age_sum = db.query(func.sum(User.age)).first()

        # 5.10 Grouped queries (group_by)
        users = db.query(func.count(User.name).label('count'),
                         User.age).group_by(User.age)  # group people with the same age together and count each group
        for user in users:
            print 'age:{0}, count:{1}'.format(user.age, user.count)

    # ------------------------------------------------------------------------------------------------------------------
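
One pattern the notes above stop short of is counting distinct values. A short sketch in the same register, reusing the tutorial's db session and User model (both assumed from the surrounding snippet):

        # 5.11 Counting distinct values: nest distinct() inside func.count().
        # (db and User are the session and model from the notes above.)
        unique_names = db.query(func.count(distinct(User.name))).scalar()
        # emitted SQL: SELECT count(DISTINCT users.name) FROM users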
Example #51
0
def query_ids_with(tag_ids):
    return db.session.query(distinct(Device.id)).\
            join(DeviceToTag).\
            filter(DeviceToTag.tag_id.in_(tag_ids)).\
            group_by(Device.id).\
            having(func.count(DeviceToTag.tag_id) == len(tag_ids))
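
The having(func.count(...) == len(tag_ids)) clause is relational division: after the in_ filter, a device that carries every requested tag contributes exactly len(tag_ids) rows (assuming device/tag pairs are unique), so the count test keeps only full matches. Hypothetical usage:

# Devices tagged with ALL of tags 1, 2 and 3 (tag ids are hypothetical).
matching_ids = [device_id for (device_id,) in query_ids_with([1, 2, 3])]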
Example #52
0
def get_all_sites(info_role):
    '''
    Return all sites
    '''
    parameters = request.args

    id_type_commune = blueprint.config['id_type_commune']


    q = (
        DB.session.query(
            TInfosSite,
            func.max(TBaseVisits.visit_date_min),
            Habref.lb_hab_fr_complet,
            func.count(distinct(TBaseVisits.id_base_visit)),
            func.string_agg(distinct(BibOrganismes.nom_organisme), ', '),
            func.string_agg(distinct(LAreas.area_name), ', ')
            ).outerjoin(
            TBaseVisits, TBaseVisits.id_base_site == TInfosSite.id_base_site
            # get habitat cd_hab
            ).outerjoin(
                Habref, TInfosSite.cd_hab == Habref.cd_hab
            # get organisms of a site
            ).outerjoin(
                corVisitObserver, corVisitObserver.c.id_base_visit == TBaseVisits.id_base_visit
            ).outerjoin(
                User, User.id_role == corVisitObserver.c.id_role
            ).outerjoin(
                BibOrganismes, BibOrganismes.id_organisme == User.id_organisme
            )
            # get municipalities of a site
            .outerjoin(
                corSiteArea, corSiteArea.c.id_base_site == TInfosSite.id_base_site
            ).outerjoin(
                LAreas, and_(LAreas.id_area == corSiteArea.c.id_area, LAreas.id_type == id_type_commune)
            )
            .group_by(
                TInfosSite, Habref.lb_hab_fr_complet
            )
        )


    if 'cd_hab' in parameters:
        q = q.filter(TInfosSite.cd_hab == parameters['cd_hab'])
    
    if 'id_base_site' in parameters:
        q = q.filter(TInfosSite.id_base_site == parameters['id_base_site'])

    if 'organisme' in parameters:
        q = q.filter(BibOrganismes.id_organisme == parameters['organisme'])

    if 'commune' in parameters:
        q = q.filter(LAreas.area_name == parameters['commune'])

    if 'year' in parameters:
        # re-run the query to fetch the exact date_max when filtering by year
        q_year = (
            DB.session.query(
                TInfosSite.id_base_site,
                func.max(TBaseVisits.visit_date_min),
            ).outerjoin(
                TBaseVisits, TBaseVisits.id_base_site == TInfosSite.id_base_site
            ).group_by(TInfosSite.id_base_site)
        )

        data_year = q_year.all()

        q = q.filter(func.date_part('year', TBaseVisits.visit_date_min) == parameters['year'])
    
    page = request.args.get('page', 1, type=int)
    items_per_page = blueprint.config['items_per_page']
    pagination_serverside = blueprint.config['pagination_serverside']

    if (pagination_serverside):
        pagination = q.paginate(page, items_per_page, False)
        data = pagination.items
        totalItmes = pagination.total
    else:
        totalItmes = 0
        data = q.all()

    pageInfo= {
        'totalItmes' : totalItmes,
        'items_per_page' : items_per_page,
    }
    features = []

    if data:
        for d in data:
            feature = d[0].get_geofeature()
            id_site = feature['properties']['id_base_site']
            base_site_code = feature['properties']['t_base_site']['base_site_code']
            base_site_description = feature['properties']['t_base_site']['base_site_description'] or 'Aucune description'
            base_site_name = feature['properties']['t_base_site']['base_site_name']
            if feature['properties']['t_base_site']:
                del feature['properties']['t_base_site']
            if 'year' in parameters:
                for dy in data_year:
                    #  fetch the correct max date for the site when filtering by year
                    if id_site == dy[0]:
                        feature['properties']['date_max'] = str(d[1])
            else:
                feature['properties']['date_max'] = str(d[1])
                if d[1] == None:
                    feature['properties']['date_max'] = 'Aucune visite'
            feature['properties']['nom_habitat'] = str(d[2])
            feature['properties']['nb_visit'] = str(d[3])
            feature['properties']['organisme'] = str(d[4])
            feature['properties']['nom_commune'] = str(d[5])
            if d[4] == None:
                feature['properties']['organisme'] = 'Aucun'
            feature['properties']['base_site_code'] = base_site_code
            feature['properties']['base_site_description'] = base_site_description
            feature['properties']['base_site_name'] = base_site_name
            features.append(feature)

        return [pageInfo,FeatureCollection(features)]
    return None
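
The func.string_agg(distinct(...), ', ') calls above are PostgreSQL-specific: each site's distinct organisms and communes are folded into one comma-separated string per group. A compile sketch with a hypothetical column name:

# PostgreSQL string_agg over DISTINCT values, as used in the query above.
from sqlalchemy import distinct, func
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import column

expr = func.string_agg(distinct(column("nom_organisme")), ", ")
print(expr.compile(dialect=postgresql.dialect(),
                   compile_kwargs={"literal_binds": True}))
# string_agg(DISTINCT nom_organisme, ', ')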
Example #53
0
File: Assets.py  Project: qqpals/opsweb
def assets_deploy(room=None,busi=None,idc=None):
    CONFS = defaultdict
    INFOS = defaultdict
    BUSIS = defaultdict
    busi_vals = defaultdict
    idc_vals = defaultdict
    db_dic_id = db_idc.idc_id
    db_server = db_idc.idc_servers
    db_third = db_idc.third_resource
    db_project = db_op.project_list
    db_busi = db_op.business
    db_project_third = db_op.project_third
    try:
        if room:
            try:
                idcs = db_dic_id.query.with_entities(db_dic_id.id).filter(db_dic_id.aid == room).all()
                if idcs:
                    # fetch data-center (room) info
                    idcs = tuple([int(idc[0]) for idc in idcs])
                    # fetch asset info
                    vals = db_server.query.with_entities(db_server.hostname,db_server.ip,db_server.ssh_port,db_server.host_type,db_server.cpu_core,db_server.mem).filter(db_server.idc_id.in_(idcs)).all()
            except Exception as e:
                logging.error(e)
        if busi:
            try:
                busi_id = db_busi.query.with_entities(db_busi.id).filter(db_busi.business==busi).all()
                busi_id = busi_id[0][0]
                projects = db_project.query.with_entities(distinct(db_project.project)).filter(db_project.business_id==busi_id).all()
                projects = tuple([project[0] for project in projects])
                ids = db_project_third.query.with_entities(db_project_third.third_id).filter(db_project_third.project.in_(projects)).all()
                third_ids = [id[0] for id in ids]
                self_ips = db_project.query.with_entities(distinct(db_project.ip)).filter(db_project.project.in_(projects)).all()
                self_ips = [ip[0] for ip in self_ips]
                third_ips = db_third.query.with_entities(distinct(db_third.ip)).filter(db_third.id.in_(tuple(third_ids))).all()
                third_ips = [ip[0] for ip in third_ips]
                if idc:
                    idc_ids = db_dic_id.query.with_entities(db_dic_id.id).filter(db_dic_id.aid==idc).all()
                    idc_ids = tuple([id[0] for id in idc_ids])
                    vals = db_server.query.with_entities(db_server.hostname, db_server.ip, db_server.ssh_port,
                                                         db_server.host_type, db_server.cpu_core, db_server.mem).filter(and_(
                        db_server.ip.in_(tuple(self_ips + third_ips)),db_server.idc_id.in_(idc_ids))).all()
                else:
                    vals = db_server.query.with_entities(db_server.hostname, db_server.ip, db_server.ssh_port,
                                                     db_server.host_type, db_server.cpu_core, db_server.mem).filter(db_server.ip.in_(tuple(self_ips + third_ips))).all()
            except Exception as e:
                logging.error(e)
        try:
            CONFS = {val[0]:val[3:] for val in vals}
            INFOS = {val[0]:[] for val in vals}
            BUSIS = {val[0]: [] for val in vals}
            hostnames = {val[1]:val[0] for val in vals}
            ips = tuple([val[1] for val in vals])
            if room:
                # fetch business info
                busi_vals = db_busi.query.with_entities(db_busi.id,db_busi.business).all()
                busi_vals = {b[0]:b[1] for b in busi_vals}
            if busi:
                # fetch data-center info
                idc_vals = db_dic_id.query.with_entities(db_dic_id.id,db_dic_id.aid).all()
                idc_vals = {int(b[0]): b[1] for b in idc_vals}
            IDCS = db_server.query.with_entities(db_server.hostname,db_server.idc_id).filter(db_server.ip.in_(ips)).all()
            IDCS = {idc[0]:[int(idc[1])] for idc in IDCS}
        except Exception as e:
            logging.error(e)
        # fetch application service info
        try:
            third_vals = db_third.query.with_entities(db_third.resource_type,db_third.ip).filter(db_third.ip.in_(ips)).all()
            for info in third_vals:
                resource,ip = info
                INFOS[hostnames[ip]].append(resource)
            project_vals = db_project.query.with_entities(db_project.resource,db_project.ip,db_project.business_id).filter(db_project.ip.in_(ips)).all()
            for info in project_vals:
                resource,ip,business_id = info
                BUSIS[hostnames[ip]].append(business_id)
                INFOS[hostnames[ip]].append(resource)
        except Exception as e:
            logging.error(e)
        # normalize the collected data
        try:
            if INFOS:
                for info in INFOS:
                    INFOS[info] = set(INFOS[info])
            if BUSIS:
                for info in BUSIS:
                    BUSIS[info] = set(BUSIS[info])
        except Exception as e:
            logging.error(e)
    except Exception as e:
        logging.error(e)
    if room:
        tables = ['主机名', '部署应用', '线上业务', '主机类型', 'CPU核数','内存']
        return render_template('assets_deploy.html',INFOS=INFOS,BUSIS=BUSIS,tables=tables,CONFS=CONFS,busi_vals=busi_vals,room=room,busi=busi)
    if busi:
        tables = ['主机名', '部署应用', '机房', '主机类型', 'CPU核数','内存']
        return render_template('assets_deploy.html',INFOS=INFOS,BUSIS=IDCS,tables=tables,CONFS=CONFS,busi_vals=idc_vals,room=room,busi=busi)
Example #54
0
def view_user_count(self):
    return (select([func.count(distinct(
        PageView.user_id))]).where(PageView.object_id == self.id).where(
            PageView.object_type == 'post').label("view_user_count"))
Example #55
0
def get_student_data(grade_bounds):
    metadata = sql.MetaData()

    all_snapshots = db_tables.clean_all_snapshots_table
    hs_grade_gpa = get_students_grade_gpa().cte('hs_grade_gpa')
    inv_table = db_tables.clean_intervention_table

    get_ordered_array = lambda c, o: db_func.array_agg(aggregate_order_by(
        c, o))

    discipline_incidents_rate = \
        db_func.sum(all_snapshots.c.discipline_incidents) /\
            db_func.count(sql.distinct(all_snapshots.c.school_year))

    absenteeism_rate = db_func.sum(all_snapshots.c.days_absent) /\
            db_func.count(sql.distinct(all_snapshots.c.school_year))

    unexcused_absenteeism_rate = db_func.sum(all_snapshots.c.days_absent_unexcused) /\
        db_func.count(sql.distinct(all_snapshots.c.school_year))

    basic_info = sql.select([
                    all_snapshots.c.student_lookup,
                    db_func.max(all_snapshots.c.gender).label('gender'),
                    db_func.max(all_snapshots.c.ethnicity).label('ethnicity'),
                    discipline_incidents_rate.label('discipline_incidents_rate'),
                    absenteeism_rate.label('absenteeism_rate'),
                    unexcused_absenteeism_rate.label('unexcused_absenteeism_rate'),
                    db_func.array_agg(sql.distinct(all_snapshots.c.disability)).label('disabilities'),
                    db_func.array_agg(sql.distinct(all_snapshots.c.disadvantagement)).label('disadvantagements'),
                    db_func.array_agg(sql.distinct(all_snapshots.c.limited_english)).label('limited_english'),
                    db_func.array_agg(sql.distinct(all_snapshots.c.special_ed)).label('special_ed'),
                    db_func.max(all_snapshots.c.graduation_date).label('graduation_date'),
                    get_ordered_array(all_snapshots.c.school_code, all_snapshots.c.grade).label('school_codes'),
                    get_ordered_array(all_snapshots.c.school_name, all_snapshots.c.grade).label('school_names'),
                    get_ordered_array(all_snapshots.c.grade, all_snapshots.c.grade).label('snapshots_grades'),
                    get_ordered_array(all_snapshots.c.school_year, all_snapshots.c.grade).label('snapshots_school_years')
                ]).\
                where(
                    sql.and_(
                        all_snapshots.c.grade >= grade_bounds[0],
                        all_snapshots.c.grade <= grade_bounds[1]
                    )
                ).\
                group_by(
                    all_snapshots.c.student_lookup
                ).cte('basic_info')

    hs_gpa_info = sql.select([
        hs_grade_gpa.c.student_lookup,
        get_ordered_array(hs_grade_gpa.c.gpa,
                          hs_grade_gpa.c.grade).label('gpas'),
        get_ordered_array(hs_grade_gpa.c.grade,
                          hs_grade_gpa.c.grade).label('hs_grades'),
        get_ordered_array(hs_grade_gpa.c.school_year,
                          hs_grade_gpa.c.grade).label('hs_school_years'),
        get_ordered_array(hs_grade_gpa.c.num_classes,
                          hs_grade_gpa.c.grade).label('num_classes')
    ]).where(
        sql.and_(hs_grade_gpa.c.grade >= grade_bounds[0],
                 hs_grade_gpa.c.grade <= grade_bounds[1])).group_by(
                     hs_grade_gpa.c.student_lookup).cte('hs_gpa_info')

    inv_info = sql.select([
        inv_table.c.student_lookup,
        get_ordered_array(inv_table.c.inv_group,
                          inv_table.c.grade).label('inv_groups'),
        get_ordered_array(inv_table.c.membership_code,
                          inv_table.c.grade).label('membership_codes'),
        get_ordered_array(inv_table.c.grade,
                          inv_table.c.grade).label('inv_grades'),
        get_ordered_array(inv_table.c.school_year,
                          inv_table.c.grade).label('inv_school_years'),
    ]).where(
        sql.and_(inv_table.c.grade >= grade_bounds[0],
                 inv_table.c.grade <= grade_bounds[1])).group_by(
                     inv_table.c.student_lookup).cte('inv_info')

    labels = db_tables.sketch_temp_labels_table

    to_join = [basic_info, hs_gpa_info, inv_info, labels]

    joined = to_join[0]
    for i in range(1, len(to_join)):
        if i == 1:
            on_clause = (
                joined.c.student_lookup == to_join[i].c.student_lookup)
        else:
            on_clause = (
                joined.c[to_join[0].name +
                         '_student_lookup'] == to_join[i].c.student_lookup)

        joined = sql.join(left=joined,
                          right=to_join[i],
                          onclause=on_clause,
                          isouter=True)

    cs = []
    added_student_lookup = False
    for c in joined.c:
        if c.name == 'student_lookup':
            if not added_student_lookup:
                cs.append(c)
                added_student_lookup = True
        else:
            cs.append(c)

    return sql.select(cs).select_from(joined)
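
# A minimal, self-contained sketch (table and column names here are invented)
# of the join-then-dedupe pattern above: each chained outer join contributes
# its own student_lookup column, so only the first copy is kept when building
# the final select.
import sqlalchemy as sa

md = sa.MetaData()
a = sa.Table('basic_info', md, sa.Column('student_lookup', sa.Integer),
             sa.Column('gpas', sa.String))
b = sa.Table('inv_info', md, sa.Column('student_lookup', sa.Integer),
             sa.Column('inv_groups', sa.String))

joined = sa.join(a, b, a.c.student_lookup == b.c.student_lookup, isouter=True)
cols, seen = [], False
for c in joined.c:
    if c.name == 'student_lookup' and seen:
        continue  # drop the duplicate key column from the right-hand table
    seen = seen or c.name == 'student_lookup'
    cols.append(c)
print(sa.select(cols).select_from(joined))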
Example #56
    def view_user_count(self):
        return (db.session.query(func.count(distinct(
            PageView.user_id))).filter(PageView.object_id == self.id).filter(
                PageView.object_type == 'post').scalar())
Example #57
class SqlaTable(Model, BaseDatasource):
    """An ORM object for SqlAlchemy table references"""

    type = 'table'
    query_language = 'sql'
    metric_class = SqlMetric
    column_class = TableColumn

    __tablename__ = 'tables'
    __table_args__ = (UniqueConstraint('database_id', 'table_name'), )

    table_name = Column(String(250))
    main_dttm_col = Column(String(250))
    database_id = Column(Integer, ForeignKey('dbs.id'), nullable=False)
    fetch_values_predicate = Column(String(1000))
    user_id = Column(Integer, ForeignKey('ab_user.id'))
    owner = relationship(security_manager.user_model,
                         backref='tables',
                         foreign_keys=[user_id])
    database = relationship('Database',
                            backref=backref('tables',
                                            cascade='all, delete-orphan'),
                            foreign_keys=[database_id])
    schema = Column(String(255))
    sql = Column(Text)
    is_sqllab_view = Column(Boolean, default=False)
    template_params = Column(Text)

    baselink = 'tablemodelview'

    export_fields = ('table_name', 'main_dttm_col', 'description',
                     'default_endpoint', 'database_id', 'offset',
                     'cache_timeout', 'schema', 'sql', 'params',
                     'template_params', 'filter_select_enabled')
    update_from_object_fields = [
        f for f in export_fields if f not in ('table_name', 'database_id')
    ]
    export_parent = 'database'
    export_children = ['metrics', 'columns']

    sqla_aggregations = {
        'COUNT_DISTINCT':
        lambda column_name: sa.func.COUNT(sa.distinct(column_name)),
        'COUNT':
        sa.func.COUNT,
        'SUM':
        sa.func.SUM,
        'AVG':
        sa.func.AVG,
        'MIN':
        sa.func.MIN,
        'MAX':
        sa.func.MAX,
    }

    def __repr__(self):
        return self.name

    @property
    def connection(self):
        return str(self.database)

    @property
    def description_markeddown(self):
        return utils.markdown(self.description)

    @property
    def datasource_name(self):
        return self.table_name

    @property
    def database_name(self):
        return self.database.name

    @property
    def link(self):
        name = escape(self.name)
        anchor = '<a target="_blank" href="{self.explore_url}">{name}</a>'
        return Markup(anchor.format(**locals()))

    @property
    def schema_perm(self):
        """Returns schema permission if present, database one otherwise."""
        return security_manager.get_schema_perm(self.database, self.schema)

    def get_perm(self):
        return ('[{obj.database}].[{obj.table_name}]'
                '(id:{obj.id})').format(obj=self)

    @property
    def name(self):
        if not self.schema:
            return self.table_name
        return '{}.{}'.format(self.schema, self.table_name)

    @property
    def full_name(self):
        return utils.get_datasource_full_name(self.database,
                                              self.table_name,
                                              schema=self.schema)

    @property
    def dttm_cols(self):
        l = [c.column_name for c in self.columns if c.is_dttm]  # noqa: E741
        if self.main_dttm_col and self.main_dttm_col not in l:
            l.append(self.main_dttm_col)
        return l

    @property
    def num_cols(self):
        return [c.column_name for c in self.columns if c.is_num]

    @property
    def any_dttm_col(self):
        cols = self.dttm_cols
        if cols:
            return cols[0]

    @property
    def html(self):
        t = ((c.column_name, c.type) for c in self.columns)
        df = pd.DataFrame(t)
        df.columns = ['field', 'type']
        return df.to_html(
            index=False,
            classes=('dataframe table table-striped table-bordered '
                     'table-condensed'))

    @property
    def sql_url(self):
        return self.database.sql_url + '?table_name=' + str(self.table_name)

    def external_metadata(self):
        cols = self.database.get_columns(self.table_name, schema=self.schema)
        for col in cols:
            col['type'] = '{}'.format(col['type'])
        return cols

    @property
    def time_column_grains(self):
        return {
            'time_columns': self.dttm_cols,
            'time_grains': [grain.name for grain in self.database.grains()],
        }

    @property
    def select_star(self):
        # show_cols and latest_partition are set to False to avoid
        # the expense of inspecting the DB
        return self.database.select_star(self.name,
                                         show_cols=False,
                                         latest_partition=False)

    def get_col(self, col_name):
        columns = self.columns
        for col in columns:
            if col_name == col.column_name:
                return col

    @property
    def data(self):
        d = super(SqlaTable, self).data
        if self.type == 'table':
            grains = self.database.grains() or []
            if grains:
                grains = [(g.duration, g.name) for g in grains]
            d['granularity_sqla'] = utils.choicify(self.dttm_cols)
            d['time_grain_sqla'] = grains
            d['main_dttm_col'] = self.main_dttm_col
        return d

    def values_for_column(self, column_name, limit=10000):
        """Runs query against sqla to retrieve some
        sample values for the given column.
        """
        cols = {col.column_name: col for col in self.columns}
        target_col = cols[column_name]
        tp = self.get_template_processor()

        qry = (select([target_col.get_sqla_col()
                       ]).select_from(self.get_from_clause(tp)).distinct())
        if limit:
            qry = qry.limit(limit)

        if self.fetch_values_predicate:
            qry = qry.where(tp.process_template(self.fetch_values_predicate))

        engine = self.database.get_sqla_engine()
        sql = '{}'.format(
            qry.compile(engine, compile_kwargs={'literal_binds': True}), )
        sql = self.mutate_query_from_config(sql)

        df = pd.read_sql_query(sql=sql, con=engine)
        return [row[0] for row in df.to_records(index=False)]

    def mutate_query_from_config(self, sql):
        """Apply config's SQL_QUERY_MUTATOR

        Typically adds comments to the query with context"""
        SQL_QUERY_MUTATOR = config.get('SQL_QUERY_MUTATOR')
        if SQL_QUERY_MUTATOR:
            username = utils.get_username()
            sql = SQL_QUERY_MUTATOR(sql, username, security_manager,
                                    self.database)
        return sql
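
    # A sketch (an assumption, not a shipped default) of a compatible
    # SQL_QUERY_MUTATOR hook for superset_config.py; the four-argument
    # signature mirrors the call above:
    #
    #     def SQL_QUERY_MUTATOR(sql, username, security_manager, database):
    #         return '-- submitted by {}\n{}'.format(username, sql)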

    def get_template_processor(self, **kwargs):
        return get_template_processor(table=self,
                                      database=self.database,
                                      **kwargs)

    def get_query_str(self, query_obj):
        qry = self.get_sqla_query(**query_obj)
        sql = self.database.compile_sqla_query(qry)
        logging.info(sql)
        sql = sqlparse.format(sql, reindent=True)
        if query_obj['is_prequery']:
            query_obj['prequeries'].append(sql)
        sql = self.mutate_query_from_config(sql)
        return sql

    def get_sqla_table(self):
        tbl = table(self.table_name)
        if self.schema:
            tbl.schema = self.schema
        return tbl

    def get_from_clause(self, template_processor=None):
        # Supporting arbitrary SQL statements in place of tables
        if self.sql:
            from_sql = self.sql
            if template_processor:
                from_sql = template_processor.process_template(from_sql)
            from_sql = sqlparse.format(from_sql, strip_comments=True)
            return TextAsFrom(sa.text(from_sql), []).alias('expr_qry')
        return self.get_sqla_table()

    def adhoc_metric_to_sqla(self, metric, cols):
        """
        Turn an adhoc metric into a sqlalchemy column.

        :param dict metric: Adhoc metric definition
        :param dict cols: Columns for the current table
        :returns: The metric defined as a sqlalchemy column
        :rtype: sqlalchemy.sql.column
        """
        expression_type = metric.get('expressionType')
        db_engine_spec = self.database.db_engine_spec
        label = db_engine_spec.make_label_compatible(metric.get('label'))

        if expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES['SIMPLE']:
            column_name = metric.get('column').get('column_name')
            sqla_column = column(column_name)
            table_column = cols.get(column_name)

            if table_column:
                sqla_column = table_column.get_sqla_col()

            sqla_metric = self.sqla_aggregations[metric.get('aggregate')](
                sqla_column)
            sqla_metric = sqla_metric.label(label)
            return sqla_metric
        elif expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES['SQL']:
            sqla_metric = literal_column(metric.get('sqlExpression'))
            sqla_metric = sqla_metric.label(label)
            return sqla_metric
        else:
            return None
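
    # Illustrative adhoc-metric payloads for the method above (field values
    # are invented; the keys follow the .get() calls):
    #
    #     {'expressionType': 'SIMPLE', 'label': 'total_users',
    #      'column': {'column_name': 'num_users'}, 'aggregate': 'SUM'}
    #     {'expressionType': 'SQL', 'label': 'users_per_row',
    #      'sqlExpression': 'SUM(num_users) / COUNT(*)'}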

    def get_sqla_query(  # sqla
        self,
        groupby,
        metrics,
        granularity,
        from_dttm,
        to_dttm,
        filter=None,  # noqa
        is_timeseries=True,
        timeseries_limit=15,
        timeseries_limit_metric=None,
        row_limit=None,
        inner_from_dttm=None,
        inner_to_dttm=None,
        orderby=None,
        extras=None,
        columns=None,
        order_desc=True,
        prequeries=None,
        is_prequery=False,
    ):
        """Querying any sqla table from this common interface"""
        template_kwargs = {
            'from_dttm': from_dttm,
            'groupby': groupby,
            'metrics': metrics,
            'row_limit': row_limit,
            'to_dttm': to_dttm,
            'filter': filter,
            'columns': {col.column_name: col
                        for col in self.columns},
        }
        template_kwargs.update(self.template_params_dict)
        template_processor = self.get_template_processor(**template_kwargs)
        db_engine_spec = self.database.db_engine_spec

        orderby = orderby or []

        # For backward compatibility
        if granularity not in self.dttm_cols:
            granularity = self.main_dttm_col

        # Database spec supports join-free timeslot grouping
        time_groupby_inline = db_engine_spec.time_groupby_inline

        cols = {col.column_name: col for col in self.columns}
        metrics_dict = {m.metric_name: m for m in self.metrics}

        if not granularity and is_timeseries:
            raise Exception(
                _('Datetime column not provided as part of the table '
                  'configuration and is required by this type of chart'))
        if not groupby and not metrics and not columns:
            raise Exception(_('Empty query?'))
        metrics_exprs = []
        for m in metrics:
            if utils.is_adhoc_metric(m):
                metrics_exprs.append(self.adhoc_metric_to_sqla(m, cols))
            elif m in metrics_dict:
                metrics_exprs.append(metrics_dict.get(m).get_sqla_col())
            else:
                raise Exception(_("Metric '{}' is not valid".format(m)))
        if metrics_exprs:
            main_metric_expr = metrics_exprs[0]
        else:
            main_metric_expr = literal_column('COUNT(*)').label(
                db_engine_spec.make_label_compatible('count'))

        select_exprs = []
        groupby_exprs = []

        if groupby:
            select_exprs = []
            inner_select_exprs = []
            inner_groupby_exprs = []
            for s in groupby:
                col = cols[s]
                outer = col.get_sqla_col()
                inner = col.get_sqla_col(col.column_name + '__')

                groupby_exprs.append(outer)
                select_exprs.append(outer)
                inner_groupby_exprs.append(inner)
                inner_select_exprs.append(inner)
        elif columns:
            for s in columns:
                select_exprs.append(cols[s].get_sqla_col())
            metrics_exprs = []

        if granularity:
            dttm_col = cols[granularity]
            time_grain = (extras or {}).get('time_grain_sqla')
            time_filters = []

            if is_timeseries:
                timestamp = dttm_col.get_timestamp_expression(time_grain)
                select_exprs += [timestamp]
                groupby_exprs += [timestamp]

            # Use main dttm column to support index with secondary dttm columns
            if db_engine_spec.time_secondary_columns and \
                    self.main_dttm_col in self.dttm_cols and \
                    self.main_dttm_col != dttm_col.column_name:
                time_filters.append(cols[self.main_dttm_col].get_time_filter(
                    from_dttm, to_dttm))
            time_filters.append(dttm_col.get_time_filter(from_dttm, to_dttm))

        select_exprs += metrics_exprs
        qry = sa.select(select_exprs)

        tbl = self.get_from_clause(template_processor)

        if not columns:
            qry = qry.group_by(*groupby_exprs)

        where_clause_and = []
        having_clause_and = []
        for flt in filter or []:
            if not all([flt.get(s) for s in ['col', 'op']]):
                continue
            col = flt['col']
            op = flt['op']
            col_obj = cols.get(col)
            if col_obj:
                is_list_target = op in ('in', 'not in')
                eq = self.filter_values_handler(
                    flt.get('val'),
                    target_column_is_numeric=col_obj.is_num,
                    is_list_target=is_list_target)
                if is_list_target:
                    cond = col_obj.get_sqla_col().in_(eq)
                    if '<NULL>' in eq:
                        cond = or_(cond,
                                   col_obj.get_sqla_col() == None)  # noqa
                    if op == 'not in':
                        cond = ~cond
                    where_clause_and.append(cond)
                else:
                    if col_obj.is_num:
                        eq = utils.string_to_num(flt['val'])
                    if op == '==':
                        where_clause_and.append(col_obj.get_sqla_col() == eq)
                    elif op == '!=':
                        where_clause_and.append(col_obj.get_sqla_col() != eq)
                    elif op == '>':
                        where_clause_and.append(col_obj.get_sqla_col() > eq)
                    elif op == '<':
                        where_clause_and.append(col_obj.get_sqla_col() < eq)
                    elif op == '>=':
                        where_clause_and.append(col_obj.get_sqla_col() >= eq)
                    elif op == '<=':
                        where_clause_and.append(col_obj.get_sqla_col() <= eq)
                    elif op == 'LIKE':
                        where_clause_and.append(
                            col_obj.get_sqla_col().like(eq))
                    elif op == 'IS NULL':
                        where_clause_and.append(
                            col_obj.get_sqla_col() == None)  # noqa
                    elif op == 'IS NOT NULL':
                        where_clause_and.append(
                            col_obj.get_sqla_col() != None)  # noqa
        if extras:
            where = extras.get('where')
            if where:
                where = template_processor.process_template(where)
                where_clause_and += [sa.text('({})'.format(where))]
            having = extras.get('having')
            if having:
                having = template_processor.process_template(having)
                having_clause_and += [sa.text('({})'.format(having))]
        if granularity:
            qry = qry.where(and_(*(time_filters + where_clause_and)))
        else:
            qry = qry.where(and_(*where_clause_and))
        qry = qry.having(and_(*having_clause_and))

        if not orderby and not columns:
            orderby = [(main_metric_expr, not order_desc)]

        for col, ascending in orderby:
            direction = asc if ascending else desc
            if utils.is_adhoc_metric(col):
                col = self.adhoc_metric_to_sqla(col, cols)
            qry = qry.order_by(direction(col))

        if row_limit:
            qry = qry.limit(row_limit)

        if is_timeseries and \
                timeseries_limit and groupby and not time_groupby_inline:
            if self.database.db_engine_spec.inner_joins:
                # some sql dialects require order by expressions to also be
                # present in the select clause -- others, e.g. vertica,
                # require a unique inner alias
                inner_main_metric_expr = main_metric_expr.label('mme_inner__')
                inner_select_exprs += [inner_main_metric_expr]
                subq = select(inner_select_exprs)
                subq = subq.select_from(tbl)
                inner_time_filter = dttm_col.get_time_filter(
                    inner_from_dttm or from_dttm,
                    inner_to_dttm or to_dttm,
                )
                subq = subq.where(
                    and_(*(where_clause_and + [inner_time_filter])))
                subq = subq.group_by(*inner_groupby_exprs)

                ob = inner_main_metric_expr
                if timeseries_limit_metric:
                    if utils.is_adhoc_metric(timeseries_limit_metric):
                        ob = self.adhoc_metric_to_sqla(timeseries_limit_metric,
                                                       cols)
                    elif timeseries_limit_metric in metrics_dict:
                        timeseries_limit_metric = metrics_dict.get(
                            timeseries_limit_metric, )
                        ob = timeseries_limit_metric.get_sqla_col()
                    else:
                        raise Exception(_(
                            "Metric '{}' is not valid".format(
                                timeseries_limit_metric)))
                direction = desc if order_desc else asc
                subq = subq.order_by(direction(ob))
                subq = subq.limit(timeseries_limit)

                on_clause = []
                for i, gb in enumerate(groupby):
                    on_clause.append(groupby_exprs[i] == column(gb + '__'))

                tbl = tbl.join(subq.alias(), and_(*on_clause))
            else:
                # run subquery to get top groups
                subquery_obj = {
                    'prequeries': prequeries,
                    'is_prequery': True,
                    'is_timeseries': False,
                    'row_limit': timeseries_limit,
                    'groupby': groupby,
                    'metrics': metrics,
                    'granularity': granularity,
                    'from_dttm': inner_from_dttm or from_dttm,
                    'to_dttm': inner_to_dttm or to_dttm,
                    'filter': filter,
                    'orderby': orderby,
                    'extras': extras,
                    'columns': columns,
                    'order_desc': True,
                }
                result = self.query(subquery_obj)
                dimensions = [c for c in result.df.columns if c not in metrics]
                top_groups = self._get_top_groups(result.df, dimensions)
                qry = qry.where(top_groups)

        return qry.select_from(tbl)

    def _get_top_groups(self, df, dimensions):
        cols = {col.column_name: col for col in self.columns}
        groups = []
        for unused, row in df.iterrows():
            group = []
            for dimension in dimensions:
                col_obj = cols.get(dimension)
                group.append(col_obj.get_sqla_col() == row[dimension])
            groups.append(and_(*group))

        return or_(*groups)
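
    # For dimensions ['country', 'gender'] and a two-row dataframe, the
    # filter built above is equivalent to (values invented):
    #     (country = 'US' AND gender = 'f') OR (country = 'FR' AND gender = 'm')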

    def query(self, query_obj):
        qry_start_dttm = datetime.now()
        sql = self.get_query_str(query_obj)
        status = QueryStatus.SUCCESS
        error_message = None
        df = None
        try:
            df = self.database.get_df(sql, self.schema)
        except Exception as e:
            status = QueryStatus.FAILED
            logging.exception(e)
            error_message = (
                self.database.db_engine_spec.extract_error_message(e))

        # if this is a main query with prequeries, combine them together
        if not query_obj['is_prequery']:
            query_obj['prequeries'].append(sql)
            sql = ';\n\n'.join(query_obj['prequeries'])
        sql += ';'

        return QueryResult(status=status,
                           df=df,
                           duration=datetime.now() - qry_start_dttm,
                           query=sql,
                           error_message=error_message)

    def get_sqla_table_object(self):
        return self.database.get_table(self.table_name, schema=self.schema)

    def fetch_metadata(self):
        """Fetches the metadata for the table and merges it in"""
        try:
            table = self.get_sqla_table_object()
        except Exception:
            raise Exception(
                _("Table [{}] doesn't seem to exist in the specified database, "
                  "couldn't fetch column information").format(self.table_name))

        M = SqlMetric  # noqa
        metrics = []
        any_date_col = None
        db_dialect = self.database.get_dialect()
        dbcols = (db.session.query(TableColumn).filter(
            TableColumn.table == self).filter(
                or_(TableColumn.column_name == col.name
                    for col in table.columns)))
        dbcols = {dbcol.column_name: dbcol for dbcol in dbcols}
        db_engine_spec = self.database.db_engine_spec

        for col in table.columns:
            try:
                datatype = col.type.compile(dialect=db_dialect).upper()
            except Exception as e:
                datatype = 'UNKNOWN'
                logging.error('Unrecognized data type in {}.{}'.format(
                    table, col.name))
                logging.exception(e)
            dbcol = dbcols.get(col.name, None)
            if not dbcol:
                dbcol = TableColumn(column_name=col.name, type=datatype)
                dbcol.groupby = dbcol.is_string
                dbcol.filterable = dbcol.is_string
                dbcol.sum = dbcol.is_num
                dbcol.avg = dbcol.is_num
                dbcol.is_dttm = dbcol.is_time
            else:
                dbcol.type = datatype
            self.columns.append(dbcol)
            if not any_date_col and dbcol.is_time:
                any_date_col = col.name
            metrics += dbcol.get_metrics().values()

        metrics.append(
            M(
                metric_name='count',
                verbose_name='COUNT(*)',
                metric_type='count',
                expression='COUNT(*)',
            ))
        if not self.main_dttm_col:
            self.main_dttm_col = any_date_col
        for metric in metrics:
            metric.metric_name = db_engine_spec.mutate_expression_label(
                metric.metric_name)
        self.add_missing_metrics(metrics)
        db.session.merge(self)
        db.session.commit()

    @classmethod
    def import_obj(cls, i_datasource, import_time=None):
        """Imports the datasource from the object to the database.

         Metrics, columns and the datasource will be overridden if they exist.
         This function can be used to import/export dashboards between multiple
         superset instances. Audit metadata isn't copied over.
        """
        def lookup_sqlatable(table):
            return db.session.query(SqlaTable).join(Database).filter(
                SqlaTable.table_name == table.table_name,
                SqlaTable.schema == table.schema,
                Database.id == table.database_id,
            ).first()

        def lookup_database(table):
            return db.session.query(Database).filter_by(
                database_name=table.params_dict['database_name']).one()

        return import_util.import_datasource(db.session, i_datasource,
                                             lookup_database, lookup_sqlatable,
                                             import_time)

    @classmethod
    def query_datasources_by_name(cls,
                                  session,
                                  database,
                                  datasource_name,
                                  schema=None):
        query = (session.query(cls).filter_by(
            database_id=database.id).filter_by(table_name=datasource_name))
        if schema:
            query = query.filter_by(schema=schema)
        return query.all()

    @staticmethod
    def default_query(qry):
        return qry.filter_by(is_sqllab_view=False)
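
# A standalone sketch (engine URL and table are assumptions) of the
# literal-binds compilation used by values_for_column above: bind parameters
# are rendered inline so the finished SQL string can be handed straight to
# pd.read_sql_query.
import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
users = sa.table('users', sa.column('country'))
qry = sa.select([users.c.country]).distinct().limit(5)
print(str(qry.compile(engine, compile_kwargs={'literal_binds': True})))
# roughly: SELECT DISTINCT users.country FROM users LIMIT 5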
Example #58
def getVioType(db, V):
    res = pd.DataFrame(db.session.query(distinct(V.ViolationType)).all(),
                       columns=['ViolationType'])
    return res
Example #59
def num_chats():
    try:
        return SESSION.query(func.count(distinct(Disable.chat_id))).scalar()
    finally:
        SESSION.close()
Example #60
    def check_lines_for_joints():
        # iterate over every line and count the joints
        # (wall thickness is taken from the pipe)

        session.query(JOINT).delete()
        session.commit()

        for a in session.query(label('lname', distinct(LINE.name))):
            b = session.query(LINE.tag, LINE.dlina, LINE.st_type,
                              LINE.pn_status, LINE.line_from, LINE.line_to,
                              LINE.poz).filter(LINE.name == a.lname).order_by(
                                  LINE.poz)
            for i in range(b.count()):
                det = session.query(SO.type, SO.s, SO.d, SO.tag,
                                    SO.dy).filter(SO.tag == b[i].tag)[0]
                if det.type == "t":
                    # if this is the first segment of the line
                    if i == 0:
                        mesto_reza[det.dy] += 2  # a piece of pipe = 2 cut locations
                        # count the joint before the first segment of the line
                        if b[i].st_type in (0, 2):
                            calc_joint(b[i].line_from, b[i].pn_status,
                                       b[i].tag, b[i].pn_status)
                    else:
                        # not the first segment: add a joint if the previous
                        # pipe differs from the current one or their pn
                        # statuses match
                        if det.tag != b[i - 1].tag or b[i].pn_status == b[
                                i - 1].pn_status:
                            mesto_reza[det.dy] += 2  # a piece of pipe = 2 cut locations
                            calc_joint(b[i - 1].tag, b[i - 1].pn_status,
                                       b[i].tag, b[i].pn_status)
                    # check for extra joints required by the maximum pipe length
                    if det.d < 300:
                        max_l = 9
                    elif det.d < 1000:
                        max_l = 10.5
                    else:
                        max_l = 11.3
                    if b[i].dlina > max_l:
                        for k in range(int(b[i].dlina / max_l)):
                            calc_joint(b[i].tag, b[i].pn_status, b[i].tag,
                                       b[i].pn_status)

                # all other parts except pipes
                else:
                    # for the first part, only count the joint before the
                    # first segment of the line
                    if i == 0:
                        if b[i].st_type in (0, 2):
                            calc_joint(b[i].line_from, b[i].pn_status,
                                       b[i].tag, b[i].pn_status)
                    else:
                        calc_joint(b[i - 1].tag, b[i - 1].pn_status, b[i].tag,
                                   b[i].pn_status)

                # if this is the last segment of the line, count the joint
                # after it
                if i == b.count() - 1:
                    if b[i].st_type in (0, 1):
                        calc_joint(b[i].line_to, b[i].pn_status, b[i].tag,
                                   b[i].pn_status)
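
# A standalone sketch (units assumed: mm for diameter d, metres for lengths)
# of the maximum-pipe-length rule applied above, plus the number of extra
# joints a segment of a given length requires:
def max_pipe_length(d):
    if d < 300:
        return 9
    elif d < 1000:
        return 10.5
    return 11.3

def extra_joints(d, length):
    return int(length / max_pipe_length(d))

assert extra_joints(250, 20) == 2  # a 20 m run of d=250 pipe needs 2 extra joints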