Code example #1
def check_maximum_weight(user):
    max_weight_days = 30
    end_date = datetime.datetime.now()
    start_date = end_date - datetime.timedelta(days=max_weight_days)
    with get_session_manager() as session_manager:
        (max_allowed_weight,) = session_manager.query(Users.max_allowed_weight).filter(Users.user == user).first()
        (max_weight,) = session_manager.query(func.max(Weights.weight)).filter(and_(Weights.user == user,
                                                                                    Weights.date > start_date,
                                                                                    Weights.date <= end_date)).first()
        if max_weight < max_allowed_weight - 1:
            update_max_allowed_weight(user)
            return 'Fantastic - for the last {days} days your weight has been more than 1 kg under the maximum ' \
                   'allowed weight of {max_allowed_weight} kg\n' \
                   ' - we lower the maximum allowed weight by 1 kg'.format(days=max_weight_days,
                                                                           max_allowed_weight=max_allowed_weight)
        start_date = end_date - datetime.timedelta(days=7)
        (max_count,) = session_manager.query(func.count(Weights.weight)).filter(and_(Weights.user == user,
                                                                                    Weights.date > start_date,
                                                                                    Weights.date <= end_date,
                                                                                    Weights.weight > max_allowed_weight)).first()
        if max_count > 0:
            return 'Warning - during the last {days} days {count} weights have been over the allowed maximum weight of {max_allowed_weight} kg.\n' \
                    '\nNow is the time to get it together.'.format(days=7,
                                                              count=max_count,
                                                              max_allowed_weight=max_allowed_weight)
    return ''
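Aside: a minimal, self-contained sketch of the pattern above - an aggregate (func.max) taken under an and_() date-range filter - written against the 1.x-style SQLAlchemy API these examples use. Table and values are illustrative, not from the example's project.

import datetime

import sqlalchemy as sa
from sqlalchemy import and_, func

engine = sa.create_engine("sqlite://")
metadata = sa.MetaData()
weights = sa.Table(
    "weights", metadata,
    sa.Column("user", sa.String),
    sa.Column("weight", sa.Float),
    sa.Column("date", sa.DateTime),
)
metadata.create_all(engine)

end_date = datetime.datetime.now()
start_date = end_date - datetime.timedelta(days=30)
stmt = sa.select([func.max(weights.c.weight)]).where(
    and_(weights.c.user == "alice",
         weights.c.date > start_date,
         weights.c.date <= end_date))
with engine.connect() as conn:
    max_weight = conn.execute(stmt).scalar()  # None when no rows match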
Code example #2
File: assembly.py Project: GavinHuttley/pycogent
def location_query(table, query_start, query_end,
                   start_col='seq_region_start', end_col='seq_region_end',
                   query=None, where='overlap'):
    # TODO should we allow for spans, overlaps, within?
    # the union result is a complex query and has to be appended to any other queries
    # in which it's being employed
    # should we be setting default values here regarding the columns that start/end
    # are pulled from, or explicitly state which columns
    if query is None:
        query = sql.select([table])
    
    if where == 'within':
        query.append_whereclause(sql.and_(table.c[start_col] < query_start,
                                          table.c[end_col] > query_end))
    else:
        query.append_whereclause(
            sql.or_(sql.and_(table.c[start_col] < query_start,
                             table.c[end_col] > query_end),
                    sql.and_(table.c[start_col] >= query_start,
                             table.c[start_col] <= query_end),
                    sql.and_(table.c[end_col] >= query_start,
                             table.c[end_col] <= query_end)))
    # the union is only being used here to order the results
    # that usage imposes the limitation that this function must be appended to
    # other query components being built into a fuller SQL query
    # makes me think it shouldn't be here?
    query = query.order_by(table.c[start_col])
    return query
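Aside: the or_() over three and_() branches above encodes "feature overlaps the query span". Assuming closed intervals, an equivalent single and_() exists - two spans overlap exactly when each starts before the other ends. A sketch; the helper name is mine, not pycogent's:

from sqlalchemy import sql

def overlap_clause(table, query_start, query_end,
                   start_col='seq_region_start', end_col='seq_region_end'):
    # equivalent to the three-branch or_() above for closed intervals
    return sql.and_(table.c[start_col] <= query_end,
                    table.c[end_col] >= query_start)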
Code example #3
File: api.py Project: foruy/openflow-multiopenstack
def server_get_by_mac(macaddr, ipaddr, group=True):
    session = get_session()
    data = {'has_more': False, 'src': None, 'dst': None}
    query = model_query(models.Instance.id, models.Instance.host,
                        models.Instance.user_id,
                        models.InstanceNetwork.address,
                        models.InstanceNetwork.mac_address,
                        session=session).filter(
            models.Instance.id==models.InstanceNetwork.instance_id)

    src = query.filter(models.InstanceNetwork.mac_address==macaddr).first()
    
    if src is not None:
        data['src'] = src
        dst = query.filter(models.InstanceNetwork.address==ipaddr,
                           models.Instance.user_id==src.user_id).first()
        if dst is not None:
            data['dst'] = dst
            if group: 
                query = model_query(models.SingleSecurityGroup,
                                    session=session).filter(or_(
                        and_(models.SingleSecurityGroup.start==src.id,
                             models.SingleSecurityGroup.end==dst.id),
                        and_(models.SingleSecurityGroup.start==dst.id,
                             models.SingleSecurityGroup.end==src.id)))
                if query.first():
                    data['has_more'] = True
    return data
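Aside: the or_() of two mirrored and_() clauses is the standard way to match an unordered pair of ids. The same pattern in isolation (table and helper are illustrative):

import sqlalchemy as sa

groups = sa.sql.table("security_group", sa.sql.column("start"), sa.sql.column("end"))

def symmetric_pair_clause(a, b):
    # match (a, b) stored in either direction
    return sa.or_(sa.and_(groups.c.start == a, groups.c.end == b),
                  sa.and_(groups.c.start == b, groups.c.end == a))

print(sa.select([groups]).where(symmetric_pair_clause(1, 2)))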
Code example #4
File: view.py Project: yudahai/xiaoshuo01
def book_menu(novel_id):
    """小说目录"""
    if request.method == 'GET':
        ybd_chapter = db_session.query(Chapter.id, Chapter.name).filter(and_(Chapter.novel_id == novel_id,
                                                                             Chapter.content_source == 1)).all()

        bqg_chapter = db_session.query(Chapter.id, Chapter.name).filter(and_(Chapter.novel_id == novel_id,
                                                                             Chapter.content_source == 2)).all()

        novel = db_session.query(Novel).filter(Novel.id == novel_id).first()

        return render_template('book_menu.html', ybd_chapter=ybd_chapter, novel=novel, novel_id=novel_id,
                               bqg_chapter=bqg_chapter)

    elif request.method == 'POST':
        data = request.get_json()
        novel_text_path = os.path.join(os.getcwd(), 'uploads/novel_txt')
        if data['update_from'] == 1:
            from backtask.getnovelfromybdu import get_single_novel_text
            novel_name, novel_download_url = db_session.query(Novel.name, Novel.chapter_source_ybd_url).\
                filter(Novel.id == novel_id).first()
            get_single_novel_text(novel_download_url.encode('utf-8'), novel_name, str(novel_id), novel_text_path)
            from backtask.getnovel import update_single_chapter_last_and_next, single_from_text_store_sql
            single_from_text_store_sql(novel_text_path, novel_id)
            update_single_chapter_last_and_next(novel_id, 1)
            return jsonify({"good": "1"})
        elif data['update_from'] == 2:
            from backtask.getnovel import update_single_chapter_infor, update_chapter_content
            url = db_session.query(Novel.chapter_source_bequge_url).filter(Novel.id == novel_id).first()[0]
            update_single_chapter_infor(novel_id, url)
            update_chapter_content(novel_id)
            return jsonify({'good': '1'})
Code example #5
File: budget.py Project: vlikin/budget
 def deattach_user(self, user_id):
   '''
     - The user is detached from the budget.
   '''
   if BudgetUserTable.query.filter(and_(BudgetUserTable.user_id == user_id,
                                        BudgetUserTable.budget_id == self.id,
                                        BudgetUserTable.role == BudgetUserTable.roles['owner'])).count() == 1:
     raise LogicException('Refusing to delete the single owner (user id=%d) of budget id=%d.' % (user_id, self.id))
   BudgetUserTable.query.filter(and_(BudgetUserTable.budget_id == self.id,
                                     BudgetUserTable.user_id == user_id)).delete()
Code example #6
File: messages.py Project: docstack/marconi
    def delete(self, queue, message_id, project, claim=None):
        if project is None:
            project = ''

        mid = utils.msgid_decode(message_id)
        if mid is None:
            return

        with self.driver.trans() as trans:
            try:
                self.get(queue, message_id, project, count=True)
            except errors.MessageDoesNotExist:
                return

            statement = tables.Messages.delete()
            and_stmt = [tables.Messages.c.id == mid]

            exists = sa.sql.select([tables.Messages.c.id], sa.and_(*and_stmt))

            if not trans.execute(exists).first():
                return

            cid = claim and utils.cid_decode(claim) or None

            if claim and cid is None:
                return

            and_stmt.append(tables.Messages.c.cid == cid)

            statement = statement.where(sa.and_(*and_stmt))
            res = trans.execute(statement)

            if res.rowcount == 0:
                raise errors.MessageIsClaimed(mid)
Code example #7
File: sql.py Project: eros-lige/UPCloud-keystone
    def list_domain_ids_for_user(self, user_id, group_ids, hints,
                                 inherited=False):
        with sql.transaction() as session:
            query = session.query(RoleAssignment.target_id)
            filters = []

            if user_id:
                sql_constraints = sqlalchemy.and_(
                    RoleAssignment.actor_id == user_id,
                    RoleAssignment.inherited == inherited,
                    RoleAssignment.type == AssignmentType.USER_DOMAIN)
                filters.append(sql_constraints)

            if group_ids:
                sql_constraints = sqlalchemy.and_(
                    RoleAssignment.actor_id.in_(group_ids),
                    RoleAssignment.inherited == inherited,
                    RoleAssignment.type == AssignmentType.GROUP_DOMAIN)
                filters.append(sql_constraints)

            if not filters:
                return []

            query = query.filter(sqlalchemy.or_(*filters)).distinct()

            return [assignment.target_id for assignment in query.all()]
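Aside: the shape here - collect and_() groups into a list, then OR them together - recurs in several examples below. A self-contained sketch (table and values are illustrative):

import sqlalchemy as sa

assignment = sa.sql.table("assignment",
                          sa.sql.column("actor_id"),
                          sa.sql.column("type"),
                          sa.sql.column("target_id"))

filters = []
user_id = "u1"
group_ids = ["g1", "g2"]
if user_id:
    filters.append(sa.and_(assignment.c.actor_id == user_id,
                           assignment.c.type == "UserDomain"))
if group_ids:
    filters.append(sa.and_(assignment.c.actor_id.in_(group_ids),
                           assignment.c.type == "GroupDomain"))

stmt = sa.select([assignment.c.target_id]).where(sa.or_(*filters)).distinct()
print(stmt)  # inspect the rendered SQL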
Code example #8
File: verifyrequest.py Project: Mango-J/pushmanager
 def post(self):
     if not self.current_user:
         return self.send_error(403)
     self.requestid = pushmanager.core.util.get_int_arg(self.request, 'id')
     self.pushid = pushmanager.core.util.get_int_arg(self.request, 'push')
     select_query = db.push_pushes.select().where(
         db.push_pushes.c.id == self.pushid,
     )
     update_query = db.push_requests.update().where(SA.and_(
         db.push_requests.c.state == 'staged',
         db.push_requests.c.id == self.requestid,
         SA.exists(
             [1],
             SA.and_(
                 db.push_pushcontents.c.push == self.pushid,
                 db.push_pushcontents.c.request == self.requestid,
             )
         ))).values({
             'state': 'verified',
         })
     finished_query = db.push_requests.select().where(SA.and_(
         db.push_requests.c.state == 'staged',
         SA.exists(
             [1],
             SA.and_(
                 db.push_pushcontents.c.push == self.pushid,
                 db.push_pushcontents.c.request == db.push_requests.c.id,
             )
         )))
     db.execute_transaction_cb([select_query, update_query, finished_query], self.on_db_complete)
Code example #9
File: assignable.py Project: VinnieJohns/ggrc-core
  def _get_relate_filter(cls, predicate, related_type):
    """Used for filtering by related_assignee.

    Returns:
        Boolean stating whether such an assignee exists.
    """
    # pylint: disable=invalid-name
    # The upper case variables are allowed here to shorthand the class names.
    Rel = relationship.Relationship
    RelAttr = relationship.RelationshipAttr
    Person = person.Person
    return db.session.query(Rel).join(RelAttr).join(
        Person,
        or_(and_(
            Rel.source_id == Person.id,
            Rel.source_type == Person.__name__
        ), and_(
            Rel.destination_id == Person.id,
            Rel.destination_type == Person.__name__
        ))
    ).filter(and_(
        RelAttr.attr_value.contains(related_type),
        RelAttr.attr_name == "AssigneeType",
        or_(and_(
            Rel.source_type == Person.__name__,
            Rel.destination_type == cls.__name__,
            Rel.destination_id == cls.id
        ), and_(
            Rel.destination_type == Person.__name__,
            Rel.source_type == cls.__name__,
            Rel.source_id == cls.id
        )),
        or_(predicate(Person.name), predicate(Person.email))
    )).exists()
Code example #10
File: crime_helpers.py Project: EmilyWebber/plenario
def chg_crime():
    # Step Seven: Find updates
    dat_crime_table = Table('dat_chicago_crimes_all', Base.metadata, 
        autoload=True, autoload_with=engine, extend_existing=True)
    src_crime_table = Table('src_chicago_crimes_all', Base.metadata, 
        autoload=True, autoload_with=engine, extend_existing=True)
    chg_crime_table = Table('chg_chicago_crimes_all', Base.metadata, 
        Column('id', Integer, primary_key=True),
        extend_existing=True)
    chg_crime_table.drop(bind=engine, checkfirst=True)
    chg_crime_table.create(bind=engine)
    src_cols = [c for c in src_crime_table.columns if c.name not in ['id', 'start_date', 'end_date']]
    dat_cols = [c for c in dat_crime_table.columns if c.name not in ['id', 'start_date', 'end_date']]
    and_args = []
    for s, d in zip(src_cols, dat_cols):
        ors = or_(s != None, d != None)
        ands = and_(ors, s != d)
        and_args.append(ands)
    ins = chg_crime_table.insert()\
          .from_select(
              ['id'],
              select([src_crime_table.c.id])\
                  .select_from(src_crime_table.join(dat_crime_table,
                      src_crime_table.c.id == dat_crime_table.c.id))\
                  .where(or_(
                          and_(dat_crime_table.c.current_flag == True, 
                                and_(or_(src_crime_table.c.id != None, dat_crime_table.c.id != None), 
                                src_crime_table.c.id != dat_crime_table.c.id)),
                          *and_args))
          )
    conn = engine.contextual_connect()
    conn.execute(ins)
    return 'Changes found'
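Aside on the or_(s != None, d != None) guard above: in SQL, s != d evaluates to NULL ("unknown") whenever either side is NULL, so changed rows involving NULLs would silently drop out without it. SQLAlchemy compiles != None to IS NOT NULL:

import sqlalchemy as sa

t = sa.sql.table("t", sa.sql.column("x"), sa.sql.column("y"))
print(t.c.x != None)  # renders: t.x IS NOT NULL
print(sa.and_(sa.or_(t.c.x != None, t.c.y != None), t.c.x != t.c.y))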
Code example #11
File: claims.py Project: openstacker/zaqar
    def update(self, queue, claim_id, metadata, project=None):
        if project is None:
            project = ''

        cid = utils.cid_decode(claim_id)
        if cid is None:
            raise errors.ClaimDoesNotExist(claim_id, queue, project)

        age = utils.get_age(tables.Claims.c.created)
        with self.driver.trans() as trans:
            qid = utils.get_qid(self.driver, queue, project)

            update = tables.Claims.update().where(sa.and_(
                tables.Claims.c.ttl > age,
                tables.Claims.c.id == cid,
                tables.Claims.c.qid == qid))

            update = update.values(ttl=metadata['ttl'])

            res = trans.execute(update)
            if res.rowcount != 1:
                raise errors.ClaimDoesNotExist(claim_id, queue, project)

            update = (tables.Messages.update().
                      values(ttl=metadata['ttl'] + metadata['grace']).
                      where(sa.and_(
                          tables.Messages.c.ttl < metadata['ttl'],
                          tables.Messages.c.cid == cid)))
            trans.execute(update)
Code example #12
File: dal.py Project: jlaw9/GraphSpace
def get_edges(db_session, edges, order=desc(Edge.updated_at), page=0, page_size=10, partial_matching=False):
	edges = [('%%%s%%' % u, '%%%s%%' % v) for u, v in edges] if partial_matching else edges
	filter_group = [and_(Edge.head_node_id.ilike(u), Edge.tail_node_id.ilike(v)) for u, v in edges]
	filter_group.extend(
		[and_(Edge.head_node.has(Node.label.ilike(u)), Edge.tail_node.has(Node.label.ilike(v))) for u, v in edges])
	return db_session.query(Edge).options(joinedload('head_node'), joinedload('tail_node')).filter(
		or_(*filter_group)).order_by(order).limit(page_size).offset(page * page_size)
Code example #13
def findbyhost(session, host_id, created_start=None,
               created_end=None, created_user_id=None, desc=False):

    query = session.query(Machine).add_entity(Machine2Jobgroup).join(Machine2Jobgroup)

    query = query.filter(
                or_(
                    and_(Machine.parent_id == host_id, Machine.attribute == MACHINE_ATTRIBUTE['GUEST']),
                    and_(Machine.id == host_id, Machine.attribute == MACHINE_ATTRIBUTE['HOST'])
                )
            )

    if created_user_id is not None:
        query = query.filter(Machine2Jobgroup.created_user_id.in_(created_user_id))

    if created_start and created_end:
        query = query.filter(Machine2Jobgroup.created.between(created_start, created_end))
        
    elif created_start and (created_end is None):
        query = query.filter(created_start <= Machine2Jobgroup.created)
        
    elif (not created_start) and created_end:
        query = query.filter(Machine2Jobgroup.created <= created_end)
        
    if desc is True:
        return query.order_by(Machine2Jobgroup.id.desc()).all()
    else:
        return query.order_by(Machine2Jobgroup.id.asc()).all()
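Aside: Column.between(a, b) used above compiles to an inclusive SQL BETWEEN; the one-sided elif branches spell the same comparison out for open-ended ranges. For instance:

import sqlalchemy as sa

m2j = sa.sql.table("machine2jobgroup", sa.sql.column("created"))
print(m2j.c.created.between("2024-01-01", "2024-12-31"))
# machine2jobgroup.created BETWEEN :created_1 AND :created_2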
Code example #14
File: dal.py Project: jlaw9/GraphSpace
def get_graphs_by_edges_and_nodes_and_names(db_session, group_ids=None, names=None, nodes=None, edges=None, tags=None,
                                            order=desc(Graph.updated_at), page=0, page_size=10, partial_matching=False,
                                            owner_email=None, is_public=None):
	query = db_session.query(Graph)

	edges = [] if edges is None else edges
	nodes = [] if nodes is None else nodes
	names = [] if names is None else names
	tags = [] if tags is None else tags

	edges = [('%%%s%%' % u, '%%%s%%' % v) for u, v in edges] if partial_matching else edges
	nodes = ['%%%s%%' % node for node in nodes] if partial_matching else nodes
	names = ['%%%s%%' % name for name in names] if partial_matching else names
	tags = ['%%%s%%' % tag for tag in tags]

	graph_filter_group = []
	if is_public is not None:
		graph_filter_group.append(Graph.is_public == is_public)
	if owner_email is not None:
		graph_filter_group.append(Graph.owner_email == owner_email)
	if group_ids is not None:
		query = query.filter(Graph.shared_with_groups.any(Group.id.in_(group_ids)))
	if len(graph_filter_group) > 0:
		query = query.filter(*graph_filter_group)

	names_filter_group = [Graph.name.ilike(name) for name in names]
	tags_filter_group = [GraphTag.name.ilike(tag) for tag in tags]
	nodes_filter_group = [Node.label.ilike(node) for node in nodes]
	nodes_filter_group.extend([Node.name.ilike(node) for node in nodes])
	edges_filter_group = [and_(Edge.head_node.has(Node.name.ilike(u)), Edge.tail_node.has(Node.name.ilike(v))) for u, v
	                      in edges]
	edges_filter_group.extend(
		[and_(Edge.tail_node.has(Node.name.ilike(u)), Edge.head_node.has(Node.name.ilike(v))) for u, v in edges])
	edges_filter_group.extend(
		[and_(Edge.head_node.has(Node.label.ilike(u)), Edge.tail_node.has(Node.label.ilike(v))) for u, v in edges])
	edges_filter_group.extend(
		[and_(Edge.tail_node.has(Node.label.ilike(u)), Edge.head_node.has(Node.label.ilike(v))) for u, v in edges])

	options_group = []
	if len(nodes_filter_group) > 0:
		options_group.append(joinedload('nodes'))
	if len(edges_filter_group) > 0:
		options_group.append(joinedload('edges'))
	if len(options_group) > 0:
		query = query.options(*options_group)

	combined_filter_group = []
	if len(nodes_filter_group) > 0:
		combined_filter_group.append(Graph.nodes.any(or_(*nodes_filter_group)))
	if len(edges_filter_group) > 0:
		combined_filter_group.append(Graph.edges.any(or_(*edges_filter_group)))
	if len(names_filter_group) > 0:
		combined_filter_group.extend(names_filter_group)
	if len(tags_filter_group) > 0:
		combined_filter_group.extend(tags_filter_group)

	if len(combined_filter_group) > 0:
		query = query.filter(or_(*combined_filter_group))

	return query.order_by(order).limit(page_size).offset(page * page_size).all()
Code example #15
File: dal.py Project: jlaw9/GraphSpace
def find_edges(db_session, is_directed=None, names=None, edges=None, graph_id=None, limit=None, offset=None,
               order_by=desc(Edge.updated_at)):
	query = db_session.query(Edge)

	if graph_id is not None:
		query = query.filter(Edge.graph_id == graph_id)

	if is_directed is not None:
		query = query.filter(Edge.is_directed == is_directed)

	names = [] if names is None else names
	edges = [] if edges is None else edges
	if len(names + edges) > 0:
		names_filter = [Edge.name.ilike(name) for name in names]
		edges_filter = [and_(Edge.head_node_name.ilike(u), Edge.tail_node_name.ilike(v)) for u, v in edges]
		edges_filter.extend([and_(Edge.tail_node_name.ilike(u), Edge.head_node_name.ilike(v)) for u, v in edges])
		edges_filter.extend([and_(Edge.head_node_label.ilike(u), Edge.tail_node_label.ilike(v)) for u, v in edges])
		edges_filter.extend([and_(Edge.tail_node_label.ilike(u), Edge.head_node_label.ilike(v)) for u, v in edges])
		query = query.filter(or_(*(edges_filter + names_filter)))

	total = query.count()

	if order_by is not None:
		query = query.order_by(order_by)

	if offset is not None and limit is not None:
		query = query.limit(limit).offset(offset)

	return total, query.all()
Code example #16
File: user.py Project: ECGHelloWorld/PhoenixNow
def admin_weekly_checkins(date, grade=None):
    week = week_magic(date)
    if grade is None:
        checkins = Checkin.query.filter(and_(func.date(Checkin.checkin_timestamp) >= week[0],
                                             func.date(Checkin.checkin_timestamp) <= week[4]))\
            .order_by(Checkin.checkin_timestamp)
    else:
        checkins = Checkin.query.filter(and_(Checkin.user.has(grade=grade),
                                             func.date(Checkin.checkin_timestamp) >= week[0],
                                             func.date(Checkin.checkin_timestamp) <= week[4]))\
            .order_by(Checkin.checkin_timestamp)
    return checkins
Code example #17
File: __init__.py Project: Callek/build-relengapi
def list_tokens(typ=None):
    """Get a list of all unlimited-duration tokens the user has permisison to
    see.

    With ``?typ=..``, limit to tokens of that type.

    Note that the response does not include the actual token strings.
    Such strings are only revealed when creating a new token."""
    tbl = tables.Token
    email = get_user_email()

    conds = []
    if p.base.tokens.prm.view.can():
        conds.append(tbl.typ == 'prm')
    if p.base.tokens.usr.view.all.can():
        conds.append(tbl.typ == 'usr')
    elif email and p.base.tokens.usr.view.my.can():
        conds.append(sa.and_(tbl.typ == 'usr',
                             tbl.user == email))
    if not conds:
        return []
    disjunction = sa.or_(*conds)
    if typ:
        filter_cond = sa.and_(disjunction, tbl.typ == typ)
    else:
        filter_cond = disjunction

    q = tables.Token.query.filter(filter_cond)
    return [t.to_jsontoken() for t in q.all()]
Code example #18
File: db.py Project: B-Rich/haslinger
    def get_mapped_codes(cls, code='', forward=True, diagnosis=True):
        """ Note that ICD9 code must be passed in with the dot, but
        the dot must be stripped for Mapper. """
        if forward:
            if not Icd9Code.is_valid_code(code=code, diagnosis=diagnosis):
                raise cls.InvalidIcd9Code('%s is an invalid ICD9 code.' % code)

            matches = Mapper.query.filter(
                    and_(
                        Mapper.forward==True,
                        Mapper.icd9code==code.replace('.', ''),
                        Mapper.diagnosis==diagnosis,)
                ).order_by('choice_list', 'icd10code').all()
        else:
            if not Icd10Code.query.filter(and_(Icd10Code.code==code, Icd10Code.diagnosis==diagnosis)).count() == 1:
                raise cls.InvalidIcd10Code('Invalid ICD10 code.')

            matches = Mapper.query.filter(
                    and_(
                        Mapper.forward==False,
                        Mapper.icd10code==code.replace('.', ''),
                        Mapper.diagnosis==diagnosis,)
                ).all()

        return matches
Code example #19
  def pushDB(self, db, sensor_id, parent_domain_id):
    q = db.query(Domain.domain_id)
    q = q.filter(and_(Domain.parent_domain_id == parent_domain_id, Domain.domain_name == self.name))
    res = q.all()

    if len(res) == 0:
      o = Domain()
      o.domain_name = self.name
      o.parent_domain_id = parent_domain_id
      db.add(o)
      db.flush()

      self.oid = o.domain_id

      sd = Sensor_Domain()
      sd.domain_id = self.oid
      sd.sensor_id = sensor_id
      sd.first_seen = self.fs
      sd.last_seen = self.ls

      db.add(sd)
      db.flush()
    else:
      self.oid = res[0][0]

      q = db.query(Sensor_Domain)
      q = q.filter(and_(Sensor_Domain.domain_id == self.oid, Sensor_Domain.sensor_id == sensor_id))
      q.update({Sensor_Domain.first_seen: case([(Sensor_Domain.first_seen > self.fs, self.fs)], else_=Sensor_Domain.first_seen),
                Sensor_Domain.last_seen: case([(Sensor_Domain.last_seen < self.ls, self.ls)], else_=Sensor_Domain.last_seen)}, synchronize_session=False)

    return self.oid
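Aside: the case([...], else_=...) construct above merges first_seen/last_seen in a single conditional UPDATE, with no read-modify-write round trip. A minimal sketch using the same 1.x list-of-whens form:

import sqlalchemy as sa
from sqlalchemy import case

sd = sa.sql.table("sensor_domain",
                  sa.sql.column("first_seen"),
                  sa.sql.column("last_seen"))
new_fs, new_ls = "2024-01-01", "2024-06-30"
stmt = sd.update().values(
    first_seen=case([(sd.c.first_seen > new_fs, new_fs)], else_=sd.c.first_seen),
    last_seen=case([(sd.c.last_seen < new_ls, new_ls)], else_=sd.c.last_seen))
print(stmt)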
Code example #20
def generate_plots(session, result_dir, output_dir):
    ratios = read_ratios(result_dir)

    iteration = session.query(func.max(cm2db.RowMember.iteration))
    clusters = [r[0] for r in session.query(cm2db.RowMember.cluster).distinct().filter(
        cm2db.RowMember.iteration == iteration)]

    figure = plt.figure(figsize=(6,3))
    for cluster in clusters:
        plt.clf()
        plt.cla()
        genes = [r.row_name.name for r in session.query(cm2db.RowMember).filter(
            and_(cm2db.RowMember.cluster == cluster, cm2db.RowMember.iteration == iteration))]
        cluster_conds = [c.column_name.name for c in session.query(cm2db.ColumnMember).filter(
            and_(cm2db.ColumnMember.cluster == cluster, cm2db.ColumnMember.iteration == iteration))]
        all_conds = [c[0] for c in session.query(cm2db.ColumnName.name).distinct()]
        non_cluster_conds = [cond for cond in all_conds if cond not in set(cluster_conds)]

        cluster_data = ratios.loc[genes, cluster_conds]
        non_cluster_data = ratios.loc[genes, non_cluster_conds]
        min_value = ratios.min().min()
        max_value = ratios.max().max()
        for gene in genes:
            values = [normalize_js(val) for val in cluster_data.loc[gene,:].values]
            values += [normalize_js(val) for val in non_cluster_data.loc[gene,:].values]
            plt.plot(values)

        # plot the "in"/"out" separator line
        cut_line = len(cluster_conds)
        plt.plot([cut_line, cut_line], [min_value, max_value], color='red',
                 linestyle='--', linewidth=1)
        plt.savefig(os.path.join(output_dir, "exp-%d" % cluster))
    plt.close(figure)
Code example #21
File: api.py Project: yf225/Wasup-server
def get_photo_data_for_live_chat():
    cur_user_id = request.args.get('cur_user_id', None)
    if cur_user_id:
        cur_user_id = str(cur_user_id)
    else:
        abort(404)
    try:
        cur_user = regional_db.session.query(User).filter(and_(User.id==cur_user_id, User.is_deleted==False)).one()
    except NoResultFound:
        abort(404)

    photo_id = request.args.get('photo_id', None) #first version

    message_id = request.args.get('message_id', None) #second version

    if photo_id:
        message_id = str(photo_id)
    elif message_id:
        message_id = str(message_id)
    else:
        abort(404)

    try:
        message = regional_db.session.query(Message).filter(and_(Message.id==message_id, Message.is_deleted==False)).one()
    except NoResultFound:
        abort(404)

    if message.type == 'photo':
        photo_url = message.get_media_url()
    elif message.type == 'special' and message.special_message_type == 'drawing':
        photo_url = regional_db.session.query(Message).filter(and_(Message.id==message.media_message_id, Message.is_deleted==False)).one().get_media_url()
    else:
        # avoid an unbound photo_url for unexpected message types
        abort(404)

    return redirect(photo_url)
Code example #22
File: users.py Project: ryanpetrello/draughtcraft
    def validate(cls, username, password):
        # Lookup the user
        user = cls.get_by(username=username)
        if user:
            salt = user.password.split(':')[0]
            pbk = cls.__hash_password__(password, salt)

            # If PBKDF2 matches...
            match = cls.query.filter(and_(
                cls.username == username,
                cls.password == pbk
            )).first()
            if match is not None:
                return match

            # Otherwise the user might have a sha256 password
            salt = getattr(
                getattr(conf, 'session', None),
                'password_salt',
                'example'
            )
            sha = sha256(password + salt).hexdigest()

            # If sha256 matches...
            match = cls.query.filter(and_(
                cls.username == username,
                cls.password == sha
            )).first()
            if match is not None:
                # Overwrite to use PBKDF2 in the future
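                # (assuming assignment to `password` re-hashes via a property
                # setter rather than storing the plaintext)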
                user.password = password
                return match
Code example #23
File: api.py Project: quadewarren/quark
def _network_find(context, limit, sorts, marker, page_reverse, fields,
                  defaults=None, provider_query=False, **filters):
    query = context.session.query(models.Network)
    model_filters = _model_query(context, models.Network, filters, query)

    if defaults:
        invert_defaults = False
        if INVERT_DEFAULTS in defaults:
            invert_defaults = True
            defaults.pop(0)
        if filters and invert_defaults:
            query = query.filter(and_(not_(models.Network.id.in_(defaults)),
                                      and_(*model_filters)))
        elif not provider_query and filters and not invert_defaults:
            query = query.filter(or_(models.Network.id.in_(defaults),
                                     and_(*model_filters)))

        elif not invert_defaults:
            query = query.filter(models.Network.id.in_(defaults))
    else:
        query = query.filter(*model_filters)

    if "join_subnets" in filters:
        query = query.options(orm.joinedload(models.Network.subnets))

    return paginate_query(query, models.Network, limit, sorts, marker)
Code example #24
def GetJson_ACPeriodCate(userID, modeDate, startDate, endDate):
    """返回Json:门禁趋势与分布
    :param userID: 查询工号
    :param modeDate: 日期模式,合并到最短时间单位. 0-day, 1-week, 2-month, 3-Quarter. (default 2)
    :param startDate: 限定来源数据起始日期
    :param endDate: 限定来源数据结束日期
    """
    # Query.
    strQuery = db.session.query(acrec.ac_datetime, ac_loc.category).filter(
        and_(acrec.user_id == userID, acrec.node_id == ac_loc.node_id)).order_by(acrec.ac_datetime)
    if len(startDate) != 0:
        strQuery = strQuery.filter(and_(acrec.ac_datetime >= startDate, acrec.ac_datetime <= endDate))
    results = strQuery.all()
    if len(results) == 0:
        return {'errMsg': u'No records found.'}

    res_datetimes = [result.ac_datetime for result in results]
    res_categorys = [result.category for result in results]

    from ACPeriodCate import ACPeriodCate
    process = ACPeriodCate(res_datetimes, res_categorys)
    json_dateTrend = process.get_date_trend(modeDate)
    json_timeDistribution = process.get_time_distribution()

    json_response = {'json_dateTrend':json_dateTrend, 'json_timeDistribution':json_timeDistribution}

    return json_response
Code example #25
    def _get_filter(self, tag, user_id, include_draft, conn):
        filters = []
        if tag:
            tag = tag.upper()
            tag_statement = sqla.select([self._tag_table.c.id]).where(
                self._tag_table.c.text == tag)
            tag_result = conn.execute(tag_statement).fetchone()
            if tag_result is not None:
                tag_id = tag_result[0]
                tag_filter = sqla.and_(
                    self._tag_posts_table.c.tag_id == tag_id,
                    self._post_table.c.id == self._tag_posts_table.c.post_id
                )
                filters.append(tag_filter)

        if user_id:
            user_filter = sqla.and_(
                self._user_posts_table.c.user_id == user_id,
                self._post_table.c.id == self._user_posts_table.c.post_id
            )
            filters.append(user_filter)

        draft_filter = self._post_table.c.draft == 1 if include_draft else \
            self._post_table.c.draft == 0
        filters.append(draft_filter)
        sql_filter = sqla.and_(*filters)
        return sql_filter
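Usage note: the clause returned by _get_filter plugs straight into a Core select(); a hedged sketch with an illustrative table layout:

import sqlalchemy as sqla

post = sqla.sql.table("post", sqla.sql.column("id"), sqla.sql.column("draft"))
sql_filter = sqla.and_(post.c.draft == 0)  # stand-in for _get_filter's result
print(sqla.select([post.c.id]).where(sql_filter))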
Code example #26
File: items.py Project: SR1s/WMS
def perform_create():
    details = json.loads(request.form['details'])
    item_result = list()
    isOk = True
    store = list()
    for detail in details:
        item = Item.query.filter_by(number=detail['number']).first()
        if item:
            place_id = session['place_id']
            for c in detail['columns']:
                have = Storage.query.filter(and_(Storage.item_id == item.id,
                                                 Storage.place_id == place_id,
                                                 Storage.size == c['size'])).first().amount
                rest = have - int(c['amount'])
                if rest < 0:
                    isOk = False
                    flash('Insufficient stock for item number %s, size %s: %s left, %s needed' %
                          (detail['number'], c['size'], have, c['amount']))
                store.append(dict(item_id=item.id, amount=rest, size=c['size']))
        else:
            isOk = False
            flash('No item with number %s exists' % detail['number'], category='error')
    if isOk:
        for s in store:
            change = Storage.query.filter(and_(Storage.item_id == s['item_id'],
                                               Storage.size == s['size'],
                                               Storage.place_id == session['place_id'])).first()
            change.amount = s['amount']
            db.session.add(change)
        db.session.commit()
        flash('Shipment succeeded', 'normal')
    return redirect(url_for('items.list_all'))
Code example #27
File: models.py Project: mmarescc/Parenchym
    def load_root(cls, sess, name='root', use_cache=True):
        """
        Loads root resource of resource tree.

        Since we allow several trees in the same table, argument ``name`` tells
        which one we want.

        :param sess: A DB session
        :param name: Name of the wanted root node
        :return: Instance of the root node or, None if not found
        """
        # CAVEAT: Setup fails if we use cache here!
        if use_cache:
            r = sess.query(
                cls
            ).options(
                pym.cache.FromCache("auth_long_term",
                cache_key='resource:{}:None'.format(name))
            ).options(
                pym.cache.RelationshipCache(cls.children, "auth_long_term",
                cache_key='resource:{}:None:children'.format(name))
            ).options(
                # CAVEAT: Program hangs if we use our own cache key here!
                pym.cache.RelationshipCache(cls.acl, "auth_long_term")  # ,
                #cache_key='resource:{}:None:acl'.format(name))
            ).filter(
                sa.and_(cls.parent_id == None, cls.name == name)
            ).one()
        else:
            r = sess.query(
                cls
            ).filter(
                sa.and_(cls.parent_id == None, cls.name == name)
            ).one()
        return r
Code example #28
File: api.py Project: quadewarren/quark
def _subnet_find(context, limit, sorts, marker, page_reverse, fields,
                 defaults=None, provider_query=False, **filters):
    query = context.session.query(models.Subnet)
    model_filters = _model_query(context, models.Subnet, filters, query)

    if defaults:
        invert_defaults = False
        if INVERT_DEFAULTS in defaults:
            invert_defaults = True
            defaults.pop(0)
        if filters and invert_defaults:
            query = query.filter(and_(not_(models.Subnet.id.in_(defaults)),
                                      and_(*model_filters)))
        elif not provider_query and filters and not invert_defaults:
            query = query.filter(or_(models.Subnet.id.in_(defaults),
                                     and_(*model_filters)))

        elif not invert_defaults:
            query = query.filter(models.Subnet.id.in_(defaults))
    else:
        query = query.filter(*model_filters)

    if "join_dns" in filters:
        query = query.options(orm.joinedload(models.Subnet.dns_nameservers))

    if "join_routes" in filters:
        query = query.options(orm.joinedload(models.Subnet.routes))

    if "join_pool" in filters:
        query = query.options(orm.undefer('_allocation_pool_cache'))

    return paginate_query(query, models.Subnet, limit, sorts, marker)
Code example #29
 def get_cases(status, current_user, user=False, QA=False, current_user_perms=False, case_perm_checker=None,
               case_man=False):
     q = session.query(Case)
     if status != 'All' and status != "Queued":
         q = q.filter_by(currentStatus=status)
     elif status == "Queued":
         q = q.filter_by(currentStatus=CaseStatus.OPEN).join('tasks').filter(Task.currentStatus == TaskStatus.QUEUED)
     if user is True:
         q = q.join('tasks').join(Task.task_roles)
         if QA:
             q = q.filter(and_(UserTaskRoles.user_id == current_user.id, UserTaskRoles.role.in_(UserTaskRoles.qa_roles)))
         else:
             q = q.filter(and_(UserTaskRoles.user_id == current_user.id, UserTaskRoles.role.in_(UserTaskRoles.inv_roles)))
         return q.order_by(desc(Case.creation_date)).all()
     else:
         cases = q.order_by(desc(Case.creation_date)).all()
         output = []
         for case in cases:
             if (case_man is True and case.principle_case_manager is None and case.secondary_case_manager is None) \
                     or case_man is False:
                 try:
                     case_perm_checker(current_user, case, "view")
                     output.append(case)
                 except Forbidden:
                     pass
         return output
Code example #30
File: api.py Project: anilyadav/mistral
def _get_criterion(resource_id, member_id=None, is_owner=True):
    """Generates criterion for querying resource_member_v2 table."""

    # Resource owner query resource membership with member_id.
    if is_owner and member_id:
        return sa.and_(
            models.ResourceMember.project_id == security.get_project_id(),
            models.ResourceMember.resource_id == resource_id,
            models.ResourceMember.member_id == member_id
        )
    # Resource owner query resource memberships.
    elif is_owner and not member_id:
        return sa.and_(
            models.ResourceMember.project_id == security.get_project_id(),
            models.ResourceMember.resource_id == resource_id,
        )

    # Other members query other resource membership.
    elif not is_owner and member_id and member_id != security.get_project_id():
        return None

    # Resource member query resource memberships.
    return sa.and_(
        models.ResourceMember.member_id == security.get_project_id(),
        models.ResourceMember.resource_id == resource_id
    )
Code example #31
class PostService(BaseService):
    async def list(
        self,
        offset: int = 0,
        limit: int = 10,
        username: Optional[str] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
    ) -> PostListResult:
        """List posts.

        :param offset: the number of posts to skip
        :param limit: the number of posts to fetch
        :param username: username of post owner to filter
        :param start_time: the start of creation time to filter posts
        :param end_time: the end of creation time to filter posts
        :return: the list query result
        """

        posts_statement = sa.select([schema.posts.c.shortcode
                                     ]).select_from(schema.posts)
        if username:
            posts_statement = posts_statement.where(
                schema.posts.c.username == username)
        if start_time:
            start_time = datetime.utcfromtimestamp(start_time.timestamp())
            posts_statement = posts_statement.where(
                schema.posts.c.timestamp >= start_time)
        if end_time:
            end_time = datetime.utcfromtimestamp(end_time.timestamp())
            posts_statement = posts_statement.where(
                schema.posts.c.timestamp < end_time)
        posts_statement = posts_statement.order_by(
            schema.posts.c.timestamp.desc()).offset(offset).limit(limit)
        statement = sa.select([
            schema.posts.c.shortcode,
            schema.posts.c.username,
            schema.posts.c.timestamp,
            schema.posts.c.type,
            schema.posts.c.caption,
            schema.posts.c.caption_hashtags,
            schema.posts.c.caption_mentions,
            schema.post_items.c.index.label('item_index'),
            schema.post_items.c.type.label('item_type'),
            schema.post_items.c.duration.label('item_duration'),
            schema.post_items.c.filename.label('item_filename'),
            schema.post_items.c.thumb_image_filename.label(
                'item_thumb_image_filename'),
        ]).select_from(
            schema.posts.join(
                schema.post_items,
                schema.posts.c.shortcode == schema.post_items.c.shortcode)
        ).where(schema.post_items.c.shortcode.in_(posts_statement)).order_by(
            schema.posts.c.timestamp.desc(), schema.post_items.c.index.asc())

        posts = []
        for result in await self.database.fetch_all(statement):
            item = PostItem(
                index=result['item_index'],
                type=result['item_type'],
                duration=result['item_duration'],
                filename=result['item_filename'],
                thumb_image_filename=result['item_thumb_image_filename'],
            )
            if posts and posts[-1].shortcode == result['shortcode']:
                posts[-1].items.append(item)
            else:
                post = Post(items=[item], **result)
                post.timestamp = post.timestamp.replace(tzinfo=timezone.utc)
                posts.append(post)

        statement = sa.select([sa.func.count()]).select_from(schema.posts)
        count = await self.database.fetch_val(statement)

        return PostListResult(posts=posts,
                              limit=limit,
                              offset=offset,
                              count=count)

    async def exists(self, shortcode: str) -> bool:
        """Check if a post exists.

        :param shortcode: shortcode of the post to check
        :return: if the post exists
        """

        statement = sa.select([schema.posts.c.shortcode
                               ]).where(schema.posts.c.shortcode == shortcode)
        exists_statement = sa.select([sa.exists(statement)])
        return await self.database.fetch_val(query=exists_statement)

    async def delete(self, shortcode: str, index: Optional[int] = None):
        """Delete post and post items

        :param shortcode: the shortcode of the post to delete
        :param index: index of the post item to delete
        """

        async with self.database.transaction():
            # find info about post items
            list_statement = sa.select([
                schema.posts.c.username,
                schema.post_items.c.index,
                schema.post_items.c.type,
                schema.post_items.c.filename,
                schema.post_items.c.thumb_image_filename,
            ]).select_from(
                schema.posts.join(
                    schema.post_items, schema.posts.c.shortcode ==
                    schema.post_items.c.shortcode)).where(
                        schema.post_items.c.shortcode == shortcode)
            post_items = [
                item for item in await self.database.fetch_all(list_statement)
            ]

            # move files to recycle
            for item in post_items:
                if index is not None and item['index'] != index:
                    continue
                if filename := item['filename']:
                    media_path = self.post_dir.joinpath(
                        item['username'], filename)
                    shutil.chown(media_path, os.getuid(), os.getgid())
                    media_path.unlink()
                if thumb_image_filename := item['thumb_image_filename']:
                    thumb_path = self.thumb_images_dir.joinpath(
                        item['username'], thumb_image_filename)
                    shutil.chown(thumb_path, os.getuid(), os.getgid())
                    thumb_path.unlink()

            # delete post item records (and the post itself if only one item was left)
            if index is not None:
                where_clause = sa.and_(
                    schema.post_items.c.shortcode == shortcode,
                    schema.post_items.c.index == index,
                )
            else:
                where_clause = schema.post_items.c.shortcode == shortcode
            delete_statement = sa.delete(schema.post_items).where(where_clause)
            await self.database.execute(delete_statement)
            if len(post_items) == 1:
                delete_statement = sa.delete(
                    schema.posts).where(schema.posts.c.shortcode == shortcode)
                await self.database.execute(delete_statement)
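Aside: list() above pages the parent rows first and then fetches the joined rows with in_(subquery); that keeps LIMIT/OFFSET counting posts rather than post_items rows. The skeleton of the pattern in isolation (tables are illustrative):

import sqlalchemy as sa

posts = sa.sql.table("posts", sa.sql.column("shortcode"), sa.sql.column("timestamp"))
items = sa.sql.table("post_items", sa.sql.column("shortcode"), sa.sql.column("index"))

page = sa.select([posts.c.shortcode]).order_by(
    posts.c.timestamp.desc()).offset(0).limit(10)
stmt = sa.select([posts.c.shortcode, items.c.index]).select_from(
    posts.join(items, posts.c.shortcode == items.c.shortcode)
).where(items.c.shortcode.in_(page))
print(stmt)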
Code example #32
    def _fetch_crop_parameter_values(self, metadata, idcrop_parametrization):
        """Derived the crop parameter values from the table crop_parametrization_parameter
        table for given idcrop and add directly to dict self[]..
        """
        t1 = Table("crop_parametrization_parameter", metadata, autoload=True)
        t2 = Table("global_crop_parameters", metadata, autoload=True)

        # Pull single value parameters from table crop_parametrization_parameter
        sm = select([t1.c.xvalue, t2.c.crop_parameter],
                    and_(t1.c.idcrop_parametrization == idcrop_parametrization,
                         t1.c.idcrop_parameter == t2.c.idcrop_parameter,
                         t2.c.multi == 'N', t2.c.idcategory == 1))
        sc = sm.execute()
        rows = sc.fetchall()
        sc.close()

        for row in rows:
            if row.crop_parameter not in self.single2tabular:
                self[row.crop_parameter] = float(row.xvalue)
            else:
                pvalue = float(row.xvalue)
                code, value = self._convert_single2tabular(
                    row.crop_parameter, pvalue)
                self[code] = value

        # Check that we have had all the single and single2tabular parameters now
        for parameter_code in (self.parameter_codes_single +
                               tuple(self.single2tabular.keys())):
            found = False
            if parameter_code not in self.single2tabular:
                if parameter_code in self:
                    found = True
            else:
                for key in self:
                    if key.startswith(parameter_code):
                        found = True
                        break
            if not found and parameter_code not in self.parameters_optional:
                msg = (
                    "No parameter value found for idcrop_parametrization=%s, "
                    "parameter_code='%s'." %
                    (self.idcrop_parametrization, parameter_code))
                raise exc.PCSEError(msg)

        # Pull tabular parameters from crop_parametrization_parameter
        for crop_parameter in self.parameter_codes_tabular:
            pattern = crop_parameter + r'%'
            sc = select(
                [t1.c.xvalue, t1.c.yvalue, t2.c.crop_parameter],
                and_(t1.c.idcrop_parametrization == idcrop_parametrization,
                     t1.c.idcrop_parameter == t2.c.idcrop_parameter,
                     t2.c.idcategory == 1, t2.c.multi == 'Y',
                     t2.c.crop_parameter.like(pattern)),
                order_by=[t2.c.crop_parameter]).execute()
            rows = sc.fetchall()
            sc.close()
            if not rows and crop_parameter not in self.parameters_optional:
                msg = "No parameter value found for idcrop_parametrization=%s, crop_parameter='%s'."
                raise exc.PCSEError(
                    msg % (self.idcrop_parametrization, crop_parameter))

            if len(rows) == 1:
                msg = (
                    "Single parameter value found for idcrop_parametrization=%s, "
                    "crop_parameter='%s' while tabular parameter expected." %
                    (idcrop_parametrization, crop_parameter))
                raise exc.PCSEError(msg)
            values = []
            for row in rows:
                values.extend([float(row.xvalue), float(row.yvalue)])
            self[crop_parameter] = values
Code example #33
    def __init__(self,
                 engine,
                 idgrid,
                 idcrop_parametrization,
                 campaign_year,
                 campaign_start=None):
        # Initialise
        list.__init__(self)
        self.idgrid = idgrid
        self.idcrop_parametrization = idcrop_parametrization
        self.crop_name = fetch_crop_name(engine, idcrop_parametrization)
        self.campaign_year = campaign_year
        self.amdict = {}

        # Use the idcrop_parametrization to search in the table crop_calendars
        metadata = MetaData(engine)
        t = Table("crop_calendars", metadata, autoload=True)
        sm = select(
            [t],
            and_(t.c.idgrid == self.idgrid,
                 t.c.idcrop_parametrization == self.idcrop_parametrization,
                 t.c.year == self.campaign_year))
        sc = sm.execute()
        row = sc.fetchone()
        sc.close()

        # Process the query result - dates should be in the format 'yyyy-mm-dd'!
        if row is None:
            msg = "Failed deriving agromanagement info for grid %s" % self.idgrid
            raise exc.PCSEError(msg)

        for key, value in row.items():
            if value in ["EMERGENCE", "SOWING", "HARVEST", "MATURITY"]:
                value = value.lower()
            if key == "duration":
                value = int(value)
            self.amdict[key] = value

        self.conditional_datecopy("start_period", "crop_start_date",
                                  "emergence", "sowing")
        self.conditional_datecopy("end_period", "crop_end_date", "maturity",
                                  "harvesting")
        self.amdict["campaign_start_date"] = check_date(
            self.amdict["crop_start_date"])
        self.amdict["campaign_end_date"] = check_date(
            self.amdict["crop_end_date"]) + dt.timedelta(days=1)
        self.amdict["crop_name"] = self.crop_name
        # We do not get a variety_name from the CGMS database, so we make one
        # as <crop_name>_<grid>_<year>
        self.amdict["variety_name"] = "%s_%s_%s" % (
            self.crop_name, self.idgrid, self.campaign_year)

        # determine the campaign_start_date
        if campaign_start is None:
            self.amdict["campaign_start_date"] = self.amdict['crop_start_date']
        elif isinstance(campaign_start, (int, float)):
            ndays = abs(int(campaign_start))
            self.amdict["campaign_start_date"] = self.amdict[
                "crop_start_date"] - dt.timedelta(days=ndays)
        else:
            try:
                campaign_start = check_date(campaign_start)
                if campaign_start <= self.amdict["crop_start_date"]:
                    self.amdict["campaign_start_date"] = campaign_start
                else:
                    msg = "Date (%s) specified by keyword 'campaign_start' in call to AgroManagementDataProvider " \
                          "is later then crop_start_date defined in the CGMS database."
                    raise exc.PCSEError(msg % campaign_start)
            except KeyError as e:
                msg = "Value (%s) of keyword 'campaign_start' not recognized in call to AgroManagementDataProvider."
                raise exc.PCSEError(msg % campaign_start)

        input = self._build_yaml_agromanagement()
        self._parse_yaml(input)
Code example #34
def _set_aggregates(context, resource_provider, provided_aggregates,
                    increment_generation=False):
    rp_id = resource_provider.id
    # When aggregate uuids are persisted no validation is done
    # to ensure that they refer to something that has meaning
    # elsewhere. It is assumed that code which makes use of the
    # aggregates, later, will validate their fitness.
    # TODO(cdent): At the moment we do not delete
    # a PlacementAggregate that no longer has any associations
    # with at least one resource provider. We may wish to do that
    # to avoid bloat if it turns out we're creating a lot of noise.
    # Not doing now to move things along.
    provided_aggregates = set(provided_aggregates)
    existing_aggregates = _get_aggregates_by_provider_id(context, rp_id)
    agg_uuids_to_add = provided_aggregates - set(existing_aggregates.values())
    # A dict, keyed by internal aggregate ID, of aggregate UUIDs that will be
    # associated with the provider
    aggs_to_associate = {}
    # Same dict for those aggregates to remove the association with this
    # provider
    aggs_to_disassociate = {
        agg_id: agg_uuid for agg_id, agg_uuid in existing_aggregates.items()
        if agg_uuid not in provided_aggregates
    }

    # Create any aggregates that do not yet exist in
    # PlacementAggregates. This is different from
    # the set in existing_aggregates; those are aggregates for
    # which there are associations for the resource provider
    # at rp_id. The following loop checks for the existence of any
    # aggregate with the provided uuid. In this way we only
    # create a new row in the PlacementAggregate table if the
    # aggregate uuid has never been seen before. Code further
    # below will update the associations.
    for agg_uuid in agg_uuids_to_add:
        agg_id = _ensure_aggregate(context, agg_uuid)
        aggs_to_associate[agg_id] = agg_uuid

    for agg_id, agg_uuid in aggs_to_associate.items():
        try:
            ins_stmt = _RP_AGG_TBL.insert().values(
                resource_provider_id=rp_id, aggregate_id=agg_id)
            context.session.execute(ins_stmt)
            LOG.debug("Setting aggregates for provider %s. Successfully "
                      "associated aggregate %s.",
                      resource_provider.uuid, agg_uuid)
        except db_exc.DBDuplicateEntry:
            LOG.debug("Setting aggregates for provider %s. Another thread "
                      "already associated aggregate %s. Skipping.",
                      resource_provider.uuid, agg_uuid)

    for agg_id, agg_uuid in aggs_to_disassociate.items():
        del_stmt = _RP_AGG_TBL.delete().where(
            sa.and_(
                _RP_AGG_TBL.c.resource_provider_id == rp_id,
                _RP_AGG_TBL.c.aggregate_id == agg_id))
        context.session.execute(del_stmt)
        LOG.debug("Setting aggregates for provider %s. Successfully "
                  "disassociated aggregate %s.",
                  resource_provider.uuid, agg_uuid)

    if increment_generation:
        resource_provider.increment_generation()
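Aside: the association bookkeeping above reduces to plain set arithmetic between the desired and existing aggregate sets:

provided = {"agg-a", "agg-b", "agg-c"}   # desired associations
existing = {"agg-b", "agg-d"}            # currently stored
to_add = provided - existing             # {"agg-a", "agg-c"} -> INSERTs
to_remove = existing - provided          # {"agg-d"} -> DELETEs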
Code example #35
File: accounts.py Project: Acidburn0zzz/aurweb
async def accounts_post(request: Request,
                        O: int = Form(default=0),  # Offset
                        SB: str = Form(default=str()),  # Sort By
                        U: str = Form(default=str()),  # Username
                        T: str = Form(default=str()),  # Account Type
                        S: bool = Form(default=False),  # Suspended
                        E: str = Form(default=str()),  # Email
                        R: str = Form(default=str()),  # Real Name
                        I: str = Form(default=str()),  # IRC Nick
                        K: str = Form(default=str())):  # PGP Key
    context = await make_variable_context(request, "Accounts")
    context["pp"] = pp = 50  # Hits per page.

    offset = max(O, 0)  # Minimize offset at 0.
    context["offset"] = offset  # Offset.

    context["params"] = dict(await request.form())
    if "O" in context["params"]:
        context["params"].pop("O")

    # Setup order by criteria based on SB.
    order_by_columns = {
        "t": (models.AccountType.ID.asc(), models.User.Username.asc()),
        "r": (models.User.RealName.asc(), models.AccountType.ID.asc()),
        "i": (models.User.IRCNick.asc(), models.AccountType.ID.asc()),
    }
    default_order = (models.User.Username.asc(), models.AccountType.ID.asc())
    order_by = order_by_columns.get(SB, default_order)

    # Convert parameter T to an AccountType ID.
    account_types = {
        "u": at.USER_ID,
        "t": at.TRUSTED_USER_ID,
        "d": at.DEVELOPER_ID,
        "td": at.TRUSTED_USER_AND_DEV_ID
    }
    account_type_id = account_types.get(T, None)

    # Get a query handle to users, populate the total user
    # count into a jinja2 context variable.
    query = db.query(models.User).join(models.AccountType)

    # Populate this list with any additional statements to
    # be ANDed together.
    statements = [
        v for k, v in [
            (account_type_id is not None, models.AccountType.ID == account_type_id),
            (bool(U), models.User.Username.like(f"%{U}%")),
            (bool(S), models.User.Suspended == S),
            (bool(E), models.User.Email.like(f"%{E}%")),
            (bool(R), models.User.RealName.like(f"%{R}%")),
            (bool(I), models.User.IRCNick.like(f"%{I}%")),
            (bool(K), models.User.PGPKey.like(f"%{K}%")),
        ] if k
    ]

    # Filter the query by combining all statements added above into a
    # single AND clause.
    if statements:
        query = query.filter(and_(*statements))

    context["total_users"] = query.count()

    # Finally, order and truncate our users for the current page.
    users = query.order_by(*order_by).limit(pp).offset(offset).all()
    context["users"] = util.apply_all(users, db.refresh)

    return render_template(request, "account/index.html", context)
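
The "collect optional filters, then AND them together" pattern above works the
same in plain SQLAlchemy core. A hedged sketch against a hypothetical Users
table, using the 1.x-style select([...]) seen elsewhere in these examples:

import sqlalchemy as sa

users = sa.table("Users", sa.column("Username"), sa.column("Email"))

U, E = "bob", ""  # pretend form input; E was left blank
statements = [
    clause for present, clause in [
        (bool(U), users.c.Username.like(f"%{U}%")),
        (bool(E), users.c.Email.like(f"%{E}%")),
    ] if present
]

query = sa.select([users])
if statements:
    query = query.where(sa.and_(*statements))
print(query)  # only the Username LIKE clause makes it into the WHERE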
コード例 #36
0
ファイル: accounts.py プロジェクト: Acidburn0zzz/aurweb
async def passreset_post(request: Request,
                         user: str = Form(...),
                         resetkey: str = Form(default=None),
                         password: str = Form(default=None),
                         confirm: str = Form(default=None)):
    context = await make_variable_context(request, "Password Reset")

    # The user parameter is required; match it against either Username or Email.
    criteria = or_(models.User.Username == user, models.User.Email == user)
    db_user = db.query(models.User,
                       and_(criteria, models.User.Suspended == 0)).first()
    if db_user is None:
        context["errors"] = ["Invalid e-mail."]
        return render_template(request, "passreset.html", context,
                               status_code=HTTPStatus.NOT_FOUND)

    db.refresh(db_user)
    if resetkey:
        context["resetkey"] = resetkey

        if not db_user.ResetKey or resetkey != db_user.ResetKey:
            context["errors"] = ["Invalid e-mail."]
            return render_template(request, "passreset.html", context,
                                   status_code=HTTPStatus.NOT_FOUND)

        if not user or not password:
            context["errors"] = ["Missing a required field."]
            return render_template(request, "passreset.html", context,
                                   status_code=HTTPStatus.BAD_REQUEST)

        if password != confirm:
            # If the provided password does not match the provided confirm.
            context["errors"] = ["Password fields do not match."]
            return render_template(request, "passreset.html", context,
                                   status_code=HTTPStatus.BAD_REQUEST)

        if len(password) < models.User.minimum_passwd_length():
            # Translate the error here, which simplifies error output
            # in the jinja2 template.
            _ = get_translator_for_request(request)
            context["errors"] = [_(
                "Your password must be at least %s characters.") % (
                str(models.User.minimum_passwd_length()))]
            return render_template(request, "passreset.html", context,
                                   status_code=HTTPStatus.BAD_REQUEST)

        # We got to this point; everything matched up. Update the password
        # and remove the ResetKey.
        with db.begin():
            db_user.ResetKey = str()
            if db_user.session:
                db.delete(db_user.session)
            db_user.update_password(password)

        # Render ?step=complete.
        return RedirectResponse(url="/passreset?step=complete",
                                status_code=HTTPStatus.SEE_OTHER)

    # If we got here, we continue with issuing a resetkey for the user.
    resetkey = generate_resetkey()
    with db.begin():
        db_user.ResetKey = resetkey

    ResetKeyNotification(db_user.ID).send()

    # Render ?step=confirm.
    return RedirectResponse(url="/passreset?step=confirm",
                            status_code=HTTPStatus.SEE_OTHER)
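
The username-or-email lookup compiles to an OR nested inside an outer AND. A
minimal sketch with a hypothetical Users table:

import sqlalchemy as sa

users = sa.table("Users", sa.column("Username"), sa.column("Email"),
                 sa.column("Suspended"))
criteria = sa.or_(users.c.Username == "alice", users.c.Email == "alice")
print(sa.select([users]).where(sa.and_(criteria, users.c.Suspended == 0)))
# ... WHERE ("Username" = ... OR "Email" = ...) AND "Suspended" = ...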
コード例 #37
0
    def get_column_hist(self, column, bins):
        """return a list of counts corresponding to bins

        Args:
            column: the name of the column for which to get the histogram
            bins: tuple of bin edges for which to get histogram values; *must* be tuple to support caching
        """
        case_conditions = []
        idx = 0
        bins = list(bins)

        # If we have an infinite lower bound, don't express it in the SQL
        if (bins[0] == -np.inf) or (bins[0] == -float("inf")):
            case_conditions.append(
                sa.func.sum(
                    sa.case(
                        [
                            (sa.column(column) < bins[idx+1], 1)
                        ], else_=0
                    )
                ).label("bin_" + str(idx))
            )
            idx += 1

        for idx in range(idx, len(bins)-2):
            case_conditions.append(
                sa.func.sum(
                    sa.case(
                        [
                            (sa.and_(
                                bins[idx] <= sa.column(column),
                                sa.column(column) < bins[idx+1]
                            ), 1)
                        ], else_=0
                    )
                ).label("bin_" + str(idx))
            )

        if (bins[-1] == np.inf) or (bins[-1] == float("inf")):
            case_conditions.append(
                sa.func.sum(
                    sa.case(
                        [
                            (bins[-2] <= sa.column(column), 1)
                        ], else_=0
                    )
                ).label("bin_" + str(len(bins)-1))
            )
        else:
            case_conditions.append(
                sa.func.sum(
                    sa.case(
                        [
                            (sa.and_(
                                bins[-2] <= sa.column(column),
                                sa.column(column) <= bins[-1]
                            ), 1)
                        ], else_=0
                    )
                ).label("bin_" + str(len(bins)-1))
            )

        query = sa.select(
            case_conditions
        )\
        .where(
            sa.column(column) != None,
        )\
        .select_from(self._table)

        hist = list(self.engine.execute(query).fetchone())
        return hist
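
Each bin above becomes a SUM(CASE WHEN <bounds> THEN 1 ELSE 0 END) column. A
sketch of a single interior bin [0, 10) against a hypothetical column x, using
the same 1.x-style case() list API as the code above:

import sqlalchemy as sa

bin_0 = sa.func.sum(
    sa.case([(sa.and_(0 <= sa.column("x"), sa.column("x") < 10), 1)],
            else_=0)
).label("bin_0")
print(sa.select([bin_0]))
# SELECT sum(CASE WHEN x >= :x_1 AND x < :x_2 THEN :param_1 ELSE :param_2 END) AS bin_0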
コード例 #38
0
ファイル: admin.py プロジェクト: wollsi/calibre-web
def edit_user(user_id):
    content = ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first()  # type: ub.User
    downloads = list()
    languages = speaking_language()
    translations = babel.list_translations() + [LC('en')]
    for book in content.downloads:
        downloadbook = db.session.query(db.Books).filter(db.Books.id == book.book_id).first()
        if downloadbook:
            downloads.append(downloadbook)
        else:
            ub.delete_download(book.book_id)
            # ub.session.query(ub.Downloads).filter(book.book_id == ub.Downloads.book_id).delete()
            # ub.session.commit()
    if request.method == "POST":
        to_save = request.form.to_dict()
        if "delete" in to_save:
            if ub.session.query(ub.User).filter(and_(ub.User.role.op('&')(constants.ROLE_ADMIN) == constants.ROLE_ADMIN,
                                                     ub.User.id != content.id)).count():
                ub.session.query(ub.User).filter(ub.User.id == content.id).delete()
                ub.session.commit()
                flash(_(u"User '%(nick)s' deleted", nick=content.nickname), category="success")
                return redirect(url_for('admin.admin'))
            else:
                flash(_(u"No admin user remaining, can't delete user", nick=content.nickname), category="error")
                return redirect(url_for('admin.admin'))
        else:
            if "password" in to_save and to_save["password"]:
                content.password = generate_password_hash(to_save["password"])
            anonymous = content.is_anonymous
            content.role = constants.selected_roles(to_save)
            if anonymous:
                content.role |= constants.ROLE_ANONYMOUS
            else:
                content.role &= ~constants.ROLE_ANONYMOUS

            val = [int(k[5:]) for k in to_save if k.startswith('show_')]
            sidebar = ub.get_sidebar_config()
            for element in sidebar:
                value = element['visibility']
                if value in val and not content.check_visibility(value):
                    content.sidebar_view |= value
                elif value not in val and content.check_visibility(value):
                    content.sidebar_view &= ~value

            if "Show_detail_random" in to_save:
                content.sidebar_view |= constants.DETAIL_RANDOM
            else:
                content.sidebar_view &= ~constants.DETAIL_RANDOM

            content.mature_content = "Show_mature_content" in to_save

            if "default_language" in to_save:
                content.default_language = to_save["default_language"]
            if "locale" in to_save and to_save["locale"]:
                content.locale = to_save["locale"]
            if to_save["email"] and to_save["email"] != content.email:
                existing_email = ub.session.query(ub.User).filter(ub.User.email == to_save["email"].lower()) \
                    .first()
                if not existing_email:
                    content.email = to_save["email"]
                else:
                    flash(_(u"Found an existing account for this e-mail address."), category="error")
                    return render_title_template("user_edit.html", translations=translations, languages=languages,
                                                 mail_configured=config.get_mail_server_configured(),
                                                 new_user=0, content=content, downloads=downloads, registered_oauth=oauth_check,
                                                 title=_(u"Edit User %(nick)s", nick=content.nickname), page="edituser")
            if "nickname" in to_save and to_save["nickname"] != content.nickname:
                # Query User nickname, if not existing, change
                if not ub.session.query(ub.User).filter(ub.User.nickname == to_save["nickname"]).scalar():
                    content.nickname = to_save["nickname"]
                else:
                    flash(_(u"This username is already taken"), category="error")
                    return render_title_template("user_edit.html",
                                                 translations=translations,
                                                 languages=languages,
                                                 mail_configured=config.get_mail_server_configured(),
                                                 new_user=0, content=content,
                                                 downloads=downloads,
                                                 registered_oauth=oauth_check,
                                                 title=_(u"Edit User %(nick)s", nick=content.nickname),
                                                 page="edituser")

            if "kindle_mail" in to_save and to_save["kindle_mail"] != content.kindle_mail:
                content.kindle_mail = to_save["kindle_mail"]
        try:
            ub.session.commit()
            flash(_(u"User '%(nick)s' updated", nick=content.nickname), category="success")
        except IntegrityError:
            ub.session.rollback()
            flash(_(u"An unknown error occured."), category="error")
    return render_title_template("user_edit.html", translations=translations, languages=languages, new_user=0,
                                 content=content, downloads=downloads, registered_oauth=oauth_check,
                                 mail_configured=config.get_mail_server_configured(),
                                 title=_(u"Edit User %(nick)s", nick=content.nickname), page="edituser")
コード例 #39
0
total_sac_hit = func.sum(db.PitchFx.ab_result_sac_hit).label("total_sac_hit")
total_sac_fly = func.sum(db.PitchFx.ab_result_sac_fly).label("total_sac_fly")
total_errors = func.sum(db.PitchFx.ab_result_error).label("total_errors")
total_at_bats = (total_hits + total_outs + total_errors - total_sac_hit - total_sac_fly).label("total_at_bats")

total_singles = func.sum(db.PitchFx.ab_result_single).label("total_singles")
total_doubles = func.sum(db.PitchFx.ab_result_double).label("total_doubles")
total_triples = func.sum(db.PitchFx.ab_result_triple).label("total_triples")
total_homeruns = func.sum(db.PitchFx.ab_result_homerun).label("total_homeruns")

total_hard_hits = func.sum(db.PitchFx.is_hard_hit).label("total_hard_hits")
total_medium_hits = func.sum(db.PitchFx.is_medium_hit).label("total_medium_hits")
total_soft_hits = func.sum(db.PitchFx.is_soft_hit).label("total_soft_hits")
total_barrels = func.sum(db.PitchFx.is_barreled).label("total_barrels")

bad_whiff = case([(and_(db.PitchFx.swinging_strike == 1, db.PitchFx.outside_strike_zone == 1), 1)], else_=0).label(
    "bad_whiff"
)
total_bad_whiffs = func.sum(bad_whiff).label("total_bad_whiffs")
bad_whiff_rate = total_bad_whiffs / cast(total_swings, Float)
bad_whiff_rate = case([(total_swings > 0, bad_whiff_rate)], else_=0.0).label("bad_whiff_rate")

zone_rate = total_inside_strike_zone / cast(total_pitches, Float)
zone_rate = case([(total_pitches > 0, zone_rate)], else_=0.0).label("zone_rate")

called_strike_rate = total_called_strikes / cast(total_pitches, Float)
called_strike_rate = case([(total_pitches > 0, called_strike_rate)], else_=0.0).label("called_strike_rate")

swinging_strike_rate = total_swinging_strikes / cast(total_pitches, Float)
swinging_strike_rate = case([(total_pitches > 0, swinging_strike_rate)], else_=0.0).label("swinging_strike_rate")
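
The guarded ratios above avoid division by zero by keying a case() on the
denominator. The pattern in isolation, against a hypothetical pitchfx table:

import sqlalchemy as sa
from sqlalchemy import Float

pfx = sa.table("pitchfx", sa.column("swing"), sa.column("swinging_strike"))
total_swings = sa.func.sum(pfx.c.swing)
total_whiffs = sa.func.sum(pfx.c.swinging_strike)
whiff_rate = sa.case(
    [(total_swings > 0, total_whiffs / sa.cast(total_swings, Float))],
    else_=0.0,
).label("whiff_rate")
print(sa.select([whiff_rate]))  # SELECT CASE WHEN ... END AS whiff_rate FROM pitchfx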
コード例 #40
0
# Loop over the ResultProxy and print the state and its population in 2000
for result in connection.execute(stmt):
    print(result.state, result.pop2000)

#4
# Import and_
from sqlalchemy import and_

# Build a query for the census table: stmt
stmt = select([census])

# Append a where clause to select only non-male records from California using and_
stmt = stmt.where(
    # The state of California with a non-male sex
    and_(census.columns.state == 'California', census.columns.sex != 'M'))

# Loop over the ResultProxy printing the age and sex
for result in connection.execute(stmt):
    print(result.age, result.sex)

#5
# Build a query to select the state column: stmt
stmt = select([census.columns.state])

# Order stmt by the state column
stmt = stmt.order_by(census.columns.state)

# Execute the query and store the results: results
results = connection.execute(stmt).fetchall()
コード例 #41
0
ファイル: graphic.py プロジェクト: jpjust/ClimIFBA
def Graphic():
    # Set the timezone for the plot and for timestamps received from the server
    matplotlib.rcParams['timezone'] = 'America/Bahia'
    tz_servidor = pytz.timezone('Etc/GMT+7')

    # Fetch the data
    if request.method == "POST":
        data = getDate()
        primeiro_dia = data[0]
        ultimo_dia = data[1]
        primeiro_dia = primeiro_dia.astimezone(tz_servidor)
        ultimo_dia = ultimo_dia.astimezone(tz_servidor)
        medicoes = Medicao.query.filter(and_(Medicao.hora >= primeiro_dia,
                                             Medicao.hora <= ultimo_dia)).order_by(
                                                 Medicao.id.desc())
    else:
        ultimo_dia = datetime.now() - timedelta(days=1)
        ultimo_dia = ultimo_dia.astimezone(tz_servidor)
        medicoes = Medicao.query.filter(Medicao.hora >= ultimo_dia).order_by(
            Medicao.id.desc())

    # If there are no measurements, return zero
    if medicoes.count() == 0:
        return 0

    temperaturas = []
    umidades = []
    x = []

    # Collect the measurements
    for m in medicoes:
        temperaturas.insert(0, m.temperatura)
        umidades.insert(0, m.umidade)
        hora = m.hora.replace(tzinfo=tz_servidor)
        x.insert(0, hora)

    # Create a Figure object without using pyplot
    fig = Figure()
    plt = fig.subplots()

    # Plot the data
    plt.plot(x, temperaturas, 'k-', color='red')  # solid red line
    plt2 = plt.twinx()
    plt2.plot(x, umidades, 'k-', color='blue')  # solid blue line

    # Adjust titles and formatting
    plt.set_title("Medições")
    plt.grid(True)
    plt.set_xlabel("Horário")
    plt.tick_params(axis="y", labelcolor="red")
    plt.set_ylabel("Temperatura (°C)", color="red")
    plt2.set_ylabel("Umidade do ar (%)", color="blue")
    plt2.tick_params(axis="y", labelcolor="blue")
    fig.autofmt_xdate()
    plt.xaxis.set_major_formatter(dates.DateFormatter('%d/%m/%Y - %H:%M'))
    fig.tight_layout()

    # Temporary buffer
    buf = BytesIO()
    fig.savefig(buf, format="png", transparent=True)

    # Embed the content in the HTML
    data = base64.b64encode(buf.getbuffer()).decode("ascii")
    return data
コード例 #42
0
ファイル: admin.py プロジェクト: wollsi/calibre-web
def view_configuration():
    readColumn = db.session.query(db.Custom_Columns)\
            .filter(and_(db.Custom_Columns.datatype == 'bool', db.Custom_Columns.mark_for_delete == 0)).all()
    return render_title_template("config_view_edit.html", conf=config, readColumns=readColumn,
                                 title=_(u"UI Configuration"), page="uiconfig")
コード例 #43
0
ファイル: sql.py プロジェクト: zell92/Griffith-mirror
def update_whereclause(query, cond):  # {{{
    if cond['loaned'] is True:
        query = query.where(db.Movie.loaned == True)
    if cond['loaned'] is False:
        query = query.where(db.Movie.loaned == False)
    if cond['seen'] is True:
        query = query.where(db.Movie.seen == True)
    if cond['seen'] is False:
        query = query.where(db.Movie.seen == False)

    if cond["collections"]:
        query = query.where(db.Movie.collection_id.in_(cond["collections"]))
    if cond["no_collections"]:
        query = query.where(
            or_(~db.Movie.collection_id.in_(cond["no_collections"]),
                db.Movie.collection_id == None))

    if cond["volumes"]:
        query = query.where(db.Movie.volume_id.in_(cond["volumes"]))
    if cond["no_volumes"]:
        query = query.where(
            or_(~db.Movie.volume_id.in_(cond["no_volumes"]),
                db.Movie.volume_id == None))

    loaned_to = []
    for per_id in cond["loaned_to"]:
        loaned_to.append(exists([db.tables.loans.c.movie_id],\
                and_(db.Movie.movie_id == db.tables.loans.c.movie_id,
                     db.tables.loans.c.person_id == per_id,
                     db.tables.loans.c.return_date == None)))
    if loaned_to:
        query = query.where(or_(*loaned_to))

    loan_history = []
    for per_id in cond["loan_history"]:
        loan_history.append(exists([db.tables.loans.c.movie_id],\
                and_(db.Movie.movie_id == db.tables.loans.c.movie_id,
                     db.tables.loans.c.person_id == per_id)))
    if loan_history:
        query = query.where(or_(*loan_history))

    required_tags = []
    for tag_id in cond["required_tags"]:
        required_tags.append(exists([db.MovieTag.movie_id], \
            and_(db.Movie.movie_id == db.MovieTag.movie_id,
                 db.MovieTag.tag_id == tag_id)))
    if required_tags:
        query = query.where(and_(*required_tags))

    tags = []
    for tag_id in cond["tags"]:
        tags.append(exists([db.MovieTag.movie_id], \
            and_(db.Movie.movie_id == db.MovieTag.movie_id,
                 db.MovieTag.tag_id == tag_id)))
    if tags:
        query = query.where(or_(*tags))

    no_tags = []
    for tag_id in cond["no_tags"]:
        no_tags.append(~exists([db.MovieTag.movie_id], \
            and_(db.Movie.movie_id == db.MovieTag.movie_id,
                 db.MovieTag.tag_id == tag_id)))
    if no_tags:
        query = query.where(and_(*no_tags))

    for field in cond["equals_n"]:
        values = [
            db.tables.movies.columns[field] != value
            for value in cond["equals_n"][field]
        ]
        query = query.where(and_(*values))

    for field in cond["startswith_n"]:
        values = [
            not_(db.tables.movies.columns[field].startswith(value))
            for value in cond["startswith_n"][field]
        ]
        query = query.where(and_(*values))

    for field in cond["like_n"]:
        values = [
            not_(db.tables.movies.columns[field].like(value))
            for value in cond["like_n"][field]
        ]
        query = query.where(and_(*values))

    # XXX: this is not SQLAlchemy's .contains(), i.e. not for one-to-many
    # or many-to-many collections
    for field in cond["contains_n"]:
        values = [
            not_(db.tables.movies.columns[field].like('%' + value + '%'))
            for value in cond["contains_n"][field]
        ]
        query = query.where(and_(*values))

    for field in cond["equals"]:
        values = [
            db.tables.movies.columns[field] == value
            for value in cond["equals"][field]
        ]
        query = query.where(or_(*values))

    for field in cond["startswith"]:
        values = [
            db.tables.movies.columns[field].startswith(value)
            for value in cond["startswith"][field]
        ]
        query = query.where(or_(*values))

    for field in cond["like"]:
        values = [
            db.tables.movies.columns[field].like(value)
            for value in cond["like"][field]
        ]
        query = query.where(or_(*values))

    # XXX: this is not SQLAlchemy's .contains(), i.e. not for one-to-many
    # or many-to-many collections
    for field in cond["contains"]:
        values = [
            db.tables.movies.columns[field].like('%' + value + '%')
            for value in cond["contains"][field]
        ]
        query = query.where(or_(*values))

    # sorting
    if not isinstance(query, (Update, Delete)):
        for rule in cond.get('sort_by', []):
            if rule.endswith(" DESC"):
                reverse = True
                column = rule.replace(" DESC", '')
            else:
                column = rule.replace(" ASC", '')  # the " ASC" suffix is optional
                reverse = False

            table = 'movies'
            tmp = column.split('.')
            if len(tmp) > 1:
                table = tmp[0]
                column = tmp[1]

            if reverse:
                query = query.order_by(
                    desc(db.metadata.tables[table].columns[column]))
            else:
                query = query.order_by(
                    asc(db.metadata.tables[table].columns[column]))

    log.debug(query)
    return query  #}}}
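
Each tag constraint above turns into a correlated EXISTS subquery, which is why
required_tags can be ANDed and tags ORed independently. A minimal sketch for a
single tag id (table and column names are illustrative):

import sqlalchemy as sa

movies = sa.table("movies", sa.column("movie_id"))
movie_tag = sa.table("movie_tag", sa.column("movie_id"), sa.column("tag_id"))
has_tag_7 = sa.exists().where(sa.and_(
    movies.c.movie_id == movie_tag.c.movie_id,
    movie_tag.c.tag_id == 7))
print(sa.select([movies.c.movie_id]).where(has_tag_7))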
コード例 #44
0
def remove_presences(name, description):
    op.execute(ctipresences_table.delete().where(
        and_(ctipresences_table.c.name == name,
             ctipresences_table.c.description == description,
             ctipresences_table.c.deletable == 0)))
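
The same guarded DELETE can be built and compiled without a database. A sketch
with a hypothetical table definition standing in for ctipresences_table:

import sqlalchemy as sa

ctipresences = sa.table("ctipresences", sa.column("name"),
                        sa.column("description"), sa.column("deletable"))
stmt = ctipresences.delete().where(sa.and_(
    ctipresences.c.name == "available",
    ctipresences.c.description == "Available",
    ctipresences.c.deletable == 0))
print(stmt)  # DELETE FROM ctipresences WHERE name = ... AND ... AND deletable = ...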
コード例 #45
0
ファイル: views.py プロジェクト: Ouwen/quilt-compiler
def package_put(auth_user, owner, package_name, package_hash):
    # TODO: Write access for collaborators.
    if auth_user != owner:
        raise ApiException(requests.codes.forbidden,
                           "Only the package owner can push packages.")

    # TODO: Description.
    data = json.loads(request.data.decode('utf-8'), object_hook=decode_node)
    dry_run = data.get('dry_run', False)
    public = data.get('public', False)
    contents = data['contents']

    if hash_contents(contents) != package_hash:
        raise ApiException(requests.codes.bad_request, "Wrong contents hash")

    all_hashes = set(find_object_hashes(contents))

    # Insert a package if it doesn't already exist.
    # TODO: Separate endpoint for just creating a package with no versions?
    package = (Package.query.with_for_update().filter_by(
        owner=owner, name=package_name).one_or_none())

    if package is None:
        # Check for case-insensitive matches, and reject the push.
        package_ci = (Package.query.filter(
            sa.and_(
                sa.sql.collate(Package.owner, UTF8_GENERAL_CI) == owner,
                sa.sql.collate(
                    Package.name,
                    UTF8_GENERAL_CI) == package_name)).one_or_none())

        if package_ci is not None:
            raise ApiException(
                requests.codes.forbidden, "Package already exists: %s/%s" %
                (package_ci.owner, package_ci.name))

        if HAVE_PAYMENTS and not public:
            customer = _get_or_create_customer()
            plan = _get_customer_plan(customer)
            if plan == PaymentPlan.FREE:
                browser = g.user_agent['browser']
                if (browser['name'] == 'QuiltCli' and PackagingVersion(
                        browser['version']) <= PackagingVersion('2.5.0')):
                    # Need 2.5.1 to create public packages.
                    raise ApiException(
                        requests.codes.server_error,
                        "Outdated client. Run `pip install quilt --upgrade` to upgrade."
                    )
                else:
                    raise ApiException(requests.codes.payment_required, (
                        "Insufficient permissions. Run `quilt push --public %s/%s` to make "
                        +
                        "this package public, or upgrade your service plan to create "
                        + "private packages: https://quiltdata.com/profile.") %
                                       (owner, package_name))

        package = Package(owner=owner, name=package_name)
        db.session.add(package)

        owner_access = Access(package=package, user=owner)
        db.session.add(owner_access)

        if public:
            public_access = Access(package=package, user=PUBLIC)
            db.session.add(public_access)
    else:
        if public:
            public_access = (Access.query.filter(
                sa.and_(Access.package == package,
                        Access.user == PUBLIC)).one_or_none())
            if public_access is None:
                raise ApiException(
                    requests.codes.forbidden,
                    ("%(user)s/%(pkg)s is private. To make it public, " +
                     "run `quilt access add %(user)s/%(pkg)s public`.") %
                    dict(user=owner, pkg=package_name))

    # Insert an instance if it doesn't already exist.
    instance = (Instance.query.with_for_update().filter_by(
        package=package, hash=package_hash).one_or_none())

    contents_str = json.dumps(contents, default=encode_node)

    if len(contents_str) > MAX_METADATA_SIZE:
        # Should never actually happen because of nginx limits.
        raise ApiException(requests.codes.server_error,
                           "Metadata size too large")

    # No more error checking at this point, so return from dry-run early.
    if dry_run:
        db.session.rollback()

        # List of signed URLs is potentially huge, so stream it.

        def _generate():
            yield '{"upload_urls":{'
            for idx, blob_hash in enumerate(all_hashes):
                comma = ('' if idx == 0 else ',')
                value = dict(
                    head=_generate_presigned_url(S3_HEAD_OBJECT, owner,
                                                 blob_hash),
                    put=_generate_presigned_url(S3_PUT_OBJECT, owner,
                                                blob_hash))
                yield '%s%s:%s' % (comma, json.dumps(blob_hash),
                                   json.dumps(value))
            yield '}}'

        return Response(_generate(), content_type='application/json')

    if instance is None:
        instance = Instance(package=package,
                            contents=contents_str,
                            hash=package_hash,
                            created_by=auth_user,
                            updated_by=auth_user)

        # Add all the hashes that don't exist yet.

        blobs = (S3Blob.query.with_for_update().filter(
            sa.and_(S3Blob.owner == owner,
                    S3Blob.hash.in_(all_hashes))).all()) if all_hashes else []

        existing_hashes = {blob.hash for blob in blobs}

        for blob_hash in all_hashes:
            if blob_hash not in existing_hashes:
                instance.blobs.append(S3Blob(owner=owner, hash=blob_hash))
    else:
        # Just update the contents dictionary.
        # Nothing else could've changed without invalidating the hash.
        instance.contents = contents_str
        instance.updated_by = auth_user

    db.session.add(instance)

    # Insert a log.
    log = Log(
        package=package,
        instance=instance,
        author=owner,
    )
    db.session.add(log)

    db.session.commit()

    _mp_track(
        type="push",
        package_owner=owner,
        package_name=package_name,
        public=public,
    )

    return dict()
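
The case-insensitive duplicate check collates both columns before comparing.
The expression in isolation, with a hypothetical package table and the MySQL
collation spelled out:

import sqlalchemy as sa

package = sa.table("package", sa.column("owner"), sa.column("name"))
ci_match = sa.and_(
    sa.collate(package.c.owner, "utf8_general_ci") == "Alice",
    sa.collate(package.c.name, "utf8_general_ci") == "Demo")
print(sa.select([package]).where(ci_match))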
コード例 #46
0
def main():
    ini_file = sys.argv[1]
    conf_parser = configparser.ConfigParser({'here': os.getcwd()})
    conf_parser.read(ini_file)
    configuration = {}
    for key, value in conf_parser.items("app:main"):
        configuration[key] = value
    database_connection = configuration['database_connection']
    file_path = configuration['file_path']
    app = TestApplication(database_connection=database_connection,
                          file_path=file_path)
    jobs = {}
    try:
        for job in app.model.Job.filter(
                sa.and_(
                    app.model.Job.table.c.create_time < '2008-12-16',
                    app.model.Job.table.c.state == 'ok',
                    app.model.Job.table.c.tool_id == 'gops_join_1',
                    sa.not_(app.model.Job.table.c.command_line.like(
                        '%-m 1 %')))).all():
            print "# processing job id %s" % str(job.id)
            for jtoda in job.output_datasets:
                print "# --> processing JobToOutputDatasetAssociation id %s" % str(
                    jtoda.id)
                hda = app.model.HistoryDatasetAssociation.get(jtoda.dataset_id)
                print "# ----> processing HistoryDatasetAssociation id %s" % str(
                    hda.id)
                if not hda.deleted:
                    # Probably don't need this check, since the job state should suffice, but...
                    if hda.dataset.state == 'ok':
                        history = app.model.History.get(hda.history_id)
                        print "# ------> processing history id %s" % str(
                            history.id)
                        if history.user_id:
                            cmd_line = str(job.command_line)
                            new_output = tempfile.NamedTemporaryFile('w')
                            new_cmd_line = " ".join(
                                map(str,
                                    cmd_line.split()[:4])
                            ) + " " + new_output.name + " " + " ".join(
                                map(str,
                                    cmd_line.split()[5:]))
                            job_output = cmd_line.split()[4]
                            try:
                                os.system(new_cmd_line)
                            except:
                                pass
                            diff_status = os.system(
                                'diff %s %s >> /dev/null' %
                                (new_output.name, job_output))
                            if diff_status == 0:
                                continue
                            print "# --------> Outputs differ"
                            user = app.model.User.get(history.user_id)
                            jobs[job.id] = {}
                            jobs[job.id]['hda_id'] = hda.id
                            jobs[job.id]['hda_name'] = hda.name
                            jobs[job.id]['hda_info'] = hda.info
                            jobs[job.id]['history_id'] = history.id
                            jobs[job.id]['history_name'] = history.name
                            jobs[job.id][
                                'history_update_time'] = history.update_time
                            jobs[job.id]['user_email'] = user.email
    except Exception as e:
        print("# caught exception: %s" % str(e))
コード例 #47
0
ファイル: Assets.py プロジェクト: levvli/sparrow
def assets():
    try:
        form = MyForm.MyFormServer()
        tables = ('机柜数量', '物理机数量', '虚拟机数量')
        ns = ('网络设备数量', '存储设备数量', '应用部署情况')
        phosts = []
        vhosts = []
        networks = []
        stores = []
        total = []
        # Get the server room and rack info
        db_idc_id = db_idc.idc_id
        db_hosts = db_idc.idc_servers
        values = db_idc_id.query.with_entities(
            db_idc_id.aid,
            func.count(db_idc_id.cid)).group_by(db_idc_id.aid).all()
        values = [list(val) for val in values]
        c_val = db_idc_id.query.with_entities(func.count(
            db_idc_id.cid)).filter(~db_idc_id.cid.in_(('KVM', 'OSS',
                                                       ''))).all()
        p_val = db_hosts.query.with_entities(func.count(
            db_hosts.ip)).filter(db_hosts.host_type == 'physical').all()
        v_val = db_hosts.query.with_entities(func.count(
            db_hosts.ip)).filter(db_hosts.host_type == 'vm').all()
        e_val = db_hosts.query.with_entities(func.count(db_hosts.ip)).filter(
            and_(db_hosts.host_type == 'physical',
                 db_hosts.expird_date < tt)).all()
        w_val = db_hosts.query.with_entities(func.count(db_hosts.ip)).filter(
            and_(db_hosts.host_type == 'physical', db_hosts.expird_date >= tt,
                 db_hosts.expird_date <= dt)).all()
        try:
            total.append(len(values))
            total.append(int(c_val[0][0]))
            total.append(int(p_val[0][0]))
            if e_val:
                total.append(int(e_val[0][0]))
            else:
                total.append(0)
            if w_val:
                total.append(int(w_val[0][0]))
            else:
                total.append(0)
            total.append(int(v_val[0][0]))
            Key = "op_disconnet_assets_count"
            d_val = Redis.smembers(Key)
            if d_val:
                total.append(len(d_val))
            else:
                total.append(0)
        except Exception as e:
            logging.error(e)
        for val in values:
            try:
                # Get the servers in the given room's racks
                idc_id = db_idc_id.query.with_entities(
                    db_idc_id.id).filter(db_idc_id.aid == val[0]).all()
                idc_id = tuple([id[0] for id in idc_id])
                # Count physical servers
                phost_count = db_hosts.query.with_entities(
                    func.count(db_hosts.ip)).filter(
                        and_(db_hosts.host_type == 'physical',
                             db_hosts.idc_id.in_(idc_id))).all()
                phosts.append(phost_count)
                # Count virtual machines
                vhost_count = db_hosts.query.with_entities(
                    func.count(db_hosts.ip)).filter(
                        and_(db_hosts.host_type == 'vm',
                             db_hosts.idc_id.in_(idc_id))).all()
                vhosts.append(vhost_count)
                # Get network, storage and auxiliary devices
                db_network = db_idc.idc_networks
                db_store = db_idc.idc_store
                network_count = db_network.query.with_entities(
                    func.count(db_network.ip)).filter(
                        db_network.idc_id.in_(idc_id)).all()
                networks.append(network_count)
                store_count = db_store.query.with_entities(
                    func.count(db_store.ip)).filter(
                        db_store.idc_id.in_(idc_id)).all()
                stores.append(store_count)
            except Exception as e:
                logging.error(e)
        # Merge the information
        try:
            phosts = [host[0][0] for host in phosts]
            vhosts = [host[0][0] for host in vhosts]
            networks = [host[0][0] for host in networks]
            stores = [host[0][0] for host in stores]
            for i, val in enumerate(values):
                if int(vhosts[i]) > 0:
                    val[1] = int(val[1]) - 1
                if int(phosts[i]) == 0:
                    val[1] = 0
                val.append(phosts[i])
                val.append(vhosts[i])
                val.append(networks[i])
                val.append(stores[i])
                val.append('查看')
                values[i] = val
        except Exception as e:
            logging.error(e)
        return render_template('assets.html',
                               values=values,
                               tables=tables,
                               ns=ns,
                               form=form,
                               total=total)
    except Exception as e:
        logging.error(e, "error")
        flash('获取数据错误!', "error")
        return render_template('Message.html')
コード例 #48
0
ファイル: issue_tracker.py プロジェクト: vjsavo4324/ggrc-core
def join_function():
    """Object and Notification join function."""
    object_id = sa.orm.foreign(IssuetrackerIssue.object_id)
    object_type = sa.orm.foreign(IssuetrackerIssue.object_type)
    return sa.and_(object_type == cls.__name__,
                   object_id == cls.id)
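
A join function like this is typically handed to relationship() as a
primaryjoin for a generic (polymorphic) foreign key. A hedged, self-contained
sketch of that wiring with hypothetical models, loosely following SQLAlchemy's
generic-FK recipe rather than ggrc's actual mapping:

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import foreign, relationship

Base = declarative_base()

class IssueNote(Base):  # hypothetical stand-in for IssuetrackerIssue
    __tablename__ = "issue_notes"
    id = sa.Column(sa.Integer, primary_key=True)
    object_type = sa.Column(sa.String(50))
    object_id = sa.Column(sa.Integer)

class Assessment(Base):  # hypothetical stand-in for the class behind cls
    __tablename__ = "assessments"
    id = sa.Column(sa.Integer, primary_key=True)
    notes = relationship(
        IssueNote,
        primaryjoin=lambda: sa.and_(
            IssueNote.object_type == "Assessment",
            foreign(IssueNote.object_id) == Assessment.id),
        viewonly=True)

sa.orm.configure_mappers()  # forces the join condition to resolve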
コード例 #49
0
def get_channel_annotation_stats(channel_id, checksums=None):
    bridge = Bridge(app_name=CONTENT_APP_NAME)

    ContentNodeTable = bridge.get_table(ContentNode)
    FileTable = bridge.get_table(File)
    LocalFileTable = bridge.get_table(LocalFile)
    if checksums is not None:
        file_table = FileTable.join(
            LocalFileTable,
            and_(
                FileTable.c.local_file_id == LocalFileTable.c.id,
                or_(
                    # checksums are not uuids and have been got from
                    # get_channel_stats_from_disk, so no need to validate them:
                    filter_by_uuids(LocalFileTable.c.id, checksums, validate=False),
                    LocalFileTable.c.available == True,  # noqa
                ),
            ),
        )
    else:
        file_table = FileTable.join(
            LocalFileTable, FileTable.c.local_file_id == LocalFileTable.c.id
        )

    contentnode_statement = (
        select([FileTable.c.contentnode_id])
        .select_from(file_table)
        .where(FileTable.c.supplementary == False)  # noqa
        .where(
            or_(*(FileTable.c.preset == preset for preset in renderable_files_presets))
        )
        .where(ContentNodeTable.c.id == FileTable.c.contentnode_id)
    )
    connection = bridge.get_connection()

    # start a transaction

    trans = connection.begin()

    connection.execute(
        ContentNodeTable.update()
        .where(
            and_(
                ContentNodeTable.c.kind != content_kinds.TOPIC,
                ContentNodeTable.c.channel_id == channel_id,
            )
        )
        .values(available=exists(contentnode_statement))
    )

    ContentNodeClass = bridge.get_class(ContentNode)

    node_depth = (
        bridge.session.query(func.max(ContentNodeClass.level))
        .filter_by(channel_id=channel_id)
        .scalar()
    )

    child = ContentNodeTable.alias()

    # Update all leaf ContentNodes to have num_coach_content to 1 or 0
    # Update all leaf ContentNodes to have on_device_resources to 1 or 0
    connection.execute(
        ContentNodeTable.update()
        .where(
            and_(
                # In this channel
                ContentNodeTable.c.channel_id == channel_id,
                # That are not topics
                ContentNodeTable.c.kind != content_kinds.TOPIC,
            )
        )
        .values(
            num_coach_contents=cast(ContentNodeTable.c.coach_content, Integer()),
            on_device_resources=cast(ContentNodeTable.c.available, Integer()),
        )
    )

    # Before starting set availability to False on all topics.
    connection.execute(
        ContentNodeTable.update()
        .where(
            and_(
                # In this channel
                ContentNodeTable.c.channel_id == channel_id,
                # That are topics
                ContentNodeTable.c.kind == content_kinds.TOPIC,
            )
        )
        .values(available=False)
    )

    # Expression to capture all available child nodes of a contentnode
    available_nodes = select([child.c.available]).where(
        and_(
            child.c.available == True,  # noqa
            ContentNodeTable.c.id == child.c.parent_id,
        )
    )

    # Expressions for annotation of coach content

    # Expression that will resolve a boolean value for all the available children
    # of a content node, whereby if they all have coach_content flagged on them, it will be true,
    # but otherwise false.
    # Everything after the select statement should be identical to the available_nodes expression above.
    if bridge.engine.name == "sqlite":
        # Use a min function to simulate an AND.
        coach_content_nodes = select([func.min(child.c.coach_content)]).where(
            and_(
                child.c.available == True,  # noqa
                ContentNodeTable.c.id == child.c.parent_id,
            )
        )
    elif bridge.engine.name == "postgresql":
        # Use the postgres boolean AND operator
        coach_content_nodes = select([func.bool_and(child.c.coach_content)]).where(
            and_(
                child.c.available == True,  # noqa
                ContentNodeTable.c.id == child.c.parent_id,
            )
        )

    # Expression that sums the total number of coach contents for each child node
    # of a contentnode
    coach_content_num = select([func.sum(child.c.num_coach_contents)]).where(
        and_(
            child.c.available == True,  # noqa
            ContentNodeTable.c.id == child.c.parent_id,
        )
    )

    # Expression that sums the total number of on_device_resources for each child node
    # of a contentnode
    on_device_num = select([func.sum(child.c.on_device_resources)]).where(
        and_(
            child.c.available == True,  # noqa
            ContentNodeTable.c.id == child.c.parent_id,
        )
    )

    stats = {}

    # Go from the deepest level to the shallowest
    for level in range(node_depth, 0, -1):

        # Only modify topic availability here
        connection.execute(
            ContentNodeTable.update()
            .where(
                and_(
                    ContentNodeTable.c.level == level - 1,
                    ContentNodeTable.c.channel_id == channel_id,
                    ContentNodeTable.c.kind == content_kinds.TOPIC,
                )
            )
            # Because we have set availability to False on all topics as a starting point
            # we only need to make updates to topics with available children.
            .where(exists(available_nodes))
            .values(
                available=exists(available_nodes),
                coach_content=coach_content_nodes,
                num_coach_contents=coach_content_num,
                on_device_resources=on_device_num,
            )
        )

        level_stats = connection.execute(
            select(
                [
                    ContentNodeTable.c.id,
                    ContentNodeTable.c.coach_content,
                    ContentNodeTable.c.num_coach_contents,
                    ContentNodeTable.c.on_device_resources,
                ]
            ).where(
                and_(
                    ContentNodeTable.c.level == level,
                    ContentNodeTable.c.channel_id == channel_id,
                    ContentNodeTable.c.available == True,  # noqa
                )
            )
        )

        for stat in level_stats:
            stats[stat[0]] = {
                "coach_content": bool(stat[1]),
                "num_coach_contents": stat[2] or 0,
                "total_resources": stat[3] or 0,
            }

    root_node_stats = connection.execute(
        select(
            [
                ContentNodeTable.c.id,
                ContentNodeTable.c.coach_content,
                ContentNodeTable.c.num_coach_contents,
                ContentNodeTable.c.on_device_resources,
            ]
        ).where(
            and_(
                ContentNodeTable.c.level == 0,
                ContentNodeTable.c.channel_id == channel_id,
            )
        )
    ).fetchone()

    stats[root_node_stats[0]] = {
        "coach_content": root_node_stats[1],
        "num_coach_contents": root_node_stats[2],
        "total_resources": root_node_stats[3],
    }

    # rollback the transaction to undo the temporary annotation
    trans.rollback()

    bridge.end()

    return stats
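
The engine-specific branch above exists because SQLite has no bool_and(); over
a 0/1 column, min() behaves as AND, since the minimum is 1 only when every
value is 1. A quick stdlib demonstration:

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE nodes (available INTEGER, coach_content INTEGER)")
con.executemany("INSERT INTO nodes VALUES (?, ?)", [(1, 1), (1, 0), (0, 1)])
all_coach = con.execute(
    "SELECT min(coach_content) FROM nodes WHERE available = 1").fetchone()[0]
print(all_coach)  # 0 -> not every available row is coach content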
コード例 #50
0
ファイル: Assets.py プロジェクト: levvli/sparrow
def assets_deploy(room=None, busi=None, idc=None):
    CONFS = defaultdict(list)
    INFOS = defaultdict(list)
    BUSIS = defaultdict(list)
    busi_vals = defaultdict(list)
    idc_vals = defaultdict(list)
    db_dic_id = db_idc.idc_id
    db_server = db_idc.idc_servers
    db_third = db_idc.third_resource
    db_project = db_op.project_list
    db_busi = db_op.business
    db_project_third = db_op.project_third
    try:
        if room:
            try:
                idcs = db_dic_id.query.with_entities(
                    db_dic_id.id).filter(db_dic_id.aid == room).all()
                if idcs:
                    # Get the room info
                    idcs = tuple([int(idc[0]) for idc in idcs])
                    # Get the asset info
                    vals = db_server.query.with_entities(
                        db_server.hostname, db_server.ip, db_server.ssh_port,
                        db_server.host_type,
                        db_server.cpu_core, db_server.mem).filter(
                            db_server.idc_id.in_(idcs)).all()
            except Exception as e:
                logging.error(e)
        if busi:
            try:
                busi_id = db_busi.query.with_entities(
                    db_busi.id).filter(db_busi.business == busi).all()
                busi_id = busi_id[0][0]
                projects = db_project.query.with_entities(
                    distinct(db_project.project)).filter(
                        db_project.business_id == busi_id).all()
                projects = tuple([project[0] for project in projects])
                ids = db_project_third.query.with_entities(
                    db_project_third.third_id).filter(
                        db_project_third.project.in_(projects)).all()
                third_ids = [id[0] for id in ids]
                self_ips = db_project.query.with_entities(
                    distinct(db_project.ip)).filter(
                        db_project.project.in_(projects)).all()
                self_ips = [ip[0] for ip in self_ips]
                third_ips = db_third.query.with_entities(
                    distinct(db_third.ip)).filter(
                        db_third.id.in_(tuple(third_ids))).all()
                third_ips = [ip[0] for ip in third_ips]
                if idc:
                    idc_ids = db_dic_id.query.with_entities(
                        db_dic_id.id).filter(db_dic_id.aid == idc).all()
                    idc_ids = tuple([id[0] for id in idc_ids])
                    vals = db_server.query.with_entities(
                        db_server.hostname, db_server.ip, db_server.ssh_port,
                        db_server.host_type, db_server.cpu_core,
                        db_server.mem).filter(
                            and_(db_server.ip.in_(tuple(self_ips + third_ips)),
                                 db_server.idc_id.in_(idc_ids))).all()
                else:
                    vals = db_server.query.with_entities(
                        db_server.hostname, db_server.ip, db_server.ssh_port,
                        db_server.host_type, db_server.cpu_core,
                        db_server.mem).filter(
                            db_server.ip.in_(tuple(self_ips +
                                                   third_ips))).all()
            except Exception as e:
                logging.error(e)
        try:
            CONFS = {val[0]: val[3:] for val in vals}
            INFOS = {val[0]: [] for val in vals}
            BUSIS = {val[0]: [] for val in vals}
            hostnames = {val[1]: val[0] for val in vals}
            ips = tuple([val[1] for val in vals])
            if room:
                # Get the business info
                busi_vals = db_busi.query.with_entities(
                    db_busi.id, db_busi.business).all()
                busi_vals = {b[0]: b[1] for b in busi_vals}
            if busi:
                # Get the room info
                idc_vals = db_dic_id.query.with_entities(
                    db_dic_id.id, db_dic_id.aid).all()
                idc_vals = {int(b[0]): b[1] for b in idc_vals}
            IDCS = db_server.query.with_entities(
                db_server.hostname,
                db_server.idc_id).filter(db_server.ip.in_(ips)).all()
            IDCS = {idc[0]: [int(idc[1])] for idc in IDCS}
        except Exception as e:
            logging.error(e)
        # Get deployed application and service info
        try:
            third_vals = db_third.query.with_entities(
                db_third.resource_type,
                db_third.ip).filter(db_third.ip.in_(ips)).all()
            for info in third_vals:
                resource, ip = info
                INFOS[hostnames[ip]].append(resource)
            project_vals = db_project.query.with_entities(
                db_project.resource, db_project.ip,
                db_project.business_id).filter(db_project.ip.in_(ips)).all()
            for info in project_vals:
                resource, ip, business_id = info
                BUSIS[hostnames[ip]].append(business_id)
                INFOS[hostnames[ip]].append(resource)
        except Exception as e:
            logging.error(e)
        # Format the data
        try:
            if INFOS:
                for info in INFOS:
                    INFOS[info] = set(INFOS[info])
            if BUSIS:
                for info in BUSIS:
                    BUSIS[info] = set(BUSIS[info])
        except Exception as e:
            logging.error(e)
    except Exception as e:
        logging.error(e)
    if room:
        tables = ['主机名', '部署应用', '线上业务', '主机类型', 'CPU核数', '内存']
        return render_template('assets_deploy.html',
                               INFOS=INFOS,
                               BUSIS=BUSIS,
                               tables=tables,
                               CONFS=CONFS,
                               busi_vals=busi_vals,
                               room=room,
                               busi=busi)
    if busi:
        tables = ['主机名', '部署应用', '机房', '主机类型', 'CPU核数', '内存']
        return render_template('assets_deploy.html',
                               INFOS=INFOS,
                               BUSIS=IDCS,
                               tables=tables,
                               CONFS=CONFS,
                               busi_vals=idc_vals,
                               room=room,
                               busi=busi)
コード例 #51
0
def get_by_email_and_password(self, user_entity):
    filter_data = self.db.query(UserModel).filter(
        and_((user_entity.email == UserModel.email),
             (user_entity.password == UserModel.hash)))
    user_data = filter_data.one_or_none()
    return user_data
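
Note that filter(and_(a, b)) and filter(a, b) produce the same WHERE clause;
and_() just makes the grouping explicit. A sketch of the compiled form with a
hypothetical users table:

import sqlalchemy as sa

users = sa.table("users", sa.column("email"), sa.column("hash"))
stmt = sa.select([users]).where(sa.and_(users.c.email == "a@example.com",
                                        users.c.hash == "hashed-password"))
print(stmt)  # ... WHERE users.email = :email_1 AND users.hash = :hash_1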
コード例 #52
0
ファイル: Assets.py プロジェクト: levvli/sparrow
def assets_get(action=None):
    # Common parameters
    Args = {
        info: tools.http_args(request, info)
        for info in ('aid', 'ip', 'port', 'type', 'host_type', 'action',
                     'page', 'hostname')
    }
    search_key = 'search_results_%s' % g.token
    form = MyForm.MyFormServer()
    db = db_idc.idc_servers
    db_idc_id = db_idc.idc_id
    db_zabbix = db_idc.zabbix_info
    idc_vals = db_idc_id.query.with_entities(db_idc_id.id, db_idc_id.aid,
                                             db_idc_id.cid).all()
    idc_val = {val[0]: val[1] for val in idc_vals}
    cid_val = {val[0]: val[-1] for val in idc_vals}
    values = []
    tables = [
        '机房', '机柜', 'IP', 'ssh端口', '主机名', '服务器型号', '操作系统', 'CPU', '内存', '磁盘数',
        '磁盘总量', '远程管理IP', '购买日期', '状态'
    ]
    try:
        # Data export feature
        if action:
            if action == 'export':
                try:
                    file_name = "/tmp/export_assets.xlsx"
                    values = [list(val) for val in eval(Redis.get(search_key))]
                    if os.path.exists(file_name):
                        os.remove(file_name)
                    pyexcel.save_as(array=values,
                                    dest_file_name=file_name,
                                    sheet_name='export_assets')
                except Exception as e:
                    logging.error(e)
                else:
                    return send_file(file_name, as_attachment=True)
        # Check whether this is a search request
        if form.submit.data:
            ts = form.text.data.strip()
            Infos = {
                'ip': db.ip,
                'sn': db.sn,
                'hostname': db.hostname,
                'status': db.status
            }
            if ts:
                try:
                    if form.select.data == 'cid':
                        cid = ts.strip()
                        if not ts.endswith('机柜') and cid != 'KVM':
                            cid = '%s机柜' % ts.strip()
                        # Try an exact match first
                        idc_id = db_idc_id.query.with_entities(
                            db_idc_id.id).filter(db_idc_id.cid == cid).all()
                        if not idc_id:
                            # Fall back to a fuzzy match if nothing matched exactly
                            idc_id = db_idc_id.query.with_entities(
                                db_idc_id.id).filter(
                                    db_idc_id.cid.like(
                                        '%{0}%'.format(cid))).all()
                        idc_id = tuple([id[0] for id in idc_id])
                        values = db.query.with_entities(
                            db.idc_id, db.ip, db.ssh_port, db.hostname,
                            db.productname, db.system, db.cpu_core, db.mem,
                            db.disk_count, db.disk_size, db.idrac,
                            db.purch_date,
                            db.status).filter(db.idc_id.in_(idc_id)).all()
                    if form.select.data == 'buy_date':
                        start_time = ts.split('to')[0].strip()
                        end_time = ts.split('to')[-1].strip()
                        values = db.query.with_entities(
                            db.idc_id, db.ip, db.ssh_port, db.hostname,
                            db.productname, db.system, db.cpu_core, db.mem,
                            db.disk_count, db.disk_size, db.idrac,
                            db.purch_date, db.status).filter(
                                and_(db.purch_date >= start_time,
                                     db.purch_date <= end_time,
                                     db.host_type == 'physical')).all()
                    if form.select.data in ('sn', 'hostname', 'ip', 'status'):
                        val = db.query.with_entities(
                            db.ip, db.ssh_port).filter(
                                Infos[form.select.data] == ts).all()
                        # Try an exact match first
                        if val and len(val) == 1:
                            ip, ssh_port = val[0]
                            return redirect(
                                "assets_get?type=server&ip=%s&port=%s" %
                                (ip, ssh_port))
                        else:
                            # Fall back to a fuzzy match if nothing matched exactly
                            values = db.query.with_entities(
                                db.idc_id, db.ip, db.ssh_port, db.hostname,
                                db.productname, db.system, db.cpu_core, db.mem,
                                db.disk_count, db.disk_size, db.idrac,
                                db.purch_date,
                                db.status).filter(Infos[form.select.data].like(
                                    '%{0}%'.format(ts))).all()
                except Exception as e:
                    logging.error(e)
                else:
                    try:
                        # Assemble the server rows for display and export
                        if values:
                            values = [list(val) for val in values]
                            for val in values:
                                idc_key = val[0]
                                val[0] = idc_val[idc_key]
                                val.insert(1, cid_val[idc_key])
                            export_values = list(values)
                            export_values.insert(0, tables)
                            Redis.set(search_key, export_values)
                    except Exception as e:
                        logging.error(e)
                    return render_template('server_list.html',
                                           values=values,
                                           tables=tables,
                                           form=form,
                                           export=True,
                                           assets_type='server')
        # Read the API parameters
        if Args:
            try:
                if Args['aid']:
                    aid = Args['aid']
                    idc_ids = db_idc_id.query.with_entities(
                        db_idc_id.id).filter(db_idc_id.aid == aid).all()
                    if idc_ids:
                        idc_ids = tuple([val[0] for val in idc_ids])
                else:
                    idc_ids = db_idc_id.query.with_entities(db_idc_id.id).all()
                    if idc_ids:
                        idc_ids = tuple([val[0] for val in idc_ids])
            except Exception as e:
                logging.error(e)
            # Handle server assets
            if Args['type'] == 'server':
                if Args['action']:
                    try:
                        action = Args['action']
                        if action == 'all_list':
                            if Args['host_type']:
                                host_type = Args['host_type']
                                if idc_ids:
                                    values = db.query.with_entities(
                                        db.idc_id, db.ip, db.ssh_port,
                                        db.hostname, db.productname, db.system,
                                        db.cpu_core, db.mem, db.disk_count,
                                        db.disk_size, db.idrac, db.purch_date,
                                        db.status).filter(
                                            and_(db.idc_id.in_(idc_ids),
                                                 db.host_type ==
                                                 host_type)).all()
                        if action == 'expire':
                            values = db.query.with_entities(
                                db.idc_id, db.ip, db.ssh_port, db.hostname,
                                db.productname, db.system, db.cpu_core, db.mem,
                                db.disk_count, db.disk_size, db.idrac,
                                db.purch_date, db.status).filter(
                                    and_(db.host_type == 'physical',
                                         db.expird_date < tt,
                                         db.idc_id != 0)).order_by(
                                             db.idc_id).all()
                        if action == 'about_to':
                            values = db.query.with_entities(
                                db.idc_id, db.ip, db.ssh_port, db.hostname,
                                db.productname, db.system, db.cpu_core, db.mem,
                                db.disk_count, db.disk_size, db.idrac,
                                db.purch_date, db.status).filter(
                                    and_(db.host_type == 'physical',
                                         db.expird_date >= tt,
                                         db.expird_date <= dt,
                                         db.idc_id != 0)).order_by(
                                             db.idc_id).all()
                        if action == 'search':
                            if Redis.exists(search_key):
                                values = eval(Redis.get(search_key))
                        if values:
                            Redis.set(search_key, values)
                            values = [list(val) for val in values]
                            for val in values:
                                idc_key = val[0]
                                val[0] = idc_val[idc_key]
                                val.insert(1, cid_val[idc_key])
                    except Exception as e:
                        logging.error(e)
                    return render_template('server_list.html',
                                           values=values,
                                           tables=tables,
                                           form=form,
                                           export=True,
                                           assets_type=Args['type'])
                if (Args['ip'] and Args['port']) or Args['hostname']:
                    try:
                        total_infos = defaultdict()
                        ip = Args['ip']
                        ssh_port = Args['port']
                        hostname = Args['hostname']
                        total_infos['ssh_port'] = ssh_port
                        # Fetch the server hardware information
                        db_server = db_idc.idc_servers
                        if ip and ssh_port:
                            total_infos['ip'] = ip
                            server_info = db_server.query.with_entities(
                                db_server.idc_id, db_server.ip,
                                db_server.ssh_port, db_server.s_ip,
                                db_server.host_type, db_server.hostname,
                                db_server.sn, db_server.manufacturer,
                                db_server.productname, db_server.system,
                                db_server.cpu_info, db_server.cpu_core,
                                db_server.mem, db_server.disk_count,
                                db_server.disk_size, db_server.idrac,
                                db_server.purch_date, db_server.expird_date,
                                db_server.status, db_server.comment).filter(
                                    and_(
                                        db_server.ip == ip,
                                        db_server.ssh_port == ssh_port)).all()
                        if hostname:
                            total_infos['ip'] = hostname
                            server_info = db_server.query.with_entities(
                                db_server.idc_id, db_server.ip,
                                db_server.ssh_port, db_server.s_ip,
                                db_server.host_type, db_server.hostname,
                                db_server.sn, db_server.manufacturer,
                                db_server.productname, db_server.system,
                                db_server.cpu_info, db_server.cpu_core,
                                db_server.mem, db_server.disk_count,
                                db_server.disk_size, db_server.idrac,
                                db_server.purch_date, db_server.expird_date,
                                db_server.status, db_server.comment).filter(
                                    db_server.hostname == hostname).all()
                            ip = server_info[0][1]
                            ssh_port = int(server_info[0][2])
                    except Exception as e:
                        logging.error(e)
                    if server_info:
                        try:
                            server_info = list(server_info[0])
                            # Fetch the server's room and cabinet information
                            idc_info = db_idc_id.query.with_entities(
                                db_idc_id.aid, db_idc_id.cid).filter(
                                    db_idc_id.id == int(server_info[0])).all()
                            server_info.pop(0)
                            if idc_info:
                                server_info.insert(0, idc_info[0][1])
                                server_info.insert(0, idc_info[0][0])
                            else:
                                server_info.insert(0, None)
                                server_info.insert(0, None)
                            table_info = [
                                '机房', '机柜', 'IP', 'SSH_PORT', '附属ip', '主机类型',
                                'hostname', 'sn', '生产厂家', '服务器型号', '操作系统',
                                'cpu信息', 'cpu核数', '内存', '磁盘数', '磁盘总量', 'idrac',
                                '采购日期', '过保日期', '状态', '管理', '备注'
                            ]
                            total_infos['server_info'] = [
                                table_info, server_info
                            ]
                        except Exception as e:
                            logging.error(e)
                        try:
                            tt = datetime.datetime.now() - datetime.timedelta(
                                minutes=15)
                            tt = tt.strftime('%Y-%m-%d %H:%M:%S')
                            zabbix_infos = [0, 0, 0, 0, 0]
                            vals = db_zabbix.query.with_entities(
                                db_zabbix.icmpping, db_zabbix.cpu_load,
                                db_zabbix.mem_use, db_zabbix.disk_io,
                                db_zabbix.openfile).filter(
                                    and_(db_zabbix.ip == server_info[2],
                                         db_zabbix.ssh_port == server_info[3],
                                         db_zabbix.update_time > tt)).all()
                            if vals:
                                zabbix_infos = [
                                    float(val) for val in list(vals[0])
                                ]
                            total_infos['zabbix_infos'] = zabbix_infos
                        except Exception as e:
                            logging.error(e)
                        try:
                            # Fetch third-party resource information
                            third_table = [
                                '应用服务', '应用端口', '所属项目', '集群类型', '业务使用', '所属部门',
                                '负责人', '联系方式'
                            ]
                            project_table = [
                                '应用服务', '应用端口', '所属项目', '域名', '开发语言', '环境',
                                '状态', '所属业务'
                            ]
                            total_infos['pool_project'] = True
                            db_third = db_idc.third_resource
                            db_project = db_op.project_list
                            db_busi = db_op.business
                            db_project_third = db_op.project_third
                            busis = db_busi.query.with_entities(
                                db_busi.id, db_busi.business).all()
                            busis = {int(busi[0]): busi[1] for busi in busis}
                            busis[0] = '未知业务'
                            project_third = db_project_third.query.with_entities(
                                db_project_third.third_id,
                                db_project_third.project).all()
                            if project_third:
                                project_third = {
                                    info[0]: info[1]
                                    for info in project_third
                                }
                            third_info = db_third.query.with_entities(
                                db_third.id, db_third.resource_type,
                                db_third.app_port, db_third.cluster_type,
                                db_third.busi_id, db_third.department,
                                db_third.person, db_third.contact).filter(
                                    and_(
                                        db_third.ip == ip,
                                        db_third.ssh_port == ssh_port,
                                    )).all()
                            if third_info:
                                third_info = [
                                    list(info) for info in third_info
                                ]
                                third_id = [info[0] for info in third_info]
                                for i, info in enumerate(third_info):
                                    info = list(info)
                                    info[4] = busis[int(info[4])]
                                    if project_third:
                                        if third_id[i] in project_third.keys():
                                            info.insert(
                                                3, project_third[third_id[i]])
                                        else:
                                            info.insert(3, '')
                                    else:
                                        info.insert(3, '')
                                    third_info[i] = info
                                third_info.insert(0, third_table)
                                total_infos['third_info'] = third_info
                        except Exception as e:
                            logging.error(e)
                        try:
                            # Fetch in-house resource information
                            project_info = db_project.query.with_entities(
                                db_project.id, db_project.resource,
                                db_project.app_port, db_project.project,
                                db_project.domain, db_project.sys_args,
                                db_project.env, db_project.status,
                                db_project.business_id).filter(
                                    and_(db_project.ip == ip,
                                         db_project.ssh_port ==
                                         ssh_port)).all()
                            project_info = [
                                list(info) for info in project_info
                            ]
                            if project_info:
                                for info in project_info:
                                    business = db_busi.query.with_entities(
                                        db_busi.business).filter(
                                            db_busi.id == int(info[-1])).all()
                                    info[-1] = '%s:%s' % (info[-1],
                                                          business[0][0])
                                project_info.insert(0, project_table)
                                total_infos['project_info'] = project_info
                        except Exception as e:
                            logging.error(e)
                    return render_template('server_infos.html',
                                           total_infos=total_infos)
            # Handle storage devices
            try:
                if Args['type'] == 'store' and Args['action']:
                    db_store = db_idc.idc_store
                    tables = ('机房', '机柜', '设备型号', 'ip', '购买日期', '过保日期', '状态',
                              '备注')
                    if idc_ids:
                        val = db_store.query.with_entities(
                            db_store.idc_id, db_store.type, db_store.ip,
                            db_store.purch_date, db_store.expird_date,
                            db_store.status, db_store.comment).filter(
                                db_store.idc_id.in_(idc_ids)).all()
                        values = [list(va) for va in val]
                        for value in values:
                            idc_id = int(value[0])
                            cid = db_idc_id.query.with_entities(
                                db_idc_id.aid, db_idc_id.cid).filter(
                                    db_idc_id.id == idc_id).all()
                            value.pop(0)
                            value.insert(0, cid[0][1])
                            value.insert(0, cid[0][0])
            except Exception as e:
                logging.error(e)
            # Handle network devices
            try:
                if Args['type'] == 'network' and Args['action']:
                    db_network = db_idc.idc_networks
                    tables = ('机房', '机柜', '设备型号', 'ip', '冗余', '购买日期', '过保日期',
                              '状态', '备注')
                    if idc_ids:
                        val = db_network.query.with_entities(
                            db_network.idc_id, db_network.type, db_network.ip,
                            db_network.redundance, db_network.purch_date,
                            db_network.expird_date, db_network.status,
                            db_network.comment).filter(
                                db_network.idc_id.in_(idc_ids)).all()
                        values = [list(va) for va in val]
                        for value in values:
                            idc_id = int(value[0])
                            cid = db_idc_id.query.with_entities(
                                db_idc_id.aid, db_idc_id.cid).filter(
                                    db_idc_id.id == idc_id).all()
                            value.pop(0)
                            value.insert(0, cid[0][1])
                            value.insert(0, cid[0][0])
            except Exception as e:
                logging.error(e)
    except Exception as e:
        logging.error(e)
        flash('获取数据错误!', "error")
        return render_template('Message.html')
    return render_template('server_list.html',
                           values=values,
                           tables=tables,
                           form=form,
                           export=False,
                           assets_type=Args['type'])
コード例 #53
0
def district_feedin_series(request):
    """
    Return JSON with pre-calculated feed-in data for one or more districts.
    The data includes a feed-in time series for each district.
    :return: HttpResponse containing the JSON payload
    """
    data = []
    if request.method == 'POST':
        landkreis_props = {
            k: request.POST.get(k)
            for k in ['id', 'gen', 'bez', 'nuts']
        }
        technology = str(request.POST.get('technology'))
        lk_id = landkreis_props['nuts']

        if not LOCAL_TESTING:
            oep_query = Serializer.session.query(Timeseries) \
                .filter(
                    and_(
                        Timeseries.nuts == lk_id,
                        Timeseries.technology == technology
                    )
                )

            n_records = oep_query.count()

            timespan = []
            values = []
            nut = ''
            # for later csv file downloading
            CsvRow.objects.all().delete()
            for record in oep_query:
                t = record.time
                timespan.append(t)
                y = float(record.feedin) * 1e-6
                values.append(y)
                CsvRow.objects.create(time=t, val=y)
                nut = record.nuts

            data = dict(n_records=n_records,
                        landkreis_id=lk_id,
                        timespan=timespan,
                        values=values,
                        nut=nut,
                        properties=landkreis_props)

        else:
            data = dict(n_records=6,
                        landkreis_id=lk_id,
                        timespan=[
                            '2003-06-30T23:00:00', '2003-07-01T00:00:00',
                            '2003-07-01T00:00:00', '2003-07-01T01:00:00',
                            '2003-07-01T01:00:00', '2003-07-01T02:00:00'
                        ],
                        values=[1, 3, 9, 16, 25, 36],
                        nut='Wind',
                        properties=landkreis_props)
            # for later csv file downloading
            CsvRow.objects.all().delete()
            for x, y in zip(data['timespan'], data['values']):
                CsvRow.objects.create(time=x, val=y)

    elif request.method == 'GET':
        print(request.GET)

    return HttpResponse(dumps(data), content_type="application/json")
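
The view above rewrites the CsvRow table one INSERT at a time inside the record loop. For large series, Django's bulk_create can batch the writes; a sketch assuming the same CsvRow model (the import path is hypothetical):

from myapp.models import CsvRow  # hypothetical app path

def rebuild_csv_rows(records):
    # records: an iterable of (time, value) pairs from the OEP query.
    CsvRow.objects.all().delete()
    CsvRow.objects.bulk_create(
        [CsvRow(time=t, val=y) for t, y in records],
        batch_size=1000,  # bound the size of each INSERT statement
    )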
コード例 #54
0
ファイル: reserve.py プロジェクト: gkoller/SuPA
    def _port_resources_in_use(
            self, session: orm.Session) -> Dict[str, PortResources]:
        """Calculate port resources in use for active reservations that overlap with ours.

        Active reservations being those that:

        - are currently being held
        - have been committed and not yet been terminated.

        Overlap as in: their start times and end times overlap with ours.

        The bandwidth in use is calculated per port.
        E.g., if a port is used in two active reservations,
        (one reservation for a connection with a bandwidth of 100 Mbps
        and another with a bandwidth of 400 Mbps)
        the bandwidth in use for the port will be:
        100 + 400 = 500 Mbps.

        Similarly for the VLANs in use.
        Given the same port used in two active reservations
        (one reservation where the port has a VLAN of 100
        and another one where the port has a VLAN of 105),
        the VLANs in use for the port will be:
        VlanRanges([100, 105])

        Args:
            session: A SQLAlchemy session to construct and run the DB query

        Returns:
            A dict mapping port (names) to their port resources.

        """
        # To calculate the active overlapping reservations we need to perform a self-join.
        # One part of the join is for our (current) reservation.
        # The other part is for joining the overlapping ones with our (current) reservation.
        CurrentReservation = aliased(Reservation, name="cr")
        overlap_active = (
            # The other part
            session.query(Reservation).join((
                CurrentReservation,
                # Do they overlap?
                and_(
                    CurrentReservation.start_time < Reservation.end_time,
                    CurrentReservation.end_time > Reservation.start_time,
                ),
            )).filter(
                # Only select active reservations
                or_(
                    and_(
                        Reservation.reservation_state ==
                        ReservationStateMachine.ReserveStart.name,
                        Reservation.provisioning_state.isnot(None),
                        Reservation.lifecycle_state ==
                        LifecycleStateMachine.Created.name,
                    ),
                    Reservation.reservation_state ==
                    ReservationStateMachine.ReserveHeld.name,
                ))
            # And only those that overlap with our reservation.
            .filter(CurrentReservation.connection_id == self.connection_id
                    )).subquery()
        OverlappingActiveReservation = aliased(Reservation,
                                               overlap_active,
                                               name="oar")

        # To map ports to resources (bandwidth and vlan) in use
        # we need to unpivot the two pairs of port columns from the reservations table into separate rows.
        # E.g., from:
        #
        # row 1: connection_id, ..., src_port, src_selected_vlan, dst_port, dst_selected_vlan, ...
        #
        # to:
        #
        # row 1: connection_id, port, vlan  <-- former src_port, src_selected_vlan
        # row 2: connection_id, port, vlan  <-- former dst_port, dst_selected_vlan
        src_port = session.query(
            Reservation.connection_id.label("connection_id"),
            Reservation.src_port.label("port"),
            Reservation.src_selected_vlan.label("vlan"),
        )
        dst_port = session.query(
            Reservation.connection_id,
            Reservation.dst_port.label("port"),
            Reservation.dst_selected_vlan.label("vlan"),
        )
        ports = src_port.union(dst_port).subquery()

        # With the 'hard' work done for us in two subqueries,
        # calculating the port resources (bandwidth, VLANs) in use is now relatively straightforward.
        port_resources_in_use = (
            session.query(
                ports.c.port,
                func.sum(
                    OverlappingActiveReservation.bandwidth).label("bandwidth"),
                func.group_concat(ports.c.vlan,
                                  ",").label("vlans"),  # yes, plural!
            ).select_from(OverlappingActiveReservation).join(
                ports, OverlappingActiveReservation.connection_id ==
                ports.c.connection_id).filter(
                    ports.c.port.in_((
                        OverlappingActiveReservation.src_port,
                        OverlappingActiveReservation.dst_port,
                    ))).group_by(ports.c.port).all())

        return {
            rec.port: PortResources(bandwidth=rec.bandwidth,
                                    vlans=VlanRanges(rec.vlans))
            for rec in port_resources_in_use
        }
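
The join condition at the heart of the query above (our start time before the other's end time, and our end time after the other's start time) is the standard interval-overlap test. The same predicate in isolation, for reference:

import datetime

def overlaps(a_start, a_end, b_start, b_end):
    # Two intervals overlap iff each one starts before the other ends.
    return a_start < b_end and b_start < a_end

a = (datetime.datetime(2021, 1, 1, 10), datetime.datetime(2021, 1, 1, 12))
b = (datetime.datetime(2021, 1, 1, 11), datetime.datetime(2021, 1, 1, 13))
c = (a[1], a[1] + datetime.timedelta(hours=1))
assert overlaps(*a, *b)      # 10:00-12:00 and 11:00-13:00 share an hour
assert not overlaps(*a, *c)  # intervals that merely touch do not overlap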
コード例 #55
0
def _check_provider_capacity(sess, claim_obj):
    """Verifies that providers have capacity for all resources listed in the
    supplied models.Claim object's list of allocations and returns a dict,
    keyed by provider UUID, of Provider objects that include the provider's
    generation at the time of the capacity check.
    """
    p_ids = set(
        alloc_item.provider.id for alloc_item in claim_obj.allocation_items
    )

    rt_ids = set(
        lookup.resource_type_id_from_code(alloc_item.resource_type)
        for alloc_item in claim_obj.allocation_items
    )

    # The SQL we generate here looks like this:
    #
    # SELECT
    #  p.id AS provider_id,
    #  p.uuid AS provider_uuid,
    #  p.generation AS provider_generation,
    #  i.resource_type_id,
    #  i.total,
    #  i.reserved,
    #  i.min_unit,
    #  i.max_unit,
    #  i.step_size,
    #  i.allocation_ratio,
    #  usages.total_used
    # FROM providers AS p
    # JOIN inventories AS i
    #   ON p.id = i.provider_id
    # LEFT JOIN (
    #   SELECT ai.provider_id, ai.resource_type_id, SUM(ai.used) AS total_used
    #   FROM allocation_items AS ai
    #   JOIN (
    #     SELECT id AS allocation_id
    #     FROM allocations
    #     WHERE acquire_time >= $CLAIM_START
    #     AND release_time < $CLAIM_END
    #     GROUP BY id
    #   ) AS allocs_in_window
    #     ON ai.allocation_id = allocs_in_window.allocation_id
    #   WHERE ai.resource_type_id = $RESOURCE_TYPE
    #   AND ai.provider_id IN ($PROVIDER_IDS)
    #   GROUP BY ai.provider_id, ai.resource_type_id
    # ) AS usages
    #   ON i.provider_id = usages.provider_id
    #   AND i.resource_type_id = usages.resource_type_id
    # WHERE i.resource_type_id IN ($RESOURCE_TYPES)
    # AND p.id IN ($PROVIDER_IDS)

    p_tbl = db.get_table('providers')
    inv_tbl = db.get_table('inventories')
    alloc_tbl = db.get_table('allocations')
    alloc_item_tbl = db.get_table('allocation_items')

    alloc_window_cols = [
        alloc_tbl.c.id.label('allocation_id'),
    ]
    allocs_in_window_subq = sa.select(alloc_window_cols).where(
        sa.and_(
            alloc_tbl.c.acquire_time >= claim_obj.acquire_time,
            alloc_tbl.c.release_time < claim_obj.release_time,
        )
    ).group_by(alloc_tbl.c.id)
    allocs_in_window_subq = sa.alias(allocs_in_window_subq, "allocs_in_window")
    usage_cols = [
        alloc_item_tbl.c.provider_id,
        alloc_item_tbl.c.resource_type_id,
        func.sum(alloc_item_tbl.c.used).label('total_used'),
    ]
    alloc_item_to_alloc_window = sa.outerjoin(
        alloc_item_tbl, allocs_in_window_subq,
        alloc_item_tbl.c.allocation_id == allocs_in_window_subq.c.allocation_id
    )
    usage_subq = sa.select(usage_cols).select_from(
        alloc_item_to_alloc_window
    ).where(
        sa.and_(
            alloc_item_tbl.c.resource_type_id.in_(rt_ids),
            alloc_item_tbl.c.provider_id.in_(p_ids)
        ),
    ).group_by(
        alloc_item_tbl.c.provider_id,
        alloc_item_tbl.c.resource_type_id,
    )
    usage_subq = sa.alias(usage_subq, "usages")

    p_to_inv = sa.join(
        p_tbl, inv_tbl,
        sa.and_(
            p_tbl.c.id == inv_tbl.c.provider_id,
            inv_tbl.c.resource_type_id.in_(rt_ids),
        )
    )
    inv_to_usage = sa.outerjoin(
        p_to_inv, usage_subq,
        sa.and_(
            inv_tbl.c.provider_id == usage_subq.c.provider_id,
            inv_tbl.c.resource_type_id == usage_subq.c.resource_type_id,
        ),
    )
    cols = [
        p_tbl.c.id.label('provider_id'),
        p_tbl.c.uuid.label('provider_uuid'),
        p_tbl.c.generation.label('provider_generation'),
        inv_tbl.c.resource_type_id,
        inv_tbl.c.total,
        inv_tbl.c.reserved,
        inv_tbl.c.min_unit,
        inv_tbl.c.max_unit,
        inv_tbl.c.step_size,
        inv_tbl.c.allocation_ratio,
        func.coalesce(usage_subq.c.total_used, 0).label('total_used'),
    ]
    sel = sa.select(cols).select_from(
        inv_to_usage
    ).where(
        sa.and_(
            inv_tbl.c.resource_type_id.in_(rt_ids),
            p_tbl.c.id.in_(p_ids),
        ),
    )
    recs = [dict(r) for r in sess.execute(sel)]

    # dict, keyed by provider_id, of ProviderUsage objects
    provider_usages = {}
    for rec in recs:
        p_id = rec['provider_id']
        if p_id not in provider_usages:
            p_obj = models.Provider(
                id=p_id,
                uuid=rec['provider_uuid'],
                generation=rec['provider_generation'],
            )
            provider_usages[p_id] = models.ProviderUsages(provider=p_obj)

        rt_id = rec['resource_type_id']
        p_usage = provider_usages[p_id]

        p_usage.usages[rt_id] = models.Usage(
            total=rec['total'],
            reserved=rec['reserved'],
            min_unit=rec['min_unit'],
            max_unit=rec['max_unit'],
            step_size=rec['step_size'],
            allocation_ratio=rec['allocation_ratio'],
            total_used=rec['total_used'],
        )

    # Verify that all providers listed in our claim's allocation items have
    # inventory for the resource types we're going to allocate for. You never
    # know, an admin may have deleted an inventory record or something in
    # between when our claim was generated and when we try to execute it...
    for alloc_item in claim_obj.allocation_items:
        if alloc_item.provider.id not in provider_usages:
            raise exception.MissingInventory(
                resource_type=alloc_item.resource_type,
                provider=alloc_item.provider.uuid,
            )
        else:
            alloc_rt_id = lookup.resource_type_id_from_code(
                alloc_item.resource_type)
            alloc_p_id = alloc_item.provider.id
            if alloc_rt_id not in provider_usages[alloc_p_id].usages:
                raise exception.MissingInventory(
                    resource_type=alloc_item.resource_type,
                    provider=alloc_item.provider.uuid,
                )

    # Do the checks again that resource constraint amounts, step sizing, min
    # and max unit are not violated
    for alloc_item in claim_obj.allocation_items:
        alloc_rt_id = lookup.resource_type_id_from_code(
            alloc_item.resource_type)
        alloc_p_id = alloc_item.provider.id
        usage = provider_usages[alloc_p_id].usages[alloc_rt_id]
        amount_needed = alloc_item.used
        total = usage.total
        reserved = usage.reserved
        allocation_ratio = usage.allocation_ratio
        min_unit = usage.min_unit
        max_unit = usage.max_unit
        step_size = usage.step_size
        total_used = usage.total_used

        # check min_unit, max_unit, step_size
        if amount_needed < min_unit:
            raise exception.MinUnitViolation(
                provider=alloc_item.provider.uuid,
                resource_type=alloc_item.resource_type,
                min_unit=min_unit,
                requested_amount=amount_needed,
            )

        if amount_needed > max_unit:
            raise exception.MaxUnitViolation(
                provider=alloc_item.provider.uuid,
                resource_type=alloc_item.resource_type,
                max_unit=max_unit,
                requested_amount=amount_needed,
            )

        if amount_needed % step_size != 0:
            raise exception.StepSizeViolation(
                provider=alloc_item.provider.uuid,
                resource_type=alloc_item.resource_type,
                step_size=step_size,
                requested_amount=amount_needed,
            )

        capacity = (total - reserved) * allocation_ratio
        if capacity < total_used + amount_needed:
            raise exception.CapacityExceeded(
                provider=alloc_item.provider.uuid,
                resource_type=alloc_item.resource_type,
                requested_amount=amount_needed,
                total=total,
                total_used=total_used,
                reserved=reserved,
                allocation_ratio=allocation_ratio,
            )

    # we return a dict, keyed by provider UUID, of Provider objects that
    # contain the generation that the provider was at when it passed capacity
    # checks
    return {
        p_usage.provider.uuid: p_usage.provider
        for p_usage in provider_usages.values()
    }
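
The per-item validation at the end of _check_provider_capacity boils down to four independent checks against an inventory/usage record. A condensed sketch of the same rules (the helper name is illustrative, not part of the module):

def first_violation(usage, amount):
    # Return the name of the first violated constraint, or None if the
    # requested amount fits.
    if amount < usage.min_unit:
        return 'min_unit'
    if amount > usage.max_unit:
        return 'max_unit'
    if amount % usage.step_size != 0:
        return 'step_size'
    capacity = (usage.total - usage.reserved) * usage.allocation_ratio
    if capacity < usage.total_used + amount:
        return 'capacity'
    return None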
コード例 #56
0
def wseries_fetch_data_single_point(request):
    """
    Return the data for a given weather point as GeoJSON.
    The position is provided via the HTTP POST/GET parameters.

    request: the current mouse position (@click) from the client

    :return: GeoJSON feature as HTTP response
    """

    features = []
    if request.method == 'POST':
        # get the latitude and longitude of the mouseClick event
        lat = float(request.POST.get('lat'))
        lon = float(request.POST.get('lon'))
        leaflet_id = int(request.POST.get('leaflet_id'))
        location_id = int(request.POST.get('location_id'))
        variable_id = int(request.POST.get('variable_id'))
        start_year = int(request.POST.get('start_year'))
        start_month = int(request.POST.get('start_month'))
        end_year = int(request.POST.get('end_year'))
        end_month = int(request.POST.get('end_month'))

        start_time = '{}-{:02d}-01T00:00:00'.format(start_year, start_month)
        end_time = '{}-{:02d}-01T00:00:00'.format(end_year, end_month)

        if not LOCAL_TESTING:

            oep_query = Serializer.session.query(
                open_fred_classes['Series'],
                open_fred_classes['Timespan'],
                open_fred_classes['Variable'],
                open_fred_classes['Location'],
            ) \
                .filter(
                    and_(
                        open_fred_classes['Timespan'].start >= start_time,
                        open_fred_classes['Timespan'].start <= end_time,
                        open_fred_classes['Variable'].id == variable_id,
                        open_fred_classes['Location'].id == location_id,
                    )
                ) \
                .join(open_fred_classes['Timespan']) \
                .join(open_fred_classes['Variable']) \
                .join(open_fred_classes['Location'])

            formatted_data = {}
            timespan_ids = []
            heights = []
            # for later csv file downloading
            CsvRow.objects.all().delete()
            for record in oep_query:
                timespan_id = record.Series.timespan_id

                height = record.Series.height

                if timespan_id not in timespan_ids:
                    # resets the height list for every new timespan_id
                    heights = []
                    timespan_ids.append(timespan_id)

                if height not in heights:
                    heights.append(height)

                    # resets the data values for next height index
                    values = []
                    timespans = []

                # initialize the data indexes by height
                if height not in formatted_data:
                    formatted_data[height] = dict(x=[], y=[])

                # Construct the values and their associated timestamps.
                # The timespan runs from the start date (included) to the
                # end date (excluded) in steps of the resolution. It is easier
                # to rebuild it than to parse it, as it is stored as intervals
                # of datetime values.
                temp_values = record.Series.values
                start_d = record.Series.timespan.start

                if isinstance(start_d, str):
                    start_d = parser.parse(start_d)

                end_d = record.Series.timespan.stop

                if isinstance(end_d, str):
                    end_d = parser.parse(end_d)

                cur_date = start_d
                t_res = record.Series.timespan.resolution

                if t_res == datetime.timedelta(0):
                    t_res += datetime.timedelta(hours=1)

                if isinstance(t_res, str):
                    dt = datetime.timedelta(minutes=TIME_STEPS[t_res])
                else:
                    dt = t_res

                idx = 0
                while cur_date <= end_d - dt:
                    y = temp_values[idx]
                    values.append(y)
                    t = datetime.datetime.isoformat(cur_date)
                    timespans.append(t)
                    # for later csv file downloading
                    CsvRow.objects.create(time=t,
                                          val=str(y),
                                          height=str(height))
                    cur_date = cur_date + dt
                    idx = idx + 1
                formatted_data[height][
                    'x'] = formatted_data[height]['x'] + timespans
                formatted_data[height][
                    'y'] = formatted_data[height]['y'] + values

            pos = geojson.Feature(geometry=Point((lon, lat)),
                                  properties=dict(
                                      location_id=location_id,
                                      heights=[str(h) for h in heights],
                                      variable=variable_id,
                                      data=formatted_data,
                                      leaflet_id=leaflet_id,
                                  ))
            features = pos
        else:
            pos = geojson.Feature(
                geometry=Point((lon, lat)),
                properties=dict(
                    location_id=location_id,
                    heights=["10.0"],
                    data={
                        10.0: {
                            'x': [
                                '2003-06-30T23:00:00', '2003-07-01T00:00:00',
                                '2003-07-01T00:00:00', '2003-07-01T01:00:00',
                                '2003-07-01T01:00:00', '2003-07-01T02:00:00'
                            ],
                            'y': [1, 3, 9, 16, 25, 36]
                        },
                        80.0: {
                            'x': [
                                '2003-06-30T23:00:00', '2003-07-01T00:00:00',
                                '2003-07-01T00:00:00', '2003-07-01T01:00:00',
                                '2003-07-01T01:00:00', '2003-07-01T02:00:00'
                            ],
                            'y': [34, 7, 2, 16, -1, 36]
                        }
                    },
                    variable=variable_id,
                    leaflet_id=leaflet_id,
                ))

            # for later csv file downloading
            CsvRow.objects.all().delete()
            for k in pos['properties']['data'].keys():
                x_data = pos['properties']['data'][k]['x']
                y_data = pos['properties']['data'][k]['y']
                for x, y in zip(x_data, y_data):
                    CsvRow.objects.create(time=x, val=str(y), height=str(k))
            features = pos

    elif request.method == 'GET':
        print(request.GET)

    return HttpResponse(dumps(features), content_type="application/json")
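
The inner while-loop above rebuilds a series' timestamps from its timespan's start, stop and resolution. The same iteration as a small generator (a sketch; the cur <= stop - step bound mirrors the loop's half-open interval):

import datetime

def iter_timestamps(start, stop, step):
    # Yield ISO timestamps from start (included) to stop (excluded)
    # in increments of step.
    cur = start
    while cur <= stop - step:
        yield datetime.datetime.isoformat(cur)
        cur += step

start = datetime.datetime(2003, 6, 30, 23)
stop = datetime.datetime(2003, 7, 1, 2)
list(iter_timestamps(start, stop, datetime.timedelta(hours=1)))
# ['2003-06-30T23:00:00', '2003-07-01T00:00:00', '2003-07-01T01:00:00']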
コード例 #57
0
def _find_providers_with_resource(ctx, acquire_time, release_time,
        resource_constraint, exclude=None, limit=50):
    """Queries for providers that have capacity for the requested amount of a
    resource type and optionally meet resource-specific capability
    constraints. The query is done in a claim start/end window.

    The SQL generated for a resource constraint without the optional capability
    constraint ends up looking like this:

    SELECT p.id, p.uuid
    FROM providers AS p
    JOIN inventories AS i
      ON p.id = i.provider_id
    LEFT JOIN (
      SELECT ai.provider_id, SUM(ai.used) AS total_used
      FROM allocation_items AS ai
      JOIN (
        SELECT id AS allocation_id
        FROM allocations
        WHERE acquire_time >= $CLAIM_START
        AND release_time < $CLAIM_END
        GROUP BY id
      ) AS allocs_in_window
        ON ai.allocation_id = allocs_in_window.allocation_id
      WHERE ai.resource_type_id = $RESOURCE_TYPE
      GROUP BY ai.provider_id
    ) AS usages
      ON i.provider_id = usages.provider_id
    WHERE i.resource_type_id = $RESOURCE_TYPE
    AND ((i.total - i.reserved) * i.allocation_ratio) >=
         ($RESOURCE_REQUEST_AMOUNT + COALESCE(usages.total_used, 0))
    AND i.min_unit <= $RESOURCE_REQUEST_AMOUNT
    AND i.max_unit >= $RESOURCE_REQUEST_AMOUNT
    AND $RESOURCE_REQUEST_AMOUNT % i.step_size = 0

    If the optional `exclude` argument is provided, we tack on a:

    WHERE p.id NOT IN ($EXCLUDE)

    clause. The `exclude` argument is populated when the capabilities
    constraints that may have been previously processed identified some
    providers that should be excluded (they met an "exclusion filter" -- i.e.
    they matched for a 'forbid' specification in a constraint).
    """
    p_tbl = db.get_table('providers')
    inv_tbl = db.get_table('inventories')
    alloc_tbl = db.get_table('allocations')
    alloc_item_tbl = db.get_table('allocation_items')

    sess = db.get_session()

    rt_id = lookup.resource_type_id_from_code(
        resource_constraint.resource_type)
    alloc_window_cols = [
        alloc_tbl.c.id.label('allocation_id'),
    ]
    allocs_in_window_subq = sa.select(alloc_window_cols).where(
        sa.and_(
            alloc_tbl.c.acquire_time >= acquire_time,
            alloc_tbl.c.release_time < release_time,
        )
    ).group_by(alloc_tbl.c.id)
    allocs_in_window_subq = sa.alias(allocs_in_window_subq, "allocs_in_window")
    usage_cols = [
        alloc_item_tbl.c.provider_id,
        func.sum(alloc_item_tbl.c.used).label('total_used'),
    ]
    alloc_item_to_alloc_window = sa.outerjoin(
        alloc_item_tbl, allocs_in_window_subq,
        alloc_item_tbl.c.allocation_id == allocs_in_window_subq.c.allocation_id
    )
    usage_subq = sa.select(usage_cols).select_from(
        alloc_item_to_alloc_window
    ).where(
        alloc_item_tbl.c.resource_type_id == rt_id
    ).group_by(
        alloc_item_tbl.c.provider_id
    )
    usage_subq = sa.alias(usage_subq, "usages")

    join_to = p_tbl
    if resource_constraint.capability_constraint:
        cap_constraint = resource_constraint.capability_constraint
        join_to = _select_add_capability_constraint(ctx, p_tbl, cap_constraint)

    p_to_inv = sa.join(
        join_to, inv_tbl,
        sa.and_(
            p_tbl.c.id == inv_tbl.c.provider_id,
            inv_tbl.c.resource_type_id == rt_id,
        )
    )
    inv_to_usage = sa.outerjoin(
        p_to_inv, usage_subq,
        inv_tbl.c.provider_id == usage_subq.c.provider_id
    )
    cols = [
        p_tbl.c.id,
        p_tbl.c.uuid,
    ]
    sel = sa.select(cols).select_from(
        inv_to_usage
    ).where(
        sa.and_(
            inv_tbl.c.resource_type_id == rt_id,
            ((inv_tbl.c.total - inv_tbl.c.reserved)
                * inv_tbl.c.allocation_ratio)
            >= (resource_constraint.max_amount +
                func.coalesce(usage_subq.c.total_used, 0)),
            inv_tbl.c.min_unit <= resource_constraint.max_amount,
            inv_tbl.c.max_unit >= resource_constraint.max_amount,
            resource_constraint.max_amount % inv_tbl.c.step_size == 0,
        )
    )
    if exclude:
        sel = sel.where(~p_tbl.c.id.in_(set(exclude)))
    if limit != UNLIMITED:
        sel = sel.limit(limit)
    return {
        r[0]: r[1] for r in sess.execute(sel)
    }
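
A hedged usage sketch for the function above, with a hypothetical stand-in for the resource constraint object (the real constraint class lives elsewhere in this codebase; only the attributes the function reads are modelled):

import collections
import datetime

# Hypothetical stand-in; not the project's actual constraint class.
ResourceConstraint = collections.namedtuple(
    'ResourceConstraint',
    ['resource_type', 'max_amount', 'capability_constraint'],
)

constraint = ResourceConstraint('CPU', 4, None)
acquire = datetime.datetime(2021, 1, 1)
release = datetime.datetime(2021, 1, 2)

# Would return {provider_id: provider_uuid} for providers with room for
# 4 units of CPU inside the claim window, skipping two rejected providers:
# providers = _find_providers_with_resource(
#     ctx, acquire, release, constraint, exclude={12, 37})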
コード例 #58
0
def ppr_view(request):
    """
    Return a GeoJSON FeatureCollection with all power plants.
    :return: HttpResponse containing the GeoJSON payload
    """

    myfeatures = []

    if request.method == 'POST':
        region_name = str(request.POST.get('region_name'))
        generation_type = str(request.POST.get('generation_type'))

        region_nut = Serializer.regions_nuts[region_name]
        if not LOCAL_TESTING:
            # define the table columns for query
            tbl_cols = Bundle(
                'powerplant',
                Powerplants.id,
                Powerplants.nuts,  # added
                Powerplants.version,
                Powerplants.generation_type,
                Powerplants.generation_subtype,
                Powerplants.scenario)
            # create query
            oep_query = Serializer.session.query(
                Powerplants.rea_geom_4326,
                tbl_cols
            ) \
                .filter(
                    and_(
                        tbl_cols.c.nuts.in_([region_nut]),
                        tbl_cols.c.version == EGO_DP_VERSION,
                        tbl_cols.c.scenario == EGO_DP_SCENARIO,
                        tbl_cols.c.generation_type == generation_type
                    )
                )

            print('There are', oep_query.count(),
                  'powerplants in the database')

            for record in oep_query:
                pos = shape(loadswkb(str(record[0]), True))
                feature = Feature(
                    id=record.powerplant.id,
                    geometry=pos,
                    properties=dict(
                        region_nut=region_nut,
                        region_name=region_name,
                        generation_type=generation_type,
                        generation_subtype=record.powerplant.generation_subtype))
                myfeatures.append(feature)

        else:
            landkreis_ids = Serializer.regions_to_landkreis[region_nut]
            for lk_id in landkreis_ids:
                lk_wkb = Serializer.landkreis_wkbs[lk_id]
                landkreis_center = loadswkb(str(lk_wkb), True).centroid

                feature = Feature(id=lk_id,
                                  geometry=landkreis_center,
                                  properties=dict(
                                      region_nut=region_nut,
                                      region_name=region_name,
                                      generation_type=generation_type,
                                      generation_subtype='',
                                  ))
                myfeatures.append(feature)

    elif request.method == 'GET':
        print(request.GET)

    return HttpResponse(dumps(FeatureCollection(myfeatures)),
                        content_type="application/json")
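
For reference, a minimal sketch of building and serializing a feature collection with the geojson package, as the view above does (the coordinates are GeoJSON-style lon/lat and the values are illustrative):

from geojson import Feature, FeatureCollection, Point, dumps

features = [
    Feature(id=1,
            geometry=Point((13.4, 52.5)),
            properties=dict(generation_type='wind')),
]
payload = dumps(FeatureCollection(features))
# payload is a GeoJSON string ready for an application/json response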
コード例 #59
0
def report(task_id):
    # Read the filter parameters
    search_vul_type = request.args.get("search_vul_type", None)
    search_rule = request.args.get("search_rule", None)
    search_level = request.args.get("search_level", None)
    # Current page number; defaults to the first page
    page = int(request.args.get("page", 1))

    # Check that the task id exists
    task_info = CobraTaskInfo.query.filter_by(id=task_id).first()
    if not task_info:
        return jsonify(status="4004", msg="report id not found.")

    # Fetch the task information
    repository = task_info.target
    task_created_at = task_info.created_at
    time_consume = task_info.time_consume
    time_start = task_info.time_start
    time_end = task_info.time_end
    files = task_info.file_count
    code_number = task_info.code_number
    if code_number is None or code_number == 0:
        code_number = u"统计中..."
    else:
        code_number = common.convert_number(code_number)

    # Format the timestamps as readable times
    time_start = time.strftime("%H:%M:%S", time.localtime(time_start))
    time_end = time.strftime("%H:%M:%S", time.localtime(time_end))

    # Fetch the project information
    project = CobraProjects.query.filter_by(repository=repository).first()
    if project is None:
        project_name = repository
        author = 'Anonymous'
        project_description = 'Compress Project'
        project_framework = 'Unknown Framework'
        project_url = 'Unknown URL'
    else:
        project_name = project.name
        author = project.author
        project_description = project.remark
        project_framework = project.framework
        project_url = project.url

    # Total number of vulnerabilities found
    scan_results = CobraResults.query.filter_by(task_id=task_id).all()
    total_vul_count = len(scan_results)

    # Vulnerability types that occurred
    res = db.session.query(count().label("vul_number"), CobraVuls.name).filter(
        and_(
            CobraResults.task_id == task_id,
            CobraResults.rule_id == CobraRules.id,
            CobraVuls.id == CobraRules.vul_id,
        )).group_by(CobraVuls.name).all()
    # Options for the vulnerability-type filter
    select_vul_type = list()
    # Number of findings per vulnerability type
    chart_vuls_number = list()
    for r in res:
        select_vul_type.append(r[1])
        chart_vuls_number.append({"vuls_name": r[1], "vuls_number": r[0]})

    # Rule types that were triggered
    res = db.session.query(CobraRules.description).filter(
        and_(CobraResults.task_id == task_id,
             CobraResults.rule_id == CobraRules.id,
             CobraVuls.id == CobraRules.vul_id)).group_by(
                 CobraRules.description).all()
    select_rule_type = list()
    for r in res:
        select_rule_type.append(r[0])

    # Count the vulnerabilities per severity level
    res = db.session.query(count().label('vuln_number'),
                           CobraRules.level).filter(
                               and_(
                                   CobraResults.task_id == task_id,
                                   CobraResults.rule_id == CobraRules.id,
                                   CobraVuls.id == CobraRules.vul_id,
                               )).group_by(CobraRules.level).all()
    low_amount = medium_amount = high_amount = unknown_amount = 0
    for every_level in res:
        """
        低危:1
        中危:2
        高危:3
        未定义:其他值
        """
        if every_level[1] == 1:
            low_amount = every_level[0]
        elif every_level[1] == 2:
            medium_amount = every_level[0]
        elif every_level[1] == 3:
            high_amount = every_level[0]
        else:
            unknown_amount = every_level[0]

    # Retrieve all vulnerability details
    filter_group = (
        CobraResults.task_id == task_id,
        CobraResults.rule_id == CobraRules.id,
        CobraVuls.id == CobraRules.vul_id,
    )

    # Add SQL conditions based on the submitted filters
    if search_vul_type is not None and search_vul_type != "all":
        filter_group += (CobraVuls.name == search_vul_type, )
    if search_rule is not None and search_rule != "all":
        filter_group += (CobraRules.description == search_rule, )
    if search_level is not None and search_level != "all":
        filter_group += (CobraRules.level == search_level, )

    # Build the SQL query
    all_scan_results = db.session.query(
        CobraResults.file, CobraResults.line, CobraResults.code,
        CobraRules.description, CobraRules.level, CobraRules.regex_location,
        CobraRules.regex_repair, CobraRules.repair,
        CobraVuls.name).filter(*filter_group)
    page_size = 5
    total_number = all_scan_results.all()
    total_pages = max(1, (len(total_number) + page_size - 1) // page_size)
    all_scan_results = all_scan_results.limit(page_size).offset(
        (page - 1) * page_size).all()

    # Process the vulnerability details
    vulnerabilities = list()
    map_level = ["未定义", "低危", "中危", "高危"]
    map_color = ["#555", "black", "orange", "red"]
    current_url = ''
    for result in all_scan_results:

        # Build the row data
        data_dict = dict()
        data_dict["file"] = result[0]
        data_dict["line"] = result[1]
        data_dict["code"] = result[2]
        data_dict["rule"] = result[3]
        data_dict["level"] = map_level[result[4]]
        data_dict["color"] = map_color[result[4]]
        data_dict["repair"] = result[7]
        data_dict['verify'] = ''
        if project_framework != '':
            for rule in detection.Detection().rules:
                if rule['name'] == project_framework:
                    if 'public' in rule:
                        if result.file[:len(rule['public'])] == rule['public']:
                            data_dict[
                                'verify'] = project_url + result.file.replace(
                                    rule['public'], '')

        # Check whether vulnerabilities already has an entry for this vul_type;
        # if it does, append to that entry's data list,
        # otherwise create a new one
        found = False
        for v in vulnerabilities:
            if v["vul_type"] == result[-1]:
                # Append to the existing entry
                v["data"].append(data_dict)
                # Mark as found
                found = True
                break
        # Not found; create a new entry
        if not found:
            temp_dict = dict(vul_type=result[-1], data=list())
            temp_dict["data"].append(data_dict)
            vulnerabilities.append(temp_dict)

        current_url = request.url.replace("&page={}".format(page),
                                          "").replace("page={}".format(page),
                                                      "")
        if "?" not in current_url:
            current_url += "?"

    data = {
        'id': int(task_id),
        'project_name': project_name,
        'project_repository': repository,
        'project_description': project_description,
        'project_url': project_url,
        'project_framework': project_framework,
        'author': author,
        'task_created_at': task_created_at,
        'time_consume': common.convert_time(time_consume),
        'time_start': time_start,
        'time_end': time_end,
        'files': common.convert_number(files),
        'code_number': code_number,
        'vul_count': common.convert_number(total_vul_count),
        'vulnerabilities': vulnerabilities,
        "select_vul_type": select_vul_type,
        "select_rule_type": select_rule_type,
        "chart_vuls_number": chart_vuls_number,
        "current_page": page,
        "total_pages": total_pages,
        "filter_vul_number": len(total_number),
        "current_url": current_url,
        'amount': {
            'h': high_amount,
            'm': medium_amount,
            'l': low_amount,
            'u': unknown_amount
        },
    }
    return render_template('report.html', data=data)
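
The page arithmetic in the report view is ceiling division over the filtered result count. A small helper showing the page count and the LIMIT/OFFSET values it implies (the names are illustrative):

def paginate(total_items, page, page_size=5):
    # Ceiling division; an empty result set still gets one (empty) page.
    total_pages = max(1, (total_items + page_size - 1) // page_size)
    offset = (page - 1) * page_size
    return total_pages, page_size, offset

paginate(11, 1)   # (3, 5, 0)  -> rows 1-5
paginate(11, 3)   # (3, 5, 10) -> row 11
paginate(10, 2)   # (2, 5, 5)  -> rows 6-10; no phantom third page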
コード例 #60
0
def _select_add_capability_constraint(ctx, relation, constraint):
    """Adds the following expression to the supplied SELECT statement:

    if "any" is in the constraint or if there's only one cap in "require":

        JOIN provider_capabilities AS pc
        ON providers.id = pc.provider_id
        AND pc.capability_id IN ($ANY_CAPS)

    if "require" is in the constraint and there's >1 required cap:

        JOIN (
            SELECT pc.provider_id, COUNT(*) AS num_caps
            FROM provider_capabilities AS pc
            WHERE pc.capability_id IN ($REQUIRE_CAPS)
            GROUP BY pc.provider_id
            HAVING COUNT(*) = $NUM_REQUIRE_CAPS
        ) AS provs_having_all
        ON providers.id = provs_having_all.provider_id

    if "forbid" is in the constraint:

        JOIN provider_capabilities AS pc
        ON providers.id = pc.provider_id
        AND pc.capability_id NOT IN ($FORBID_CAPS)

    """
    p_tbl = db.get_table('providers')
    p_caps_tbl = db.get_table('provider_capabilities')
    p_caps_tbl = sa.alias(p_caps_tbl, name='pc')
    if constraint.require_caps:
        if len(constraint.require_caps) == 1:
            cap_id = lookup.capability_id_from_code(constraint.require_caps[0])
            # Just join to provider_capabilities and be done with it. No need
            # to get more complicated than that.
            relation = sa.join(
                p_tbl, p_caps_tbl,
                sa.and_(
                    p_tbl.c.id == p_caps_tbl.c.provider_id,
                    p_caps_tbl.c.capability_id == cap_id
                )
            )
        else:
            # This is the complicated bit. We join to a derived table
            # representing the providers that have ALL of the required
            # capabilities.
            require_cap_ids = [
                lookup.capability_id_from_code(cap)
                for cap in constraint.require_caps
            ]
            cols = [
                p_caps_tbl.c.provider_id,
                func.count(p_caps_tbl.c.capability_id).label('num_caps')
            ]
            derived = sa.select(cols).group_by(
                p_caps_tbl.c.provider_id
            ).where(
                p_caps_tbl.c.capability_id.in_(require_cap_ids)
            ).having(
                func.count(p_caps_tbl.c.capability_id) == len(require_cap_ids)
            )
            relation = sa.join(
                p_tbl, derived,
                p_tbl.c.id == derived.c.provider_id,
            )
    if constraint.forbid_caps or constraint.any_caps:
        conds = [
            p_tbl.c.id == p_caps_tbl.c.provider_id,
        ]
        if constraint.forbid_caps:
            forbid_cap_ids = [
                lookup.capability_id_from_code(cap)
                for cap in constraint.forbid_caps
            ]
            conds.append(
                ~p_caps_tbl.c.capability_id.in_(forbid_cap_ids)
            )
        if constraint.any_caps:
            any_cap_ids = [
                lookup.capability_id_from_code(cap)
                for cap in constraint.any_caps
            ]
            conds.append(
                p_caps_tbl.c.capability_id.in_(any_cap_ids)
            )
        relation = sa.join(
            relation, p_caps_tbl,
            sa.and_(*conds),
        )
    return relation
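
The derived table built for multiple required capabilities is the classic relational-division idiom: keep only the providers whose matching capability rows number exactly len(require_cap_ids). The same construct in isolation, using the old-style sa.select([...]) API seen above (the table definition and values are illustrative):

import sqlalchemy as sa
from sqlalchemy import func

metadata = sa.MetaData()
pc = sa.Table(
    'provider_capabilities', metadata,
    sa.Column('provider_id', sa.Integer),
    sa.Column('capability_id', sa.Integer),
)

required = [1, 2, 3]
provs_having_all = sa.select([pc.c.provider_id]).where(
    pc.c.capability_id.in_(required)
).group_by(
    pc.c.provider_id
).having(
    # Assumes (provider_id, capability_id) rows are unique, so the row
    # count equals the number of distinct required capabilities matched.
    func.count(pc.c.capability_id) == len(required)
)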