Example #1
 def __init__(self, dbsession, user):
     self.dbsession = dbsession
     self.user = user
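     # Conditional aggregation: each SUM(CASE ... THEN 1 ELSE 0) below
     # counts the messages falling into one category, in a single query.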
     self.query = self.dbsession.query(
         func.count(Message.id).label('total'),
         func.sum(case([(and_(Message.virusinfected == 0,
             Message.nameinfected == 0, Message.otherinfected == 0,
             Message.spam == 0, Message.highspam == 0), 1)],
             else_=0)).label('clean'),
         func.sum(case([(Message.virusinfected > 0, 1)],
             else_=0)).label('virii'),
         func.sum(case([(and_(Message.highspam == 0,
             Message.spam == 0, Message.virusinfected == 0,
             or_(Message.nameinfected > 0, Message.otherinfected > 0)), 1)],
             else_=0)).label('infected'),
         func.sum(case([(and_(Message.virusinfected == 0,
             Message.otherinfected == 0, Message.nameinfected == 0,
             or_(Message.spam > 0, Message.highspam > 0)), 1)],
             else_=0)).label('spam'),
         func.sum(case([(and_(Message.virusinfected == 0,
             Message.otherinfected == 0, Message.nameinfected == 0,
             Message.spam > 0, Message.highspam == 0), 1)],
             else_=0)).label('lowspam'),
         func.sum(case([(and_(Message.virusinfected == 0,
             Message.otherinfected == 0, Message.nameinfected == 0,
             Message.highspam > 0), 1)],
             else_=0)).label('highspam'))\
             .filter(Message.timestamp.between(
                     ustartday(self.user.timezone),
                     uendday(self.user.timezone)))
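The pattern in Example #1 (repeated in Example #24 below) is conditional aggregation: every SUM(CASE WHEN ... THEN 1 ELSE 0 END) counts one message category, so all counters arrive in one round trip. A minimal, self-contained sketch of the same idea with a hypothetical messages table, in the legacy case()/select() calling style used throughout these examples:

from sqlalchemy import Column, Integer, MetaData, Table, case, func, select

metadata = MetaData()
messages = Table('messages', metadata,
                 Column('id', Integer, primary_key=True),
                 Column('spam', Integer),
                 Column('virusinfected', Integer))

# One pass over the table yields the total plus one count per category.
stmt = select([
    func.count(messages.c.id).label('total'),
    func.sum(case([(messages.c.spam > 0, 1)], else_=0)).label('spam'),
    func.sum(case([(messages.c.virusinfected > 0, 1)], else_=0)).label('virii'),
])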
Example #2
def organizations_and_counters():
    '''Query organizations with their counters'''
    memberships = aliased(model.Member)

    query = DB.query(model.Group,
        func.count(distinct(model.Package.id)).label('nb_datasets'),
        func.count(distinct(memberships.id)).label('nb_members')
    )
    query = query.outerjoin(CertifiedPublicService)
    query = query.outerjoin(model.Package, and_(
        model.Group.id == model.Package.owner_org,
        ~model.Package.private,
        model.Package.state == 'active',
    ))
    query = query.outerjoin(memberships, and_(
        memberships.group_id == model.Group.id,
        memberships.state == 'active',
        memberships.table_name == 'user'
    ))
    query = query.filter(model.Group.state == 'active')
    query = query.filter(model.Group.approval_status == 'approved')
    query = query.filter(model.Group.is_organization == True)
    query = query.group_by(model.Group.id, CertifiedPublicService.organization_id)
    query = query.order_by(
        CertifiedPublicService.organization_id == null(),
        desc('nb_datasets'),
        desc('nb_members'),
        model.Group.title
    )
    return query
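What makes Example #2 work is that the row filters for Package and the membership alias go into the ON clause via and_(), not into WHERE; filtering in WHERE would turn the LEFT OUTER JOIN into an effective inner join and drop organizations with zero datasets. A self-contained sketch of that ON-clause idiom, with hypothetical Group/Package models:

from sqlalchemy import Column, Integer, String, and_, create_engine, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Group(Base):
    __tablename__ = 'grp'
    id = Column(Integer, primary_key=True)

class Package(Base):
    __tablename__ = 'package'
    id = Column(Integer, primary_key=True)
    owner_org = Column(Integer)
    state = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

# Package conditions live in the ON clause, so groups without active
# packages are still returned and simply count zero datasets.
query = (session.query(Group.id, func.count(Package.id).label('nb_datasets'))
         .outerjoin(Package, and_(Package.owner_org == Group.id,
                                  Package.state == 'active'))
         .group_by(Group.id))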
Example #3
def check_mcm(sequence_key):

	# kdrew: check whether at least 5 entries were uploaded into mcm
	new_scop = "1.75"
	new_timestamp = "2010-06-18"
	old_scop = "1.69"

	mcmdata_new = session.query(McmData).filter(and_(
		McmData.sequence_key == sequence_key,
		McmData.scop == new_scop,
		McmData.timestamp >= new_timestamp
		)).all()
	new_prob_sum = 0
	for data in mcmdata_new:
		new_prob_sum += data.probability
	avg_new_prob = new_prob_sum/len(mcmdata_new) if len(mcmdata_new) else 0

	mcmdata_old = session.query(McmData).filter(and_(
		McmData.sequence_key == sequence_key,
		McmData.scop == old_scop
		)).all()
	old_prob_sum = 0
	for data in mcmdata_old:
		old_prob_sum += data.probability
	avg_old_prob = old_prob_sum/len(mcmdata_old) if len(mcmdata_old) else 0

	return len(mcmdata_new) >= 5, avg_new_prob >= avg_old_prob
Example #4
 def get_pixbuf (self, attr,val):
     if attr=='category':            
         tbl = self.rd.recipe_table.join(self.rd.categories_table)
         col = self.rd.categories_table.c.category
         if hasattr(self,'category_images'):
             stment = and_(col==val,self.rd.recipe_table.c.image!=None,
                           self.rd.recipe_table.c.image!='',
                           not_(self.rd.recipe_table.c.title.in_(self.category_images))
                           )
         else:
             stment = and_(col==val,self.rd.recipe_table.c.image!=None,self.rd.recipe_table.c.image!='')
         result = tbl.select(stment,limit=1).execute().fetchone()
         if not hasattr(self,'category_images'): self.category_images = []
         if result: self.category_images.append(result.title)
     elif attr=='rating':
         return star_generator.get_pixbuf(val)
     elif attr in ['preptime','cooktime']:
         return get_time_slice(val)
     else:
         tbl = self.rd.recipe_table
         col = getattr(self.rd.recipe_table.c,attr)
         stment = and_(col==val,self.rd.recipe_table.c.image!=None,self.rd.recipe_table.c.image!='')
         result = tbl.select(stment,limit=1).execute().fetchone()
     if result and result.thumb:
         return scale_pb(get_pixbuf_from_jpg(result.image))
     else:
         return self.get_base_icon(attr) or self.get_base_icon('category')
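A side note on the col != None comparisons in Example #4: SQLAlchemy intercepts equality checks against None and renders them as IS NULL / IS NOT NULL, so the pattern is deliberate; col.isnot(None) is the lint-friendly spelling. A tiny sketch with a hypothetical recipe table:

from sqlalchemy import Column, Integer, MetaData, String, Table, and_, not_

metadata = MetaData()
recipe = Table('recipe', metadata,
               Column('id', Integer, primary_key=True),
               Column('title', String),
               Column('image', String))

stmt = and_(recipe.c.image != None,   # renders as "recipe.image IS NOT NULL"
            recipe.c.image != '',
            not_(recipe.c.title.in_(['already_shown'])))
print(stmt)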
Example #5
 def get_relationships(self, with_package=None, type=None, active=True,
                       direction='both'):
     '''Returns relationships this package has.
     Keeps stored type/ordering (not from pov of self).'''
     assert direction in ('both', 'forward', 'reverse')
     if with_package:
         assert isinstance(with_package, Package)
     from package_relationship import PackageRelationship
     forward_filters = [PackageRelationship.subject==self]
     reverse_filters = [PackageRelationship.object==self]
     if with_package:
         forward_filters.append(PackageRelationship.object==with_package)
         reverse_filters.append(PackageRelationship.subject==with_package)
     if active:
         forward_filters.append(PackageRelationship.state==core.State.ACTIVE)
         reverse_filters.append(PackageRelationship.state==core.State.ACTIVE)
     if type:
         forward_filters.append(PackageRelationship.type==type)
         reverse_type = PackageRelationship.reverse_type(type)
         reverse_filters.append(PackageRelationship.type==reverse_type)
     q = meta.Session.query(PackageRelationship)
     if direction == 'both':
         q = q.filter(or_(
         and_(*forward_filters),
         and_(*reverse_filters),
         ))
     elif direction == 'forward':
         q = q.filter(and_(*forward_filters))
     elif direction == 'reverse':
         q = q.filter(and_(*reverse_filters))
     return q.all()
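Example #5 shows a composable idiom: accumulate conditions in plain Python lists, append the optional ones, and only at the end splat each list into and_(), combining the two directions with or_(). A condensed sketch with a hypothetical relationship table:

from sqlalchemy import Column, Integer, MetaData, String, Table, and_, or_, select

metadata = MetaData()
rel = Table('package_relationship', metadata,
            Column('subject_id', Integer),
            Column('object_id', Integer),
            Column('state', String))

me = 42   # hypothetical package id
forward = [rel.c.subject_id == me]
reverse = [rel.c.object_id == me]
for filters in (forward, reverse):   # shared, optional filters go on both
    filters.append(rel.c.state == 'active')

stmt = select([rel]).where(or_(and_(*forward), and_(*reverse)))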
Example #6
def useredit(request, userid):
    info = ''
    error = ''
    print userid
    data = session.query(UserSL).filter(
                        and_(UserSL.id == userid,
                        )).first()
    if request.method == 'POST':
        form = UserInfoForm(request.POST)
        if form.is_valid():
            try:
                a = session.query(UserSL).filter(
                        and_(
                            UserSL.account == form.cleaned_data['account'],
                            UserSL.id != userid,
                            ),
                    ).all()                
                if len(a) > 0:
                    error = u"登陆名已存在!"  # "This login name already exists!"
                else:                    
                    update_model(data, form.cleaned_data)
                    session.commit()
                    info = '保存成功!'  # "Saved successfully!"
                    return HttpResponseRedirect('/docview/manage/user/')
            except Exception,e:
                error = u"信息修改失败.请联系管理员"  # "Update failed; please contact the administrator."
                session.rollback()
                printError()
        else:
            field, einfo = form.errors.items()[0]
            error = form[field].label +":" +  einfo.as_text()              
Example #7
    def getRunResultCount(self, run_id, report_filters):

        filter_expression = construct_report_filter(report_filters)

        session = self.__session
        try:
            reportCount = session.query(Report) \
                .filter(Report.run_id == run_id) \
                .outerjoin(File,
                           and_(Report.file_id == File.id,
                                File.run_id == run_id)) \
                .outerjoin(BugPathEvent,
                           Report.end_bugevent == BugPathEvent.id) \
                .outerjoin(SuppressBug,
                           and_(SuppressBug.hash == Report.bug_id,
                                SuppressBug.run_id == run_id)) \
                .filter(filter_expression)\
                .count()

            if reportCount is None:
                reportCount = 0

            return reportCount

        except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
            msg = str(alchemy_ex)
            LOG.error(msg)
            raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE, msg)
Example #8
    def by_pm_with(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        if ',' in operand:
            # Huddle
            try:
                emails = [e.strip() for e in operand.split(',')]
                recipient = recipient_for_emails(emails, False,
                                                 self.user_profile, self.user_profile)
            except ValidationError:
                raise BadNarrowOperator('unknown recipient ' + operand)
            cond = column("recipient_id") == recipient.id
            return query.where(maybe_negate(cond))
        else:
            # Personal message
            self_recipient = get_recipient(Recipient.PERSONAL, type_id=self.user_profile.id)
            if operand == self.user_profile.email:
                # Personals with self
                cond = and_(column("sender_id") == self.user_profile.id,
                            column("recipient_id") == self_recipient.id)
                return query.where(maybe_negate(cond))

            # Personals with other user; include both directions.
            try:
                narrow_profile = get_user_profile_by_email(operand)
            except UserProfile.DoesNotExist:
                raise BadNarrowOperator('unknown user ' + operand)

            narrow_recipient = get_recipient(Recipient.PERSONAL, narrow_profile.id)
            cond = or_(and_(column("sender_id") == narrow_profile.id,
                            column("recipient_id") == self_recipient.id),
                       and_(column("sender_id") == self.user_profile.id,
                            column("recipient_id") == narrow_recipient.id))
            return query.where(maybe_negate(cond))
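Example #8 builds its predicate over lightweight textual columns: column("sender_id") creates a ColumnClause with no Table or model behind it, and and_/or_ combine it like any mapped attribute, which suits queries assembled elsewhere. Distilled:

from sqlalchemy import and_, column, or_

me, other = 1, 2   # hypothetical user ids
cond = or_(and_(column('sender_id') == other, column('recipient_id') == me),
           and_(column('sender_id') == me, column('recipient_id') == other))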
Example #9
def rulesCallback(ip,port,rules):
	serverid = getServerId(ip,port)

	for rule in rules:
		if rule[0] == 'tickrate':
			db.execute(
				update(tbl_server_history)
				.where(
					and_(
						tbl_server_history.c.id==serverid,
						tbl_server_history.c.date==tm,
					)
				)
				.values(
					tickrate=rule[1]
				)
			)
		elif rule[0] == 'ent_count':
			db.execute(
				update(tbl_server_history)
				.where(
					and_(
						tbl_server_history.c.id==serverid,
						tbl_server_history.c.date==tm,
					)
				)
				.values(
					ent_count=rule[1].replace(',','')
				)
			)
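Example #9 is the Core flavour of the same combinator: update().where(and_(...)).values(...) assembles the whole statement before execution. A sketch with a hypothetical server_history table:

from sqlalchemy import Column, Integer, MetaData, String, Table, and_, update

metadata = MetaData()
history = Table('server_history', metadata,
                Column('id', Integer),
                Column('date', String),
                Column('tickrate', Integer))

stmt = (update(history)
        .where(and_(history.c.id == 7, history.c.date == '2015-06-01'))
        .values(tickrate=66))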
Example #10
    def get_measures(self):
        """Find all data that should be included in the report.

        The data is returned as a list of tuples containing a
        :py:class:`Module <euphorie.client.model.Module>`,
        :py:class:`Risk <euphorie.client.model.Risk>` and
        :py:class:`ActionPlan <euphorie.client.model.ActionPlan>`. Each
        entry in the list will correspond to a row in the generated Excel
        file.

        This implementation differs from Euphorie in its ordering:
        it sorts on risk priority instead of start date.
        """
        query = (
            Session.query(model.Module, model.Risk, model.ActionPlan)
            .filter(sql.and_(model.Module.session == self.session, model.Module.profile_index > -1))
            .filter(sql.not_(model.SKIPPED_PARENTS))
            .filter(sql.or_(model.MODULE_WITH_RISK_OR_TOP5_FILTER, model.RISK_PRESENT_OR_TOP5_FILTER))
            .join(
                (
                    model.Risk,
                    sql.and_(
                        model.Risk.path.startswith(model.Module.path),
                        model.Risk.depth == model.Module.depth + 1,
                        model.Risk.session == self.session,
                    ),
                )
            )
            .join((model.ActionPlan, model.ActionPlan.risk_id == model.Risk.id))
            .order_by(sql.case(value=model.Risk.priority, whens={"high": 0, "medium": 1}, else_=2), model.Risk.path)
        )
        return query.all()
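The ordering trick in Example #10 uses the shorthand form of case(): value= names the expression to compare and whens= maps its literal values to sortable ranks, with everything else (including NULL) falling through to else_. Sketch:

from sqlalchemy import Column, Integer, MetaData, String, Table, case, select

metadata = MetaData()
risk = Table('risk', metadata,
             Column('id', Integer, primary_key=True),
             Column('priority', String))

# 'high' sorts first, 'medium' second, anything else last.
stmt = select([risk]).order_by(
    case(value=risk.c.priority, whens={'high': 0, 'medium': 1}, else_=2))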
Example #11
def can_merge_tracks(conn, track_ids):
    fp1 = schema.fingerprint.alias('fp1')
    fp2 = schema.fingerprint.alias('fp2')
    join_cond = sql.and_(fp1.c.id < fp2.c.id, fp1.c.track_id < fp2.c.track_id)
    src = fp1.join(fp2, join_cond)
    cond = sql.and_(fp1.c.track_id.in_(track_ids), fp2.c.track_id.in_(track_ids))
    query = sql.select([
        fp1.c.track_id, fp2.c.track_id,
        sql.func.max(sql.func.abs(fp1.c.length - fp2.c.length)),
        sql.func.min(sql.func.acoustid_compare2(fp1.c.fingerprint, fp2.c.fingerprint, const.TRACK_MAX_OFFSET)),
    ], cond, from_obj=src).group_by(fp1.c.track_id, fp2.c.track_id).order_by(fp1.c.track_id, fp2.c.track_id)
    rows = conn.execute(query)
    merges = {}
    for fp1_id, fp2_id, length_diff, score in rows:
        if score < const.TRACK_GROUP_MERGE_THRESHOLD:
            continue
        if length_diff > const.FINGERPRINT_MAX_LENGTH_DIFF:
            continue
        group = fp1_id
        if group in merges:
            group = merges[group]
        merges[fp2_id] = group
    result = []
    for group in set(merges.values()):
        result.append(set([group] + [i for i in merges if merges[i] == group]))
    return result
Example #12
 def get_data_point_at_datetime(self, measurement_id, point_name, date_time, interpolation_type="step"):
     if interpolation_type == "step":
         sel = (
             select([self.DataPoints])
             .where(
                 and_(
                     and_(
                         self.DataPoints.c.measurement_id == measurement_id,
                         or_(
                             self.DataPoints.c.point_name_long.like(point_name),
                             self.DataPoints.c.point_name_short.like(point_name),
                         ),
                     ),
                     self.DataPoints.c.point_measured <= date_time,
                 )
             )
             .order_by(desc(self.DataPoints.c.point_measured))
         )
         result = self.connection.execute(sel)
         row = result.fetchone()
         sign, mantissa, exponent, bytecount = (
             row["point_sign"],
             row["point_mantissa"],
             row["point_exponent"],
             row["point_bytecount"],
         )
         data_point = mpmath.mpf((sign, mantissa, exponent, bytecount))
     else:
         data_points, data_points_measured = self.get_data_points(measurement_id, point_name)
         data_points_measured = data_points_measured.astype("datetime64[us]").astype("d")
         Interp = interp1d(data_points_measured, data_points)
         data_point = Interp(np.datetime64(date_time, "us").astype("d"))
     return data_point, date_time
Example #13
def all_appointments():
    """Returns a json object which contains all appointments in a specific
    date.
    """
    if not request.args.get('date'):
        date_obj = date.today()
    else:
        date_obj = datetime.strptime(request.args.get('date'),
                                     "%Y-%m-%d").date()
    timezone = float(str(request.args.get('timezone', 0.00)))
    start_time = get_utc_seconds(date_obj, 0, timezone)
    end_time = get_utc_seconds(date_obj, 1440, timezone)

    conn = db.engine.connect()
    query = select([Appointment],
                   or_(and_(Appointment.start_time >= start_time,
                            Appointment.start_time <= end_time),
                       and_(Appointment.end_time >= start_time,
                            Appointment.end_time <= end_time))).\
        order_by(Appointment.start_time)
    result = conn.execute(query).fetchall()

    apt_time_utc_seconds = [[a.start_time, a.end_time] for a in result]
    apt_time_slider_minutes = [[get_local_minutes(a[0], date_obj, timezone),
                                get_local_minutes(a[1], date_obj, timezone)]
                               for a in apt_time_utc_seconds]

    return jsonify(apt_time_utc_seconds=apt_time_utc_seconds,
                   apt_time_slider_minutes=apt_time_slider_minutes,
                   date=str(date_obj),
                   timezone=timezone)
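Example #13 expresses "the appointment touches this day" as an or_ of two and_ range checks: either the start or the end falls inside the window. (Appointments spanning the entire window would be missed; the classic two-sided test start_time <= day_end AND end_time >= day_start also catches those.) The condition alone, over a hypothetical table:

from sqlalchemy import Column, Integer, MetaData, Table, and_, or_, select

metadata = MetaData()
appt = Table('appointment', metadata,
             Column('start_time', Integer),
             Column('end_time', Integer))

day_start, day_end = 1500000000, 1500086400   # hypothetical UTC second bounds
stmt = select([appt]).where(
    or_(and_(appt.c.start_time >= day_start, appt.c.start_time <= day_end),
        and_(appt.c.end_time >= day_start, appt.c.end_time <= day_end)))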
Example #14
    def claim_code(cls, user, deal):
        # check if they already have a code for this deal and return it
        try:
            result = (Session.query(cls)
                      .filter(and_(cls.user == user._id,
                                   cls.deal == deal))
                      .one())
            return result.code
        except NoResultFound:
            pass

        # select an unclaimed code, assign it to the user, and return it
        try:
            claiming = (Session.query(cls)
                        .filter(and_(cls.deal == deal,
                                     cls.user == None,
                                     func.pg_try_advisory_lock(cls.id)))
                        .limit(1)
                        .one())
        except NoResultFound:
            raise GoldPartnerCodesExhaustedError

        claiming.user = user._id
        claiming.date = datetime.now(g.tz)
        Session.add(claiming)
        Session.commit()

        # release the lock
        Session.query(func.pg_advisory_unlock_all()).all()

        return claiming.code 
Example #15
def get_scan_information():
    if not ValidateClass.check_login():
        return redirect(ADMIN_URL + '/index')

    if request.method == "POST":
        start_time_stamp = request.form.get("start_time_stamp")[0:10]
        end_time_stamp = request.form.get("end_time_stamp")[0:10]
        start_time_array = datetime.datetime.fromtimestamp(int(start_time_stamp))
        end_time_array = datetime.datetime.fromtimestamp(int(end_time_stamp))

        if start_time_stamp >= end_time_stamp:
            return jsonify(tag="danger", msg="wrong date select.", code=1002)

        task_count = CobraTaskInfo.query.filter(
            and_(CobraTaskInfo.time_start >= start_time_stamp, CobraTaskInfo.time_start <= end_time_stamp)
        ).count()
        vulns_count = CobraResults.query.filter(
            and_(CobraResults.created_at >= start_time_array, CobraResults.created_at <= end_time_array)
        ).count()
        projects_count = CobraProjects.query.filter(
            and_(CobraProjects.last_scan >= start_time_array, CobraProjects.last_scan <= end_time_array)
        ).count()
        files_count = db.session.query(func.sum(CobraTaskInfo.file_count).label('files')).filter(
            and_(CobraTaskInfo.time_start >= start_time_stamp, CobraTaskInfo.time_start <= end_time_stamp)
        ).first()[0]
        code_number = db.session.query(func.sum(CobraTaskInfo.code_number).label('codes')).filter(
            and_(CobraTaskInfo.time_start >= start_time_stamp, CobraTaskInfo.time_start <= end_time_stamp)
        ).first()[0]

        return jsonify(code=1001, task_count=task_count, vulns_count=vulns_count, projects_count=projects_count,
                       files_count=int(files_count), code_number=int(code_number))
Example #16
def tree_stats(request, treedef, tree, parentid):
    tree_table = datamodel.get_table(tree)
    parentid = None if parentid == 'null' else int(parentid)

    node = getattr(models, tree_table.name)
    descendant = aliased(node)
    node_id = getattr(node, node._id)
    descendant_id = getattr(descendant, node._id)
    treedef_col = tree_table.name + "TreeDefID"

    same_tree_p = getattr(descendant, treedef_col) == int(treedef)
    is_descendant_p = sql.and_(
        sql.between(descendant.nodeNumber, node.nodeNumber, node.highestChildNodeNumber),
        same_tree_p)

    target, make_joins = getattr(StatsQuerySpecialization, tree)()
    target_id = getattr(target, target._id)

    direct_count = sql.cast(
        sql.func.sum(sql.case([(sql.and_(target_id != None, descendant_id == node_id), 1)], else_=0)),
        types.Integer)

    all_count = sql.func.count(target_id)

    with models.session_context() as session:
        query = session.query(node_id, direct_count, all_count) \
                            .join(descendant, is_descendant_p) \
                            .filter(node.ParentID == parentid) \
                            .group_by(node_id)

        query = make_joins(request.specify_collection, query, descendant_id)
        results = list(query)

    return HttpResponse(toJson(results), content_type='application/json')
Example #17
def start(conf):
    # connect to db
    db.engine = engine = engine_from_config(dict(conf.items('sqlalchemy')), prefix='')
    db.metadata.bind = engine
    conn = engine.connect()

    Session = sessionmaker(bind=engine)
    session = Session()
    
    profiles = []
    topics = []
    for user in session.query(User):
        for profile in user.profiles:
            if profile.origin == 5:
                profiles.append(profile.profile_id)
        for topic in user.topics:
            if topic.profile_id in profiles:
                topics.append(topic.topic_id)

    for topic_id in topics:
        print "checking", topic_id
        s = select([func.count(db.t_message.c.message_id)], and_(db.t_message.c.origin == 5, db.t_message.c.topic_id == topic_id))
        (count,) = conn.execute(s).fetchone()
        if count > 1000:
            (m_id,) = conn.execute(select([db.t_message.c.message_id],
                                           db.t_message.c.topic_id == topic_id).order_by(
                                                    db.t_message.c.message_id.desc()).offset(1000).limit(1)).fetchone()
            print "purging", topic_id, count, m_id
            conn.execute(db.t_message.delete().where(and_(db.t_message.c.message_id < m_id, db.t_message.c.topic_id == topic_id)))
Example #18
def organizations_and_counters():
    '''Query organizations with their counters'''
    query = DB.query(Group,
        func.count(distinct(Package.id)).label('nb_datasets'),
        func.count(distinct(Member.id)).label('nb_members')
    )
    query = query.outerjoin(CertifiedPublicService)
    query = query.outerjoin(Package, and_(
        Group.id == Package.owner_org,
        ~Package.private,
        Package.state == 'active',
    ))
    query = query.outerjoin(Member, and_(
        Member.group_id == Group.id,
        Member.state == 'active',
        Member.table_name == 'user'
    ))
    query = query.filter(Group.state == 'active')
    query = query.filter(Group.approval_status == 'approved')
    query = query.filter(Group.is_organization == True)
    query = query.group_by(Group.id, CertifiedPublicService.organization_id)
    query = query.order_by(
        CertifiedPublicService.organization_id == null(),
        desc('nb_datasets'),
        desc('nb_members'),
        Group.title
    )
    query = query.options(orm.joinedload(Group.certified_public_service))
    return query
Example #19
        def make_joins(collection, query, descendant_id):
            pc_target = collection.discipline.paleocontextchildtable
            join_col = pc.ChronosStratID if chronos_or_litho == 'chronos' else pc.LithoStratID

            query = query.outerjoin(pc, join_col == descendant_id)

            if pc_target == "collectionobject":
                return query.outerjoin(co, sql.and_(
                    co.PaleoContextID == getattr(pc, pc._id),
                    co.collectionMemberId == collection.id))

            if pc_target == "collectingevent":
                return query.outerjoin(ce, ce.PaleoContextID == getattr(pc, pc._id)) \
                        .outerjoin(co, sql.and_(
                    co.CollectingEventID == getattr(ce, ce._id),
                    co.collectionMemberId == collection.id))

            if pc_target == "locality":
                return query.outerjoin(loc, loc.PaleoContextID == getattr(pc, pc._id)) \
                       .outerjoin(ce, ce.LocalityID == getattr(loc, loc._id)) \
                       .outerjoin(co, sql.and_(
                    co.CollectingEventID == getattr(ce, ce._id),
                    co.collectionMemberId == collection.id))

            raise Exception('unknown paleocontext join table: %s' % pc_target)
Example #20
    def validate_edware_prod(self):
        with get_prod_connection(self.tenant) as connection:
            fact_table = connection.get_table('fact_asmt_outcome_vw')
            dim_student = connection.get_table('dim_student')
            update_output_data = select([fact_table.c.rec_status], and_(fact_table.c.student_id == 'f7251065-ca82-4248-9397-cc722e97bbdc', fact_table.c.asmt_guid == 'a685f0ec-a0a6-4b1e-93b8-0c4298ff6374'))
            update_output_table = connection.execute(update_output_data).fetchall()
            self.assertIn(('D',), update_output_table, "Delete status D is not found in the Update record")
            self.assertIn(('C',), update_output_table, "Insert status C is not found in the Update record")
            # verify update asmt_score in fact_table

            update_asmt_score = select([fact_table.c.asmt_score], and_(fact_table.c.student_id == 'f7251065-ca82-4248-9397-cc722e97bbdc', fact_table.c.rec_status == 'C', fact_table.c.asmt_guid == 'a685f0ec-a0a6-4b1e-93b8-0c4298ff6374'))
            new_asmt_score = connection.execute(update_asmt_score).fetchall()
            expected_asmt_score = [(1900,)]
            # verify that score is updated in fact_Asmt
            self.assertEquals(new_asmt_score, expected_asmt_score)
            # verify that there is only one record with status C
            self.assertEquals(len(new_asmt_score), 1)

            # verify the dim_student update: last name changed to Bush
            update_last_name = select([dim_student.c.last_name], and_(dim_student.c.student_id == 'f7251065-ca82-4248-9397-cc722e97bbdc', dim_student.c.batch_guid == self.guid_batch_id, dim_student.c.rec_status == "C"))
            result_dim_student = connection.execute(update_last_name).fetchall()
            expected_last_name = [('Bush',)]
            self.assertEquals(result_dim_student, expected_last_name)
            # verify that the old record is now inactive
            inactive_rec = select([dim_student], and_(dim_student.c.student_id == 'f7251065-ca82-4248-9397-cc722e97bbdc', dim_student.c.rec_status == "I"))
            inactive_result = connection.execute(inactive_rec).fetchall()
            print(len(inactive_result))
Example #21
def filterQueryByDateRestrictor(query, dateRestrictor, tableName):
    """Returns the query filtered by the date restrictor, e.g., 'date elicited
    is earlier than 2011-11-11'.

    """

    location = dateRestrictor['location']

    relation = dateRestrictor['relation']

    date = dateRestrictor['date']
    date = datetime.datetime.combine(date, datetime.time())

    tbl = getattr(model, tableName)
    col = getattr(tbl, location)

    if relation == '' or relation == 'not_':
        nextDay = date + datetime.timedelta(1)
        previousDay = date - datetime.timedelta(1)

        if relation == '':
            filterCondition = and_(col > previousDay, col < nextDay)
        else:
            filterCondition = not_(and_(col > previousDay, col < nextDay))

    elif relation == 'earlier_than':
        filterCondition = col < date

    else:
        filterCondition = col > date

    return query.filter(filterCondition)
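Example #21 turns "on day D" into the open interval (D-1, D+1), so any timestamp within the day matches, and gets the negated relation by wrapping the whole and_ in not_(). A sketch with a hypothetical Form model:

import datetime

from sqlalchemy import Column, Date, Integer, and_, not_
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Form(Base):
    __tablename__ = 'form'
    id = Column(Integer, primary_key=True)
    dateElicited = Column(Date)

day = datetime.date(2011, 11, 11)
on_day = and_(Form.dateElicited > day - datetime.timedelta(1),
              Form.dateElicited < day + datetime.timedelta(1))
not_on_day = not_(on_day)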
Example #22
  def properties(self, name):
    connection = self._client.connect()
    rval = {}

    for interval,config in self._intervals.items():
      rval.setdefault(interval, {})

      stmt = select([self._table.c.i_time]).where(
        and_(
          self._table.c.name==name,
          self._table.c.interval==interval
        )
      ).order_by( asc(self._table.c.i_time) ).limit(1)
      rval[interval]['first'] = config['i_calc'].from_bucket(
        connection.execute(stmt).first()['i_time'] )

      stmt = select([self._table.c.i_time]).where(
        and_(
          self._table.c.name==name,
          self._table.c.interval==interval
        )
      ).order_by( desc(self._table.c.i_time) ).limit(1)
      rval[interval]['last'] = config['i_calc'].from_bucket(
        connection.execute(stmt).first()['i_time'] )

    return rval
Example #23
 def has_member(cls, user): #pylint: disable=E0213
     return or_(
             and_(cls.membership_type != GroupMembershipType.inverted,
                  cls.user_group_assocs.any(UserGroup.user == user)),
             and_(cls.membership_type == GroupMembershipType.inverted,
                  not_(cls.excluded_user_group_assocs.any(ExcludedUserGroup.user == user)))
             )
Example #24
 def __init__(self, dbsession, user):
     self.dbsession = dbsession
     self.user = user
     self.query = self.dbsession.query(
         func.count(Message.id).label('total'),
         func.sum(case([(and_(Message.virusinfected == 0,
             Message.nameinfected == 0, Message.otherinfected == 0,
             Message.spam == 0, Message.highspam == 0), 1)],
             else_=0)).label('clean'),
         func.sum(case([(Message.virusinfected > 0, 1)],
             else_=0)).label('virii'),
         func.sum(case([(and_(Message.highspam == 0,
             Message.spam == 0, Message.virusinfected == 0,
             or_(Message.nameinfected > 0, Message.otherinfected > 0)), 1)],
             else_=0)).label('infected'),
         func.sum(case([(and_(Message.virusinfected == 0,
             Message.otherinfected == 0, Message.nameinfected == 0,
             or_(Message.spam > 0, Message.highspam > 0)), 1)],
             else_=0)).label('spam'),
         func.sum(case([(and_(Message.virusinfected == 0,
             Message.otherinfected == 0, Message.nameinfected == 0,
             Message.spam > 0, Message.highspam == 0), 1)],
             else_=0)).label('lowspam'),
         func.sum(case([(and_(Message.virusinfected == 0,
             Message.otherinfected == 0, Message.nameinfected == 0,
             Message.highspam > 0), 1)],
             else_=0)).label('highspam'))\
             .filter(Message.date == now().date())
Example #25
def export_modified_cells(self, hourly=True, bucket=None):
    if bucket is None:  # pragma: no cover
        bucket = self.app.s3_settings['assets_bucket']
    now = util.utcnow()

    if hourly:
        end_time = now.replace(minute=0, second=0)
        file_time = end_time
        file_type = 'diff'
        start_time = end_time - timedelta(hours=1)
        cond = and_(cell_table.c.modified >= start_time,
                    cell_table.c.modified < end_time,
                    cell_table.c.cid != CELLID_LAC,
                    cell_table.c.lat.isnot(None))
    else:
        file_time = now.replace(hour=0, minute=0, second=0)
        file_type = 'full'
        cond = and_(cell_table.c.cid != CELLID_LAC,
                    cell_table.c.lat.isnot(None))

    filename = 'MLS-%s-cell-export-' % file_type
    filename = filename + file_time.strftime('%Y-%m-%dT%H0000.csv.gz')
    try:
        with selfdestruct_tempdir() as d:
            path = os.path.join(d, filename)
            with self.db_session() as sess:
                write_stations_to_csv(sess, cell_table, CELL_COLUMNS, cond,
                                      path, make_cell_export_dict, CELL_FIELDS)
            write_stations_to_s3(path, bucket)
    except Exception as exc:  # pragma: no cover
        self.heka_client.raven('error')
        raise self.retry(exc=exc)
Example #26
 def __eq__(self, other):
     if other is None:
         if self.prop.direction in [ONETOMANY, MANYTOMANY]:
             return ~sql.exists([1], self.prop.primaryjoin)
         else:
             return self.prop._optimized_compare(None)
     elif self.prop.uselist:
         if not hasattr(other, "__iter__"):
             raise exceptions.InvalidRequestError(
                 "Can only compare a collection to an iterable object.  Use contains()."
             )
         else:
             j = self.prop.primaryjoin
             if self.prop.secondaryjoin:
                 j = j & self.prop.secondaryjoin
             clauses = []
             for o in other:
                 clauses.append(
                     sql.exists(
                         [1],
                         j
                         & sql.and_(
                             *[
                                 x == y
                                 for (x, y) in zip(
                                     self.prop.mapper.primary_key, self.prop.mapper.primary_key_from_instance(o)
                                 )
                             ]
                         ),
                     )
                 )
             return sql.and_(*clauses)
     else:
         return self.prop._optimized_compare(other)
Example #27
 def __eq__(self, other):
     if other is None:
         return sql.and_(*[a==None for a in self.prop.columns])
     else:
         return sql.and_(*[a==b for a, b in
                           zip(self.prop.columns,
                               other.__composite_values__())])
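Examples #26 and #27 share one idiom worth isolating: zip a sequence of columns against a sequence of values and splat the pairwise equalities into and_(), which is how composite keys and composite values are compared. Distilled:

from sqlalchemy import and_, column

cols = [column('x'), column('y')]
vals = [10, 20]
composite_eq = and_(*[c == v for c, v in zip(cols, vals)])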
Example #28
    def generate_query_from_keywords(self, model, fulltextsearch=None,
                                     **kwargs):
        clauses = [_entity_descriptor(model, key) == value
                       for key, value in kwargs.items()
                       if (key != 'info' and key != 'fav_user_ids'
                            and key != 'created' and key != 'project_id')]
        queries = []
        headlines = []
        order_by_ranks = []
        or_clauses = []

        if 'info' in kwargs.keys():
            queries, headlines, order_by_ranks = self.handle_info_json(model, kwargs['info'],
                                                                       fulltextsearch)
            clauses = clauses + queries

        if 'created' in kwargs.keys():
            like_query = kwargs['created'] + '%'
            clauses.append(_entity_descriptor(model,'created').like(like_query))

        if 'project_id' in kwargs.keys():
            tmp = "%s" % kwargs['project_id']
            project_ids = re.findall(r'\d+', tmp)
            for project_id in project_ids:
                or_clauses.append((_entity_descriptor(model, 'project_id') ==
                                   project_id))
        all_clauses = and_(and_(*clauses), or_(*or_clauses))
        return (all_clauses,), queries, headlines, order_by_ranks
Example #29
    def statistics_update(self, node, population, size, mtime, cluster=0):
        """Update the statistics of the given node.
           Statistics keep track the population, total
           size of objects and mtime in the node's namespace.
           May be zero or positive or negative numbers.
        """
        s = select([self.statistics.c.population, self.statistics.c.size],
                   and_(self.statistics.c.node == node,
                        self.statistics.c.cluster == cluster))
        rp = self.conn.execute(s)
        r = rp.fetchone()
        rp.close()
        if not r:
            prepopulation, presize = (0, 0)
        else:
            prepopulation, presize = r
        population += prepopulation
        population = max(population, 0)
        size += presize

        #insert or replace
        #TODO better upsert
        u = self.statistics.update().where(and_(
            self.statistics.c.node == node,
            self.statistics.c.cluster == cluster))
        u = u.values(population=population, size=size, mtime=mtime)
        rp = self.conn.execute(u)
        rp.close()
        if rp.rowcount == 0:
            ins = self.statistics.insert()
            ins = ins.values(node=node, population=population, size=size,
                             mtime=mtime, cluster=cluster)
            self.conn.execute(ins).close()
Example #30
  def _type_get(self, name, interval, i_bucket, i_end=None):
    connection = self._client.connect()
    rval = OrderedDict()
    stmt = self._table.select()

    if i_end:
      stmt = stmt.where(
        and_(
          self._table.c.name==name,
          self._table.c.interval==interval,
          self._table.c.i_time>=i_bucket,
          self._table.c.i_time<=i_end,
        )
      )
    else:
      stmt = stmt.where(
        and_(
          self._table.c.name==name,
          self._table.c.interval==interval,
          self._table.c.i_time==i_bucket,
        )
      )
    stmt = stmt.order_by( self._table.c.r_time )

    for row in connection.execute(stmt):
      rval.setdefault(row['i_time'],OrderedDict())[row['r_time']] = row['value']
    return rval
Example #31
    def process_dependencies(self, task, deplist, uowcommit, delete = False):
        #print self.mapper.mapped_table.name + " " + self.key + " " + repr(len(deplist)) + " process_dep isdelete " + repr(delete) + " direction " + repr(self.direction)
        connection = uowcommit.transaction.connection(self.mapper)
        secondary_delete = []
        secondary_insert = []
        secondary_update = []

        if self.prop._reverse_property:
            reverse_dep = getattr(self.prop._reverse_property, '_dependency_processor', None)
        else:
            reverse_dep = None

        if delete:
            for state in deplist:
                (added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key,passive=self.passive_deletes)
                if deleted or unchanged:
                    for child in deleted + unchanged:
                        if child is None or (reverse_dep and (reverse_dep, "manytomany", child, state) in uowcommit.attributes):
                            continue
                        associationrow = {}
                        self._synchronize(state, child, associationrow, False, uowcommit)
                        secondary_delete.append(associationrow)
                        uowcommit.attributes[(self, "manytomany", state, child)] = True
        else:
            for state in deplist:
                (added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key)
                if added or deleted:
                    for child in added:
                        if child is None or (reverse_dep and (reverse_dep, "manytomany", child, state) in uowcommit.attributes):
                            continue
                        associationrow = {}
                        self._synchronize(state, child, associationrow, False, uowcommit)
                        uowcommit.attributes[(self, "manytomany", state, child)] = True
                        secondary_insert.append(associationrow)
                    for child in deleted:
                        if child is None or (reverse_dep and (reverse_dep, "manytomany", child, state) in uowcommit.attributes):
                            continue
                        associationrow = {}
                        self._synchronize(state, child, associationrow, False, uowcommit)
                        uowcommit.attributes[(self, "manytomany", state, child)] = True
                        secondary_delete.append(associationrow)

                if not self.passive_updates and unchanged and self._pks_changed(uowcommit, state):
                    for child in unchanged:
                        associationrow = {}
                        self.syncrules.update(associationrow, state, child, "old_")
                        secondary_update.append(associationrow)

        if secondary_delete:
            secondary_delete.sort()
            # TODO: precompile the delete/insert queries?
            statement = self.secondary.delete(sql.and_(*[c == sql.bindparam(c.key, type_=c.type) for c in self.secondary.c if c.key in associationrow]))
            result = connection.execute(statement, secondary_delete)
            if result.supports_sane_multi_rowcount() and result.rowcount != len(secondary_delete):
                raise exceptions.ConcurrentModificationError("Deleted rowcount %d does not match number of secondary table rows deleted from table '%s': %d" % (result.rowcount, self.secondary.description, len(secondary_delete)))

        if secondary_update:
            statement = self.secondary.update(sql.and_(*[c == sql.bindparam("old_" + c.key, type_=c.type) for c in self.secondary.c if c.key in associationrow]))
            result = connection.execute(statement, secondary_update)
            if result.supports_sane_multi_rowcount() and result.rowcount != len(secondary_update):
                raise exceptions.ConcurrentModificationError("Updated rowcount %d does not match number of secondary table rows updated from table '%s': %d" % (result.rowcount, self.secondary.description, len(secondary_update)))

        if secondary_insert:
            statement = self.secondary.insert()
            connection.execute(statement, secondary_insert)
Example #32
def amiexaminer(code):
    testvar = db.session.query(Teachers).filter(
        Teachers.email == session['user']).first()
    already = db.session.query(exists().where(
        and_(Courses.code == code, Courses.examiner == testvar))).scalar()
    return already
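Example #32's query(exists().where(...)).scalar() is the usual way to get a bare boolean out of an EXISTS subquery without fetching rows. The Core equivalent, with a hypothetical courses table:

from sqlalchemy import Column, Integer, MetaData, String, Table, and_, exists, select

metadata = MetaData()
courses = Table('courses', metadata,
                Column('code', String),
                Column('examiner_id', Integer))

stmt = select([exists().where(and_(courses.c.code == 'ABC123',
                                   courses.c.examiner_id == 7))])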
Example #33
        else:
            hint = ""
        raise exc.ArgumentError("Can't find any foreign key relationships "
                                "between '%s' and '%s'.%s" %
                                (a.description, b.description, hint))
    elif len(constraints) > 1:
        raise exc.ArgumentError("Can't determine join between '%s' and '%s'; "
                                "tables have more than one foreign key "
                                "constraint relationship between them. "
                                "Please specify the 'onclause' of this "
                                "join explicitly." %
                                (a.description, b.description))
    elif len(crit) == 1:
        return (crit[0])
    else:
        return sql.and_(*crit)


class Annotated(object):
    """clones a ClauseElement and applies an 'annotations' dictionary.

    Unlike regular clones, this clone also mimics __hash__() and
    __cmp__() of the original element so that it takes its place
    in hashed collections.

    A reference to the original element is maintained, for the important
    reason of keeping its hash value current.  When GC'ed, the
    hash value may be reused, causing conflicts.

    """
    def __new__(cls, *args):
Example #34
# Advanced query operations
# The usual setup
from create_table import User, engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import text

Session = sessionmaker(engine)
db_session = Session()

# Querying the table
# and or
from sqlalchemy.sql import and_, or_

ret = db_session.query(User).filter(and_(User.id > 3,
                                         User.name == 'lhm')).all()
# ret = db_session.query(User).filter(or_(User.id < 2, User.name == 'lhm')).all()

# Fetch all rows
r1 = db_session.query(User).all()

# Select specific columns and attach a label (alias)
r2 = db_session.query(User.name.label('username'), User.id).first()
print(r2.id, r2.username)  # 15 NBDragon

# Filter with a column expression
r3 = db_session.query(User).filter(User.name == "lhm").all()

# Keyword-argument filtering with filter_by
r4 = db_session.query(User).filter_by(name='lhm').all()
r5 = db_session.query(User).filter_by(name='lhm').first()
Example #35
def divBereken(row):
    jrwk = jaarweek()
    mstatuswk = row[12]
    metadata = MetaData()
    resultaten = Table('resultaten', metadata,
                       Column('resID', Integer(), primary_key=True),
                       Column('statusweek', String), Column('blonen', Float),
                       Column('wlonen', Float), Column('bmaterialen', Float),
                       Column('wmaterialen',
                              Float), Column('bmaterieel', Float),
                       Column('wmaterieel', Float), Column('binhuur', Float),
                       Column('winhuur', Float), Column('bdiensten', Float),
                       Column('wdiensten', Float),
                       Column('bprojectkosten', Float),
                       Column('wprojectkosten', Float),
                       Column('btotaal', Float), Column('wtotaal', Float),
                       Column('betaald_bedrag', Float),
                       Column('meerminderwerk', Float),
                       Column('onderhandenwerk', Float),
                       Column('boekweek',
                              String), Column('bruto_winst', Float),
                       Column('aanneemsom', Float))

    engine = create_engine('postgresql+psycopg2://postgres@localhost/bisystem')
    con = engine.connect()
    selres = select([resultaten]).where(and_(resultaten.c.statusweek == mstatuswk,\
                   resultaten.c.boekweek == jrwk))
    rpres = con.execute(selres).first()
    mstatus = row[11]
    mkosten = row[3]+row[4]+row[5]+row[6]+row[7]+row[8]+row[9]+row[10]+row[17]+\
              row[18]+row[19]+row[20]
    if mstatus == 'C':
        mwinst = row[2] / 3 - mkosten
    elif mstatus == 'D':
        mwinst = row[2] / 3 - mkosten
    elif mstatus == 'E':
        mwinst = row[2] * 2 / 3 - mkosten
    elif mstatus == 'F':
        mwinst = row[2] * .8667 - mkosten
    elif mstatus == 'G':
        mwinst = row[2] - mkosten
    elif mstatus == 'H':
        mwinst = row[2] + row[10] - mkosten
    else:
        mwinst = 0

    if not rpres:
        mresnr = (con.execute(select([func.max(resultaten.c.resID, type_=Integer)\
          .label('mresnr')])).scalar())
        mresnr += 1
        insres = insert(resultaten).values(resID = mresnr, statusweek = mstatuswk,\
          blonen=round(row[28],2), wlonen=round(row[4],2),bmaterialen=round(row[27],2),\
          wmaterialen=round(row[3],2),bmaterieel=round(row[26],2), wmaterieel=round(row[5],2),\
          binhuur=round(row[21],2), winhuur=round(row[21],2),bprojectkosten=round(row[14]+\
          row[15]+row[16]+row[22],2),wprojectkosten=round(row[6]+row[7]+row[8]+row[9],2),\
          btotaal=round(row[2],2), wtotaal=round(row[3]+row[4]+row[5]+row[6]+row[7]+row[8]\
          +row[9]+row[17]+row[18]+row[19]+row[20],2),betaald_bedrag=round(row[13],2),\
          meerminderwerk=round(row[10],2), onderhandenwerk=round(row[3]+row[4]+row[5]+\
          row[6]+row[7]+row[8]+row[9]+row[10]+row[17]+row[18]+row[19]+row[20]-row[13],2),\
          bruto_winst = round(mwinst,2), boekweek = jrwk, aanneemsom = round(row[2],2))
        con.execute(insres)
Example #36
 def flushconfig(self, req, group_id, objtype, entity, body=None):
     body = body or {}
     group_id = int(group_id)
     jsonutils.schema_validate(body, self.FLUSH)
     if objtype == common.GAMESERVER:
         gm = body.pop(common.GMSERVER, 0)
         cross = body.pop(common.CROSSSERVER, 0)
         entitys = []
         if gm:
             entitys.append(gm)
         if cross:
             entitys.append(cross)
         entitys = list(set(entitys))
         if entitys:
             chiefs = {}
             session = endpoint_session()
             query = model_query(session,
                                 AppEntity,
                                 filter=and_(AppEntity.group_id == group_id,
                                             AppEntity.entity.in_(entitys)))
             gmsvr = crosssvr = None
             for appserver in query:
                 if appserver.group_id != group_id:
                     raise InvalidArgument('Entity group value error')
                 if appserver.objtype == common.GMSERVER:
                     if appserver.entity != gm:
                         raise InvalidArgument('Find %s but entity is %d' %
                                               (common.GMSERVER, gm))
                     gmsvr = appserver
                 elif appserver.objtype == common.CROSSSERVER:
                     if appserver.entity != cross:
                         raise InvalidArgument('Find %s but entity is %d' %
                                               (common.CROSSSERVER, cross))
                     crosssvr = appserver
             if gm and not gmsvr:
                 raise InvalidArgument('%s.%d can not be found' %
                                       (common.GMSERVER, gm))
             if cross and not crosssvr:
                 raise InvalidArgument('%s.%d can not be found' %
                                       (common.CROSSSERVER, cross))
             # Fetch the entities' server info (ports / local ip)
             maps = entity_controller.shows(endpoint=common.NAME,
                                            entitys=entitys)
             if gmsvr:
                 chiefs.setdefault(
                     common.GMSERVER,
                     dict(
                         entity=gmsvr.entity,
                         ports=maps.get(gmsvr.entity).get('ports'),
                         local_ip=maps.get(
                             gmsvr.entity).get('metadata').get('local_ip')))
             if crosssvr:
                 chiefs.setdefault(
                     common.CROSSSERVER,
                     dict(entity=crosssvr.entity,
                          ports=maps.get(crosssvr.entity).get('ports'),
                          local_ip=maps.get(crosssvr.entity).get(
                              'metadata').get('local_ip')))
             body.update({'chiefs': chiefs})
     return self._async_bluck_rpc('flushconfig', group_id, objtype, entity,
                                  body)
Example #37
    def _async_bluck_rpc(self,
                         action,
                         group_id,
                         objtype,
                         entity,
                         body=None,
                         context=None):
        caller = inspect.stack()[0][3]
        body = body or {}
        group_id = int(group_id)

        context = context or empty_context

        if entity == 'all':
            entitys = 'all'
        else:
            entitys = argutils.map_to_int(entity)
        asyncrequest = self.create_asyncrequest(body)
        target = targetutils.target_endpoint(common.NAME)
        session = endpoint_session(readonly=True)
        query = model_query(session,
                            AppEntity,
                            filter=and_(AppEntity.group_id == group_id,
                                        AppEntity.objtype == objtype))
        emaps = dict()

        for _entity in query:
            if _entity.status <= common.DELETED:
                continue
            if _entity.status != common.OK and action != 'stop':
                continue
            emaps.setdefault(_entity.entity, _entity.agent_id)

        if entitys == 'all':
            entitys = emaps.keys()
            agents = set(emaps.values())
        else:
            if entitys - set(emaps.keys()):
                raise InvalidArgument(
                    'Some entitys not found or status is not active')
            agents = set()
            for entity in emaps:
                if entity in entitys:
                    agents.add(emaps[entity])

        with context(asyncrequest.request_id, entitys, agents):
            async_ctxt = dict(pre_run=body.pop('pre_run', None),
                              after_run=body.pop('after_run', None),
                              post_run=body.pop('post_run', None))
            rpc_ctxt = {}
            rpc_ctxt.setdefault('agents', agents)
            rpc_method = '%s_entitys' % action
            rpc_args = dict(entitys=list(entitys))
            rpc_args.update(body)

            def wapper():
                self.send_asyncrequest(asyncrequest, target, rpc_ctxt,
                                       rpc_method, rpc_args, async_ctxt)

            threadpool.add_thread(safe_func_wrapper, wapper, LOG)

        return resultutils.results(
            result='gogamechen3 %s entitys %s spawning' % (objtype, caller),
            data=[asyncrequest.to_dict()])
Example #38
class ObservedVlan(Base):
    """ reports the observance of a vlan/network on a switch """
    __tablename__ = 'observed_vlan'

    switch_id = Column(Integer, ForeignKey('switch.hardware_entity_id',
                                           ondelete='CASCADE',
                                           name='%s_hw_fk' % _ABV),
                       nullable=False)

    network_id = Column(Integer, ForeignKey('network.id',
                                            ondelete='CASCADE',
                                            name='%s_net_fk' % _ABV),
                        nullable=False)

    vlan_id = Column(Integer, ForeignKey('vlan_info.vlan_id',
                                           name='%s_vlan_fk' % _ABV),
                     nullable=False)

    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))

    switch = relation(Switch, backref=backref('%ss' % _TN, cascade='delete',
                                              passive_deletes=True,
                                              order_by=[vlan_id]))
    network = relation(Network, backref=backref('%ss' % _TN, cascade='delete',
                                                passive_deletes=True,
                                                order_by=[vlan_id]))

    vlan = relation(VlanInfo, uselist=False,
                    primaryjoin=vlan_id == VlanInfo.vlan_id,
                    foreign_keys=[VlanInfo.vlan_id],
                    viewonly=True)

    __table_args__ = (PrimaryKeyConstraint(switch_id, network_id, vlan_id,
                                           name="%s_pk" % _TN),
                      CheckConstraint(and_(vlan_id >= 0,
                                           vlan_id < MAX_VLANS),
                                      name='%s_vlan_id_ck' % _TN),
                      Index('%s_network_idx' % _TN, 'network_id'),
                      Index('%s_vlan_idx' % _TN, 'vlan_id'))

    @property
    def port_group(self):
        if self.vlan:
            return self.vlan.port_group
        return None

    @property
    def vlan_type(self):
        if self.vlan:
            return self.vlan.vlan_type
        return None

    @property
    def guest_count(self):
        from aquilon.aqdb.model import (EsxCluster, Cluster, ClusterResource,
                                        Resource, VirtualMachine, Machine,
                                        HardwareEntity, Interface)
        session = object_session(self)
        q = session.query(func.count())
        q = q.filter(and_(
            # Select VMs on clusters that belong to the given switch
            EsxCluster.switch_id == self.switch_id,
            Cluster.id == EsxCluster.esx_cluster_id,
            ClusterResource.cluster_id == Cluster.id,
            Resource.holder_id == ClusterResource.id,
            VirtualMachine.resource_id == Resource.id,
            Machine.machine_id == VirtualMachine.machine_id,
            # Select interfaces with the right port group
            HardwareEntity.id == Machine.machine_id,
            Interface.hardware_entity_id == HardwareEntity.id,
            Interface.port_group == VlanInfo.port_group,
            VlanInfo.vlan_id == self.vlan_id))
        return q.scalar()

    @classmethod
    def get_network(cls, session, switch, vlan_id, compel=NotFoundException):
        q = session.query(cls).filter_by(switch=switch, vlan_id=vlan_id)
        nets = q.all()
        if not nets:
            raise compel("No network found for switch %s and VLAN %s" %
                         (switch.fqdn, vlan_id))
        if len(nets) > 1:
            raise InternalError("More than one network found for switch %s "
                                "and VLAN %s" % (switch.fqdn, vlan_id))
        return nets[0].network
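Example #38 also demonstrates and_() outside of queries: inside a CheckConstraint it becomes the body of the CHECK clause emitted with CREATE TABLE. A minimal Core sketch (4096 standing in for MAX_VLANS):

from sqlalchemy import CheckConstraint, Column, Integer, MetaData, Table, and_

metadata = MetaData()
vlan_id = Column('vlan_id', Integer)
observed = Table('observed_vlan', metadata, vlan_id,
                 CheckConstraint(and_(vlan_id >= 0, vlan_id < 4096),
                                 name='vlan_id_range_ck'))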
Example #39
 def bond(self, req, database_id, schema, body=None):
     """schema quote"""
     body = body or {}
     database_id = int(database_id)
     slave = body.get('slave', True)
     slave_id = body.get('slave_id')
     desc = body.get('desc')
     esure = body.get('esure', True)
     quote_id = body.get('quote_id')
     entity = body.pop('entity', None)
     entity = int(entity) if entity is not None else None
     endpoint = body.pop(common.ENDPOINTKEY, None)
     if esure:
         if not endpoint or not entity:
             raise InvalidArgument(
                 'No endpoint info or entity, esure should be flase')
         # TODO log entity info
         entity_info = entity_controller.show(req=req,
                                              endpoint=endpoint,
                                              entity=entity)['data'][0]
     session = endpoint_session()
     query = model_query(session,
                         GopDatabase,
                         filter=and_(GopDatabase.database_id == database_id,
                                     GopDatabase.slave == 0))
     query = query.options(joinedload(GopDatabase.schemas, innerjoin=False))
     _database = query.one()
     _schema = None
     for __schema in _database.schemas:
         if __schema.schema == schema:
             _schema = __schema
             break
     if not _schema:
         raise exceptions.AcceptableSchemaError('Schema %s not found' %
                                                schema)
     quote_database_id = _database.database_id
     user = _schema.user
     passwd = _schema.passwd
     # glock = get_global().lock('entitys')
     # with glock(common.DB, [entity, ]):
     if slave:
         slaves = [
             _slave.slave_id for _slave in _database.slaves if _slave.ready
         ]
         if slave_id:
             if slave_id not in slaves:
                 raise exceptions.AcceptableDbError(
                     'Slave %d not found or not ready' % slave_id)
             quote_database_id = slave_id
         else:
             if slaves:
                 quote_database_id = slaves[0]
             else:
                 LOG.warning(
                     'No slave database available; using master database as slave')
         user = _schema.ro_user
         passwd = _schema.ro_passwd
     address = _address([
         quote_database_id,
     ]).get(quote_database_id)
     with session.begin():
         schema_quote = SchemaQuote(quote_id=quote_id,
                                    schema_id=_schema.schema_id,
                                    qdatabase_id=quote_database_id,
                                    entity=entity,
                                    endpoint=endpoint,
                                    desc=desc)
         session.add(schema_quote)
         session.flush()
     port = address.get('port')
     host = address.get('host')
     return resultutils.results(
         result='quote to %s.%d success' %
         (schema_quote.qdatabase_id, schema_quote.schema_id),
         data=[
             dict(schema_id=schema_quote.schema_id,
                  quote_id=schema_quote.quote_id,
                  qdatabase_id=quote_database_id,
                  host=host,
                  port=port,
                  user=user,
                  passwd=passwd,
                  schema=schema)
         ])
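For reference, a sketch of the request body this endpoint expects; the field names are taken from the code above, the values are purely illustrative:

body = {
    'slave': True,        # prefer a read-only slave for the quote
    'slave_id': 3,        # optional: pin the quote to a specific slave
    'desc': 'read-only quota for reporting',
    'esure': True,        # when True, endpoint and entity must be supplied
    'quote_id': 101,
    'entity': 7,          # verified via entity_controller.show()
    # common.ENDPOINTKEY: '<endpoint name>',
}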
Example #40
0
def getradios():
    """ Generate bdd request for forms models """
    radios = Music.query.filter(and_(Music.music_type=='1', Music.users==current_user.id))
    return radios
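Since both criteria are simple equality tests, the same query can also be written with filter_by, which implicitly ANDs its keyword arguments (a sketch assuming the same Music model):

radios = Music.query.filter_by(music_type='1', users=current_user.id)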
Example #41
0
    def stat5(self):

        """ dal db di jobcontroll
            
            sqlite> .mode list
            sqlite> .separator |
            sqlite> .output test_file_1.txt
            sqlite> select project_name,start_date from jobs;
        """
        jobco_fi = open("/home/webtest/WebENMR/data/temp/maxocc.txt", "r").readlines()
        jobco = []
        # jobco contains each calculation name and its submission date
        for i in jobco_fi:
            jobco.append((i.split("|")[0], i.split("|")[1]))
            
        tot_users = 0
        tot_activeUsers = 0
        tot_runs = 0
        tot_runsForUser = {} #user->runs
        tot_jobs = 0
        jobs_user = {}
        users_list = Session.query(Users).all()
        for u in users_list:
                portalUser = False
                projects_list = Session.query(Projects).filter(and_(Projects.owner_id==u.id)).all()
                jobs = []
                if projects_list:
                    active = False
                    for p in projects_list:
                        calculations_list = Session.query(Calculations).filter(and_(Calculations.project_id==p.id, Calculations.calc_type_id == 5, Calculations.creation_date >= dt)).all()
                        if calculations_list:
                            for c in calculations_list:
                                for z in jobco:
                                    if c.name == z[0]:
                                        print "trovato" + z[0]
                                        jobs.append(parser.parse(z[1]).strftime("%A %d %B %Y %I:%M:%S%p"))
                                    
                                
                                #jobs_list = Session.query(Jobs).filter(and_(Jobs.calculation_id==c.id)).all()
                                #if jobs_list:
                                #    for j in jobs_list:
                                #        if j.start_date:
                                           # jobs.append(j.start_date.strftime("%A %d %B %Y %I:%M:%S%p"))
                                           
                    jobs_user[u.email] = jobs
        print jobs_user
        opnfl = open("/home/webenmr/WebENMR/data/temp/statJobUsermaxocc.txt", "w")
        #opnfl = open(os.path.join(config['app_conf']['amber_data'], session.get('DIR_CACHE'), 'statJobUsermaxocc.txt'), 'w')
        opnfl.write(str(len(jobs_user))+'\n')
        totjobs = 0
        for k in jobs_user:
            if k:
                totjobs = totjobs + len(jobs_user[k])
        opnfl.write(str(totjobs))
        
        #for k in jobs_user:
        #    if k:
        #        opnfl.write('****'+k+'\n')
        #        if jobs_user[k]:
        #            opnfl.write('\n'.join(jobs_user[k]))
        #            opnfl.write('\n')
        #        else:
        #            opnfl.write('[no jobs]\n')
        opnfl.close()
Example #42
0
 def generate(self):
     "generate query"
     for filt in self.filters:
         attr = getattr(self.model, filt['field'])
         if filt['filter'] == '1':
             key = "%s_equal" % filt['field']
             expr = attr == filt['value']
             self._load_keys(key, expr)
         if filt['filter'] == '2':
             key = "%s_notequal" % filt['field']
             expr = attr != filt['value']
             self._load_keys(key, expr)
         if filt['filter'] == '3':
             key = "%s_greaterthan" % filt['field']
             expr = attr > filt['value']
             self._load_keys(key, expr)
         if filt['filter'] == '4':
             key = "%s_lessthan" % filt['field']
             expr = attr < filt['value']
             self._load_keys(key, expr)
         if filt['filter'] == '5':
             key = "%s_contains" % filt['field']
             expr = attr.ilike('%' + filt['value'] + '%')
             self._load_keys(key, expr)
         if filt['filter'] == '6':
             key = "%s_ncontains" % filt['field']
             expr = ~attr.ilike('%' + filt['value'] + '%')
             self._load_keys(key, expr)
         if filt['filter'] == '7':
             key = "%s_regex" % filt['field']
             expr = attr.op('regexp')(filt['value'])
             self._load_keys(key, expr)
         if filt['filter'] == '8':
             key = "%s_nregex" % filt['field']
             expr = ~attr.op('regexp')(filt['value'])
             self._load_keys(key, expr)
         if filt['filter'] == '9':
             key = "%s_isnull" % filt['field']
             expr = attr == None
             self._load_keys(key, expr)
         if filt['filter'] == '10':
             key = "%s_nisnull" % filt['field']
             expr = attr != None
             self._load_keys(key, expr)
         if filt['filter'] == '11':
             key = "%s_istrue" % filt['field']
             expr = attr > 0
             self._load_keys(key, expr)
         if filt['filter'] == '12':
             key = "%s_isfalse" % filt['field']
             expr = attr <= 0
             self._load_keys(key, expr)
     self.addclauses.extend(self.kwargs.values())
     if self.addclauses or self.orclauses:
         if self.addclauses and self.orclauses:
             orlist = [
                 or_(*self.orclauses[mikey]) for mikey in self.orclauses
             ]
             query = self.query.filter(and_(and_(*self.addclauses),
                                            *orlist))
         else:
             if self.addclauses:
                 query = self.query.filter(and_(*self.addclauses))
             if self.orclauses:
                 orlist = [
                     or_(*self.orclauses[okey]) for okey in self.orclauses
                 ]
                 query = self.query.filter(and_(*orlist))
         return query
     return self.query
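A sketch of the input this builder consumes (the field names and model are hypothetical): filter code '5' maps to an ilike containment test and '3' to a greater-than comparison, and the resulting expressions end up ANDed together.

filters = [
    {'field': 'from_address', 'filter': '5', 'value': 'example.com'},  # contains
    {'field': 'size', 'filter': '3', 'value': 1024},                   # greater than
]
# With self.filters set as above, generate() produces roughly:
# query.filter(and_(Model.from_address.ilike('%example.com%'), Model.size > 1024))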
Example #43
0
def crawl(fname, phenotype_fname, db_session):
    phen2id = load_phenotypes(phenotype_fname, db_session)

    with open(fname) as f:
        f.readline()
        for i, line in enumerate(f):
            fields = line.split('\t')

            if i % 500 == 0:
                print '%d associations parsed' % i

            # create paper
            pubmed_id = _get_int(fields[1])
            journal_name = fields[4]
            title = _normalize_str(fields[6])

            paper = db_session.query(Paper).filter(
                Paper.pubmed_id == pubmed_id).first()
            if not paper:
                paper = Paper(pubmed_id=pubmed_id,
                              title=title,
                              journal=journal_name)
                db_session.add(paper)
                db_session.commit()

            # create snp
            chrom = _get_chrom(fields[11])
            pos = _get_int(fields[12])
            rs_id = fields[21]
            ref = 'GRCh38'

            snp = db_session.query(SNP).filter(SNP.rs_id == rs_id).first()
            if not snp:
                snp = SNP(chrom=chrom, position=pos, rs_id=rs_id)
                db_session.add(snp)
                db_session.commit()

            # create phenotype
            phenotype_name = _normalize_str(fields[7].lower())
            phenotype = db_session.query(Phenotype).filter(
                and_(
                    Phenotype.name == phenotype_name,
                    Phenotype.source == 'gwas_catalog',
                )).first()  # max 1 phenotype from gwas_catalog
            if not phenotype:
                phenotype = Phenotype(name=phenotype_name,
                                      source='gwas_catalog')
                db_session.add(phenotype)
                db_session.commit()

            # add links to existing efo phenotypes
            if phenotype_name in phen2id:
                for efo_id in phen2id[phenotype_name]:
                    phenotypes = db_session.query(Phenotype).filter(
                        and_(
                            Phenotype.ontology_ref == efo_id,
                            Phenotype.source == 'efo',
                        )).all()
                    if len(phenotypes) != 1:
                        print [(p.name, p.ontology_ref) for p in phenotypes]
                        raise Exception(
                            'Could not find unique phenotype entry for %s (%s)'
                            % (phenotype_name, efo_id))
                    if phenotypes[0] not in phenotype.equivalents:
                        phenotype.equivalents.append(phenotypes[0])
                db_session.commit()

            # create association
            n_cases = _get_single_digit(fields[8])
            n_controls = _get_single_digit(fields[9])
            pop = fields[8] + ' ' + fields[9]
            freq = _get_float(fields[26])
            pvalue = _get_float(fields[27])
            beta_params = _normalize_str(fields[31])
            oddsratio, beta = _get_or(fields[30], fields[31])
            allele = _get_allele(fields[18])

            db_session.add(
                Association(snp=snp,
                            phenotype=phenotype,
                            paper=paper,
                            freq=freq,
                            pvalue=pvalue,
                            oddsratio=oddsratio,
                            beta=beta,
                            allele=allele,
                            beta_params=beta_params,
                            source='gwas_catalog'))

            # print pubmed_id, pvalue

            db_session.commit()
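The paper/SNP/phenotype blocks all repeat the same query-then-insert idiom; a generic get-or-create helper (a sketch, not part of the source) would collapse most of them:

def get_or_create(db_session, model, **kwargs):
    """Return the first row matching kwargs, creating it if absent."""
    instance = db_session.query(model).filter_by(**kwargs).first()
    if instance is None:
        instance = model(**kwargs)
        db_session.add(instance)
        db_session.commit()
    return instance

# e.g. snp = get_or_create(db_session, SNP, rs_id=rs_id)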
Example #44
0
    def statDB(self, portal):
        #portals = {'amber': 2, 'xplor': 1, 'maxocc' : 5}
        dt = date(2013, 8, 1)
        #users_list = []
        #users_list.append({337: 337})
        #users_list.append({5: 10})
        
        pippo = '''| /O=dutchgrid/O=users/O=universiteit-utrecht/OU=chem/CN=Alexandre Bonvin                   |    3 |
| /O=dutchgrid/O=users/O=universiteit-utrecht/OU=chem/CN=Adrien Samuel Jacky Melquiond      |   12 |
| /C=IT/O=INFN/OU=Personal Certificate/L=CIRMMP/CN=Andrea Giachetti                         |   16 |
| /C=DE/O=GermanGrid/OU=UniFrankfurt/CN=Peter Guentert                                      |   23 |
| /O=dutchgrid/O=users/O=universiteit-utrecht/OU=chem/CN=Nuno Loureiro Ferreira             |   45 |
| /C=TW/O=AS/OU=GRID/CN=SHU-JU HSIEH 692179                                                 |  273 |
| /C=TW/O=AS/OU=GRID/CN=Pomin Shih 933244                                                   |  274 |
| /C=TW/O=AS/OU=GRID/CN=Steve Yu 741725                                                     |  577 |
| /DC=org/DC=doegrids/OU=People/CN=yazan akkam 321744                                       |  586 |
| /C=IT/O=INFN/OU=Personal Certificate/L=CIRMMP/CN=Linda Cerofolini                         |  609 |
| /C=IT/O=INFN/OU=Personal Certificate/L=CIRMMP/CN=Daniela Lalli                            |  611 |
| /C=TW/O=AS/OU=GRID/CN=Chia-Cheng Chou 142039                                              |  618 |
| /C=TW/O=AS/OU=GRID/CN=Iren Wang 953069                                                    |  649 |
| /C=TW/O=AS/OU=GRID/CN=Chih-Ta Henry Chien 856297                                          |  650 |
| /C=PL/O=GRID/O=AMU/CN=Malgorzata Szelag                                                   |  651 |
| /C=TW/O=AS/OU=GRID/CN=Chung-ke Chang 238145                                               |  657 |
| /C=TW/O=AS/OU=GRID/CN=Yu Chiang Pan 837647                                                |  667 |
| /C=TW/O=AS/OU=GRID/CN=Yuan-Chao Lou 498134                                                |  671 |
| /C=TW/O=NCU/OU=GRID/CN=KUANFU Lin 558253                                                  |  674 |
| /C=TW/O=AS/OU=GRID/CN=Hsin-Yen Chen 132111                                                |  684 |
| /DC=com/DC=DigiCert-Grid/O=Open Science Grid/OU=People/CN=Jeffrey Lee 1225                |  723 |
| /DC=com/DC=DigiCert-Grid/O=Open Science Grid/OU=People/CN=Valjean Raiden Bacot-Davis 1321 |  744 |
| /C=DE/O=GermanGrid/OU=UniFrankfurt/CN=Nina Alexandra Christ                               |  858 |
| /DC=IN/DC=GARUDAINDIA/O=C-DAC/OU=CTSF/CN=SRINIDHI R ([email protected])                |  859 |
| /DC=IN/DC=GARUDAINDIA/O=C-DAC/OU=CTSF/CN=Biswajit Gorai ([email protected])        |  885 |
| /DC=com/DC=DigiCert-Grid/O=Open Science Grid/OU=People/CN=Oren Rosenberg 1688             |  893 |
| /DC=IN/DC=GARUDAINDIA/O=C-DAC/OU=CTSF/CN=Surajit Debnath ([email protected])          |  894 |
| /C=TW/O=AP/OU=GRID/CN=Mukesh Mahajan 127570                                               |  988 |
| /O=GRID-FR/C=FR/O=CNRS/OU=IMOPA/CN=Benjamin Chagot                                        | 1024 |
| /C=IT/O=INFN/OU=Personal Certificate/L=CIRMMP/CN=Lucio Ferella                            |   67 |
| /C=UK/O=eScience/OU=Oxford/L=OeSC/CN=jonathan elegheert                                   | 1080 |'''

        diz ={}        
        for i in pippo.split("\n"):
            if len(i) > 1:
                b = i.split("|")
                diz[b[1].strip()] = b[2].strip()
            
        opnfl = open("/home/webenmr/WebENMR/data/temp/statXplorDB.txt", "w")
        for u in diz.keys():
                #portalUser = False
                user = Session.query(Users).filter(and_(Users.dn==u)).first()
                if user:
                    print "-" + u + "-"
                    projects_list = Session.query(Projects).filter(and_(Projects.owner_id==user.id)).all()
                    jobs = []
                    if projects_list:
                        #active = False
                        for p in projects_list:
                            calculations_list = Session.query(Calculations).filter(and_(Calculations.project_id==p.id, Calculations.calc_type_id == portal, Calculations.creation_date >= dt)).all()
                            if calculations_list:
                                for c in calculations_list:
                                    jobs_list = Session.query(Jobs).filter(and_(Jobs.calculation_id==c.id, Jobs.status == 'E')).all()
                                    if jobs_list:
                                        for j in jobs_list:
                                            opnfl.write("INSERT INTO ssoxs_amber_stats(uid, jid, sdate, fdate, ip, country, status, message) VALUES("+diz[u]+", "+str(j.id)+", "+j.running_date.strftime('%s')+", "+j.done_date.strftime('%s')+", '150.217.163.184', 'XX', 5, "");\n")
        opnfl.close()
Example #45
0
def generate_info(panel, session, condition, experiments, results):
    results['statistics'].update({'uses': 0, 'total_time': 0})

    max_time = 0.1

    for exp, values in experiments.iteritems():
        max_time_allowed = max(
            [time_allowed for time_allowed, permission_id in values])
        max_time = max(max_time, max_time_allowed)

    # Get something different per experiment
    # TODO
    # if len(permission_ids) > 1:
    #     pass

    # Get the totals
    users_time = defaultdict(int)
    # {
    #     login : seconds
    # }
    time_per_day = defaultdict(list)
    # {
    #     '2013-01-01' : [5,3,5]
    # }

    user_id_cache = {}
    users = defaultdict(int)
    for user_id, login, full_name, uses in session.execute(
            sql.select(
                [
                    model.DbUser.id, model.DbUser.login,
                    model.DbUser.full_name,
                    sa_func.count(model.DbUserUsedExperiment.id)
                ],
                sql.and_(model.DbUserUsedExperiment.user_id == model.DbUser.id,
                         condition)).group_by(
                             model.DbUserUsedExperiment.user_id)):
        user_id_cache[user_id] = login
        users[login, full_name] = uses
        results['statistics']['uses'] += uses

    per_hour = defaultdict(lambda: defaultdict(int))
    # {
    #     'saturday' : {
    #         23 : 5,
    #     }
    # }
    week_days = [
        'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday',
        'sunday'
    ]
    for hour, week_day, uses in session.execute(
            sql.select([
                model.DbUserUsedExperiment.start_date_hour,
                model.DbUserUsedExperiment.start_date_weekday,
                sa_func.count(model.DbUserUsedExperiment.id)
            ], condition).group_by(
                model.DbUserUsedExperiment.start_date_hour,
                model.DbUserUsedExperiment.start_date_weekday)):
        per_hour[week_days[week_day]][hour] = uses

    per_day = defaultdict(int)
    # {
    #     '2013-01-01' : 5
    # }
    per_week = defaultdict(int)
    # {
    #     '2013-01-01' : 5 # being 2013-01-01 that monday
    # }
    min_day = datetime.date(2100, 1, 1)
    max_day = datetime.date(1900, 1, 1)
    for start_date_date, uses in session.execute(
            sql.select([
                model.DbUserUsedExperiment.start_date_date,
                sa_func.count(model.DbUserUsedExperiment.id)
            ], condition).group_by(
                model.DbUserUsedExperiment.start_date_date)):
        if start_date_date > max_day:
            max_day = start_date_date
        if start_date_date < min_day:
            min_day = start_date_date
        per_day[start_date_date.strftime('%Y-%m-%d')] = uses
        week_day = start_date_date.weekday()
        per_week[start_date_date - datetime.timedelta(days=week_day)] += uses

    for user_id, microseconds in session.execute(
            sql.select(
                [
                    model.DbUserUsedExperiment.user_id,
                    sa_func.sum(model.DbUserUsedExperiment.session_time_micro)
                ],
                sql.and_(condition,
                         model.DbUserUsedExperiment.session_time_micro !=
                         None)).group_by(model.DbUserUsedExperiment.user_id)):
        users_time[user_id_cache[user_id]] = microseconds / 1000000
    results['users_time'] = users_time

    per_block_size = defaultdict(int)
    NUM_BLOCKS = 20
    block_size = max_time / NUM_BLOCKS
    for session_time_seconds, count_cases in session.execute(
            sql.select([
                model.DbUserUsedExperiment.session_time_seconds,
                sa_func.count(model.DbUserUsedExperiment.session_time_seconds)
            ], condition).group_by(
                model.DbUserUsedExperiment.session_time_seconds)):
        if session_time_seconds is not None:
            if block_size > 0:
                block_number = int(session_time_seconds / block_size)
            else:
                block_number = 0
            per_block_size[block_number] += count_cases

    for start_date_date, session_time_micro, session_number in session.execute(
            sql.select([
                model.DbUserUsedExperiment.start_date_date,
                sa_func.sum(model.DbUserUsedExperiment.session_time_micro),
                sa_func.count(model.DbUserUsedExperiment.id)
            ],
                       sql.and_(
                           condition,
                           model.DbUserUsedExperiment.session_time_micro !=
                           None)).group_by(
                               model.DbUserUsedExperiment.start_date_date)):
        time_per_day[start_date_date.strftime(
            '%Y-%m-%d')] = session_time_micro / session_number / 1000000
        results['statistics']['total_time'] += session_time_micro / 1000000

    links, hashes = generate_links(session, condition)
    # hashes = { file_hash : [ (use.id, user.id, datetime, login), ... ] }
    results['links'] = links

    if hashes:
        new_hashes = defaultdict(list)
        total_copies_per_date = defaultdict(int)
        total_copy_time_diffs = []
        min_diff = 100 * 365 * 24 * 3600  # a century
        max_diff = 0
        # new_hashes = { (file_hash, first_login) : [ (use_id, user_id, datetime, login), ... ] } with different logins
        for file_hash, uses in hashes.items():
            original_user_id = uses[0][1]
            original_dt = uses[0][2]
            original_login = uses[0][3]
            for use_id, user_id, dt, login in uses:
                if user_id != original_user_id:
                    difference = dt - original_dt
                    difference = (
                        difference.microseconds +
                        (difference.seconds + difference.days * 24 * 3600) *
                        10**6) / 10**6
                    current_use = (use_id, user_id, dt, login, difference)
                    new_hashes[(file_hash, original_login)].append(current_use)
                    total_copies_per_date[dt.strftime('%Y-%m-%d')] += 1
                    total_copy_time_diffs.append(difference)
                    if difference > max_diff:
                        max_diff = difference
                    if difference < min_diff:
                        min_diff = difference

        DIFF_STEPS = 50
        DIFF_STEP = math.log10(max_diff) / DIFF_STEPS
        diff_distribution = []

        for pos in range(DIFF_STEPS):
            min_value = 10**(DIFF_STEP * pos)
            max_value = 10**(DIFF_STEP * (pos + 1))
            current_value = 0
            for time_diff in total_copy_time_diffs:
                if min_value < time_diff <= max_value:
                    current_value += 1

            diff_distribution.append({
                'header':
                "%s - %s" % (to_human(min_value), to_human(max_value)),
                'value':
                current_value,
            })

        # Remove first steps
        while diff_distribution and diff_distribution[0]['value'] == 0:
            diff_distribution.pop(0)

        results['copies.time.diff'] = {
            'min_diff': min_diff,
            'max_diff': max_diff,
            'distribution': json.dumps(diff_distribution),
        }
        per_day_nvd3 = to_nvd3(per_day)
        for key in per_day:
            if key not in total_copies_per_date:
                total_copies_per_date[key] = 0
        total_copies_per_date_nvd3 = to_nvd3(total_copies_per_date)

        results['copies.dates'] = {
            'normal': per_day,
            'copies': total_copies_per_date,
            'min': min_day,
            'max': max_day,
        }
        results['copies.timelines'] = json.dumps(
            [{
                'key': 'Total',
                'values': per_day_nvd3
            }, {
                'key': 'Copies',
                'values': total_copies_per_date_nvd3
            }],
            indent=4)

    results['statistics']['total_time_human'] = datetime.timedelta(
        seconds=int(results['statistics']['total_time']))

    per_block_headers = []
    per_block_values = []
    for block_num in range(NUM_BLOCKS):
        per_block_headers.append('%s-%s' % (block_num * block_size,
                                            (block_num + 1) * block_size))
        per_block_values.append(per_block_size[block_num])
    per_block_headers.append('On finish')
    per_block_values.append(per_block_size[NUM_BLOCKS])
    results['per_block_headers'] = per_block_headers
    results['per_block_values'] = per_block_values

    if results['mode'] in ('group', 'total'):
        results['statistics']['avg_per_user'] = 1.0 * results['statistics'][
            'uses'] / (results['statistics']['users'] or 1)

    if per_day:
        timeline_headers, timeline_values = zip(
            *sorted(per_day.items(), lambda (d1, v1), (d2, v2): cmp(d1, d2)))
Example #46
0
async def delete_order(conn, user_id, name):
    result = await conn.execute(
        orders.delete().where(
            and_(orders.c.name == name, orders.c.user_id == user_id)))
    return result
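A sketch of how this coroutine might be driven, assuming the connection comes from an aiopg.sa engine (the DSN and the surrounding setup are illustrative):

import asyncio
from aiopg.sa import create_engine

async def main():
    async with create_engine(dsn='dbname=shop user=shop host=127.0.0.1') as engine:
        async with engine.acquire() as conn:
            result = await delete_order(conn, user_id=42, name='order-1')
            print(result.rowcount)  # number of rows deleted

asyncio.get_event_loop().run_until_complete(main())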
Example #47
0
def debug(context, connection_string, force):
    try:
        engine = database_handler.SQLServer.create_engine(connection_string)
        session = sqlalchemy.orm.scoped_session(
            sqlalchemy.orm.sessionmaker(bind=engine))

        # Get latest run id.
        last_run = session.query(Run).order_by(Run.id.desc()).first()

        # Get all failed actions.
        actions = session.query(BuildAction).filter(
            and_(BuildAction.run_id == last_run.id,
                 sqlalchemy.sql.func.length(BuildAction.failure_txt) != 0))

        debug_env = analyzer_env.get_check_env(context.path_env_extra,
                                               context.ld_lib_path_extra)

        crash_handler = analyzer_crash_handler.AnalyzerCrashHandler(
            context, debug_env)

        dumps_dir = context.dump_output_dir
        if not os.path.exists(dumps_dir):
            os.mkdir(dumps_dir)

        LOG.info('Generating gdb dump files to: ' + dumps_dir)

        for action in actions:
            LOG.info('Processing action ' + str(action.id) + '.')
            debug_log_file = \
                os.path.join(dumps_dir,
                             get_dump_file_name(last_run.id, action.id))

            if not force and os.path.exists(debug_log_file):
                LOG.info('This file already exists.')
                continue

            LOG.info('Generating stacktrace with gdb.')

            gdb_result = \
                crash_handler.get_crash_info(str(action.check_cmd).split())

            LOG.info('Writing debug info to file.')

            with open(debug_log_file, 'w') as log_file:
                log_file.write('========================\n')
                log_file.write('Build command hash: \n')
                log_file.write('========================\n')
                log_file.write(action.build_cmd_hash + '\n')
                log_file.write('===============\n')
                log_file.write('Check command: \n')
                log_file.write('===============\n')
                log_file.write(action.check_cmd + '\n')
                log_file.write('==============\n')
                log_file.write('Failure text: \n')
                log_file.write('==============\n')
                log_file.write(action.failure_txt + '\n')
                log_file.write('==========\n')
                log_file.write('GDB info: \n')
                log_file.write('==========\n')
                log_file.write(gdb_result)

        LOG.info('All new debug files are placed in ' + dumps_dir)

    except KeyboardInterrupt as kb_exc:
        LOG.error(str(kb_exc))
        sys.exit(1)
Example #48
0
    def filtering(self):
        search_value = self.request_values.get('sSearch')
        condition = None

        def search(idx, col):
            tmp_column_name = col.column_name.split('.')
            for tmp_name in tmp_column_name:
                if tmp_column_name.index(tmp_name) == 0:
                    obj = getattr(self.sqla_object, tmp_name)
                    parent = self.sqla_object
                elif isinstance(obj.property, RelationshipProperty):
                    parent = obj.property.mapper.class_
                    obj = getattr(parent, tmp_name)
                if not hasattr(obj, 'property'):
                    sqla_obj = parent
                    column_name = tmp_name
                elif isinstance(obj.property, RelationshipProperty):
                    sqla_obj = obj.mapper.class_
                    column_name = tmp_name
                    if not column_name:
                        column_name = obj.property.table.primary_key.columns \
                            .values()[0].name
                else:
                    sqla_obj = parent
                    column_name = tmp_name
            return sqla_obj, column_name

        if search_value:
            search_value_list = str(search_value).split()
            for search_val in search_value_list:
                conditions = []
                for idx, col in enumerate(self.columns):
                    if self.request_values.get('bSearchable_%s' % idx) in (
                            True, 'true') and col.searchable:
                        sqla_obj, column_name = search(idx, col)
                        conditions.append(
                            cast(get_attr(sqla_obj, column_name),
                                 String).ilike('%%%s%%' % search_val))
                condition = or_(*conditions)
                if condition is not None:
                    self.query = self.query.filter(condition)
        conditions = []
        for idx, col in enumerate(self.columns):
            search_value2 = self.request_values.get('sSearch_%s' % idx)
            if search_value2:
                sqla_obj, column_name = search(idx, col)
                if col.search_like:
                    conditions.append(
                        cast(get_attr(sqla_obj, column_name),
                             String).ilike('%%%s%%' % search_value2))
                else:
                    conditions.append(
                        cast(get_attr(sqla_obj, column_name),
                             String).__eq__(search_value2))
                if condition is not None:
                    condition = and_(condition, and_(*conditions))
                else:
                    condition = and_(*conditions)
        if condition is not None:
            self.query = self.query.filter(condition)
            self.cardinality_filtered = self.query.count()
        else:
            self.cardinality_filtered = self.cardinality
Example #49
0
class Session(db.Model, TypenameMixin, StatusPredicatesMixin, HasSubjectsMixin,
              UserDetailsMixin, TimespanMixin):

    id = db.Column(db.Integer, primary_key=True)
    logical_id = db.Column(db.String(256), unique=True, index=True)
    updated_at = db.Column(db.DateTime(),
                           onupdate=datetime.now,
                           index=True,
                           nullable=True)

    parent_logical_id = db.Column(db.String(256),
                                  db.ForeignKey('session.logical_id',
                                                ondelete='CASCADE'),
                                  default=None,
                                  index=True)
    children = db.relationship('Session',
                               backref=backref('parent',
                                               remote_side=[logical_id]))
    is_parent_session = db.Column(db.Boolean, server_default='FALSE')
    child_id = db.Column(db.String(20), default=None)

    start_time = db.Column(db.Float, default=get_current_time)
    end_time = db.Column(db.Float, default=None, index=True)
    hostname = db.Column(db.String(100))

    in_pdb = db.Column(db.Boolean, server_default='FALSE')

    infrastructure = db.Column(db.String(50), default=None)

    tests = db.relationship('Test',
                            backref=backref('session', lazy='joined'),
                            cascade='all, delete, delete-orphan')
    errors = db.relationship('Error',
                             backref=backref('session', lazy='joined'))
    comments = db.relationship('Comment',
                               primaryjoin='Comment.session_id==Session.id')
    metadata_items = db.relationship('SessionMetadata',
                                     lazy='dynamic',
                                     cascade='all, delete, delete-orphan')

    subject_instances = db.relationship('SubjectInstance',
                                        secondary=session_subject,
                                        backref=backref('sessions',
                                                        lazy='dynamic'),
                                        lazy='joined',
                                        order_by=session_subject.c.ordinal)

    labels = db.relationship('Label',
                             secondary='session_label',
                             lazy='joined',
                             order_by="Label.name")

    # test counts
    total_num_tests = db.Column(db.Integer, default=None)
    num_failed_tests = db.Column(db.Integer, default=0)
    num_error_tests = db.Column(db.Integer, default=0)
    num_skipped_tests = db.Column(db.Integer, default=0)
    num_finished_tests = db.Column(db.Integer, default=0)
    num_interruptions = db.Column(db.Integer, default=0)
    num_interrupted_tests = db.Column(db.Integer, server_default="0")
    num_warnings = db.Column(db.Integer, nullable=False, server_default="0")
    num_test_warnings = db.Column(db.Integer,
                                  nullable=False,
                                  server_default="0")

    user_id = db.Column(db.Integer,
                        db.ForeignKey('user.id', ondelete='CASCADE'),
                        index=True,
                        nullable=False)
    user = db.relationship('User', lazy='joined', foreign_keys=user_id)
    real_user_id = db.Column(db.Integer,
                             db.ForeignKey('user.id', ondelete='CASCADE'),
                             nullable=True)
    real_user = db.relationship('User',
                                lazy='joined',
                                foreign_keys=real_user_id)

    # status
    num_errors = db.Column(db.Integer, default=0)
    num_failures = db.Column(db.Integer, default=0)
    status = db.Column(db.String(20), nullable=False, default=statuses.STARTED)

    # keepalive
    keepalive_interval = db.Column(db.Integer, nullable=True, default=None)
    next_keepalive = db.Column(db.Float,
                               nullable=True,
                               default=None,
                               index=True)
    reporting_stopped = db.Column(db.Boolean, default=False)

    # activity
    num_comments = db.Column(db.Integer, default=0)

    has_fatal_errors = db.Column(db.Boolean, default=False)

    delete_at = db.Column(db.Float, nullable=True)
    ttl_seconds = db.Column(db.Integer, nullable=True)

    __table_args__ = (
        Index('ix_session_start_time', start_time.desc()),
        Index('ix_session_status_lower', func.lower(status)),
        Index('ix_session_start_time_status_lower', start_time.desc(),
              func.lower(status)),
        Index('ix_session_timespan', 'timespan', postgresql_using='gist'),
        Index('ix_session_delete_at',
              delete_at,
              postgresql_where=(delete_at != None)),
        Index('ix_session_updated_at',
              updated_at.asc(),
              postgresql_where=(updated_at != None)),
    )

    last_comment_obj = db.relationship(
        lambda: Comment,
        primaryjoin=lambda: and_(
            Session.id == Comment.session_id,  # pylint: disable=undefined-variable
            Comment.timestamp == select([func.max(Comment.timestamp)]).where(
                Comment.session_id == Session.id).correlate(Session.__table__)
        ),
        uselist=False,
        lazy='joined')

    @rendered_field
    def last_comment(self):
        comment = self.last_comment_obj
        if comment is None:
            return None

        return {'comment': comment.comment, 'user_email': comment.user.email}

    @rendered_field
    def is_abandoned(self):
        if self.next_keepalive is None:
            return False
        if self.next_keepalive > get_current_time():
            return False
        return self.end_time is None

    # rendered extras
    related_entities = db.relationship('Entity', secondary='session_entity')

    @rendered_field
    def real_email(self):
        user = self.real_user
        if user is None:
            return None
        return user.email

    @rendered_field(name='labels')
    def label_names(self):
        return [l.name for l in self.labels]

    def update_keepalive(self):
        if self.keepalive_interval is not None:
            next_keepalive = flux.current_timeline.time(
            ) + self.keepalive_interval
            self.next_keepalive = next_keepalive
            self.extend_timespan_to(next_keepalive)
            if self.ttl_seconds is not None:
                self.delete_at = self.next_keepalive + self.ttl_seconds

    def notify_subject_activity(self):
        for subject_instance in self.subject_instances:
            subject_instance.subject.last_activity = max(
                subject_instance.subject.last_activity or 0,
                flux.current_timeline.time())
Example #50
0
def generate_links(session, condition):
    hashes = defaultdict(list)
    #
    # {
    #     'file_hash' : [(use.id, user.id, datetime, login), (use.id,user.id, datetime, login), (use.id, user.id, datetime, login)]
    # }
    #
    multiuser_file_hashes = sql.select(
        [model.DbUserFile.file_hash],
        sql.and_(
            condition, model.DbUserFile.experiment_use_id ==
            model.DbUserUsedExperiment.id,
            model.DbUser.id == model.DbUserUsedExperiment.user_id,
            not_(model.DbUserFile.file_hash.in_(EMPTY_HASHES))),
        use_labels=True).group_by(model.DbUserFile.file_hash).having(
            sa_func.count(distinct(model.DbUserUsedExperiment.user_id)) > 1
        ).alias('foo')

    joined = outerjoin(
        multiuser_file_hashes, model.DbUserFile, model.DbUserFile.file_hash ==
        multiuser_file_hashes.c.UserFile_file_hash)

    files_query = sql.select(
        [
            model.DbUserUsedExperiment.id, model.DbUserUsedExperiment.user_id,
            model.DbUserFile.file_hash, model.DbUser.login,
            model.DbUser.full_name, model.DbUserUsedExperiment.start_date
        ],
        sql.and_(
            condition, model.DbUserFile.experiment_use_id ==
            model.DbUserUsedExperiment.id, model.DbUser.id ==
            model.DbUserUsedExperiment.user_id)).select_from(joined)

    user_id_cache = {}
    for use in session.execute(files_query):
        use_id = use['id']
        user_id = use['user_id']
        file_hash = use['file_hash']
        login = use['login']
        # login = '******' % user_id
        start_date = use['start_date']
        # full_name = use['full_name']
        # user_id_cache[user_id] = u'%s (%s)' % (full_name, login)
        user_id_cache[user_id] = login
        hashes[file_hash].append((use_id, user_id, start_date, login))

    if not hashes:
        return {}, {}

    # No group by since there is no easy way to order correctly, and the amount of data is not
    # huge
    query = sql.select(
        [model.DbUserFile.file_hash, model.DbRole.name],
        sql.and_(
            model.DbUserFile.experiment_use_id ==
            model.DbUserUsedExperiment.id,
            model.DbUserUsedExperiment.user_id == model.DbUser.id,
            model.DbUser.role_id == model.DbRole.id,
            model.DbUserFile.file_hash.in_(hashes.keys()))).order_by(
                model.DbUserUsedExperiment.start_date)

    correct_file_hashes = set()
    for file_hash, role_name in session.execute(query):
        if file_hash in correct_file_hashes:
            continue

        if role_name in ('administrator', 'professor', 'admin', 'instructor'):
            hashes.pop(file_hash, None)
        else:
            correct_file_hashes.add(file_hash)

    # Filter those hashes which were first used by users who were instructor, admin, etc.
    # It's only a problem when the file has been previously submitted by someone who was not a teacher
    links = defaultdict(list)

    # With the remaining, calculate the copies
    for file_hash in hashes:
        # Get first in course
        first_use_id, first_user_id, use_datetime, login = hashes[file_hash][0]
        distinct_user_ids = set([
            user_id
            for use_id, user_id, use_datetime, login in hashes[file_hash]
            if user_id != first_user_id
        ])
        for user_id in distinct_user_ids:
            links[user_id_cache[first_user_id]].append(user_id_cache[user_id])

    return links, hashes
Example #51
0
def purchase_ticket():
    """

    Function to allow the user to purchase a ticket based on movies,
    theaters, and their schedules

    """

    print("****************** PURCHASE TICKETS ******************")
    print()

    if not state.active_account:
        print("You must be logged in to purchase a ticket.")
        return

    # Get account credentials that were created on registration
    account = state.active_account

    # Grab the theater_schedule objects
    schedules = session.query(theater_schedule).all()

    print("\nMOVIE THEATER SCHEDULES\n")

    # List all available movies and theaters and times
    # with index loop so they can input a number representing an object
    # that will later get mapped to elements of tuples appended to a list
    index = 0
    for i in schedules:
        theater = session.query(Theater).filter_by(id=i.theater_id).first()
        movie = session.query(Movie).filter_by(id=i.movie_id).first()
        index += 1
        print(
            f"""{index}: {theater.name} {theater.address}, Prices: {theater.ticket_price} 
        {movie.title}, Schedules: {i.time}, Seats: {i.seats_available}\n""")

    ticket_number = input("\nEnter ticket number: ").strip()
    ticket_number = int(ticket_number) - 1

    quantity = input("How many tickets would you like to purchase: ").strip()
    quantity = int(quantity)

    category = input("Which category of tickets (i.e. Adult/Child): ").strip()

    theaters_list = []
    # Create a tuple of the required information to purchase a ticket
    # along with an index so the user can select a tuple
    for i, x in enumerate(schedules, 1):
        theater = session.query(Theater).filter_by(id=x.theater_id).first()
        movie = session.query(Movie).filter_by(id=x.movie_id).first()
        payment_id = random_number_generator()
        payment_id = int(payment_id)
        tup = (i, theater.id, movie.id, x.time, payment_id, account.id)
        theaters_list.append(tup)

    my_ticket = theaters_list[ticket_number]

    # Figure out the price for the chosen category for this particular
    # theater outside the loop, so we don't repeat it for every theater
    my_theater = session.query(Theater).filter_by(id=my_ticket[1]).first()
    # my_movie is used later for the receipt information
    my_movie = session.query(Movie).filter_by(id=my_ticket[2]).first()

    ticket_price = float(my_theater.ticket_price[category])
    total = ticket_price * quantity

    ticket = Ticket(theater_id=my_ticket[1],
                    movie_id=my_ticket[2],
                    time=my_ticket[3],
                    payment_id=my_ticket[4],
                    account_id=my_ticket[5],
                    quantity=quantity,
                    total=total)

    payment = Payment(id=my_ticket[4],
                      credit_card=account.credit_card,
                      paid=True)

    session.add(ticket)
    session.add(payment)
    session.commit()

    # I think there's gotta be a better way to do this (see the sketch after
    # this example), but what it's supposed to do is update the value of
    # seats_available in theater_schedule every time someone purchases a ticket
    my_theater_schedule = session.query(theater_schedule).filter_by(
        theater_id=my_ticket[1], movie_id=my_ticket[2],
        time=my_ticket[3]).first()
    new_seats_available = my_theater_schedule.seats_available - quantity
    # SQLAlchemy Expression Language as the theaters_schedule is a Table construct
    # and not an ORM object
    engine.execute(
        update(theater_schedule).where(
            and_(theater_schedule.c.theater_id == my_ticket[1],
                 theater_schedule.c.movie_id == my_ticket[2],
                 theater_schedule.c.time == my_ticket[3])).values(
                     seats_available=new_seats_available))

    ticket_receipt = session.query(Ticket).filter_by(id=ticket.id).first()

    print("\nYour receipt: \n")
    print(
        f"""Movie: {my_movie.title} | Location: {my_theater.name} at {my_theater.address} 
    Time: {ticket_receipt.time} | Quantity: {ticket_receipt.quantity} tickets 
    Total Price: ${total} \n

    Payment Id: {payment.id} | Date of Purchase: {ticket_receipt.created.date()}"""
    )

    print("\nEnjoy your movie!\n")
Example #52
0
class Test(db.Model, TypenameMixin, StatusPredicatesMixin, HasSubjectsMixin,
           UserDetailsMixin, TimespanMixin):

    id = db.Column(db.Integer, primary_key=True)

    test_index = db.Column(db.Integer)
    updated_at = db.Column(db.DateTime(),
                           onupdate=datetime.now,
                           index=True,
                           nullable=True)
    test_info_id = db.Column(db.Integer,
                             db.ForeignKey('test_information.id',
                                           ondelete='CASCADE'),
                             index=True)
    test_info = db.relationship('TestInformation', lazy='joined')

    test_variation_id = db.Column(db.Integer,
                                  db.ForeignKey('test_variation.id',
                                                ondelete='CASCADE'),
                                  index=True)
    test_variation = db.relationship('TestVariation', lazy='joined')

    subject_instances = db.relationship(
        'SubjectInstance',
        secondary=session_subject,
        primaryjoin='Test.session_id==session_subject.c.session_id',
        lazy='joined',
        order_by=session_subject.c.ordinal)

    user = db.relationship('User',
                           secondary=Session.__table__,
                           primaryjoin='Test.session_id==Session.id',
                           secondaryjoin='Session.user_id==User.id',
                           lazy='joined',
                           uselist=False)

    metadatas = db.relationship('TestMetadata', lazy='dynamic')

    parameters = db.Column(JSONB)

    @rendered_field
    def variation(self):
        v = self.test_variation
        if v is None:
            return None
        return v.variation

    @rendered_field
    def session_display_id(self):
        return self.session.logical_id or self.session.id

    @rendered_field
    def is_session_abandoned(self):
        return self.session.is_abandoned()

    scm = db.Column(db.String(5), default=None)
    scm_dirty = db.Column(db.Boolean, server_default='false')
    scm_revision = db.Column(db.String(40), default=None)
    scm_local_branch = db.Column(db.String(256), default=None, nullable=True)
    scm_remote_branch = db.Column(db.String(256), default=None, nullable=True)
    file_hash = db.Column(db.String(40), default=None)

    session_id = db.Column(db.Integer,
                           db.ForeignKey('session.id', ondelete='CASCADE'),
                           index=True)

    logical_id = db.Column(db.String(256), index=True, unique=True)
    start_time = db.Column(db.Float, default=None, index=True)
    end_time = db.Column(db.Float, default=None, index=True)

    errors = db.relationship('Error')
    comments = db.relationship('Comment',
                               primaryjoin='Comment.test_id==Test.id')

    first_error_obj = db.relationship(
        lambda: Error,
        primaryjoin=lambda: and_(
            Test.id == Error.test_id,  # pylint: disable=undefined-variable
            Error.timestamp == select([func.min(Error.timestamp)]).where(
                Error.test_id == Test.id).correlate(Test.__table__)),
        uselist=False,
        lazy='joined')

    @rendered_field
    def first_error(self):
        if self.first_error_obj is None:
            return None
        return render_api_object(self.first_error_obj,
                                 only_fields={'message', 'exception_type'})

    @rendered_field
    def first_error_id(self):
        if self.first_error_obj is None:
            return None
        return self.first_error_obj.id

    last_comment_obj = db.relationship(
        lambda: Comment,
        primaryjoin=lambda: and_(
            Test.id == Comment.test_id, Comment.timestamp == select([
                func.max(Comment.timestamp)
            ]).where(Comment.test_id == Test.id).correlate(Test.__table__)),
        uselist=False,
        lazy='joined')

    @rendered_field
    def last_comment(self):
        comment = self.last_comment_obj
        if comment is None:
            return None

        return {'comment': comment.comment, 'user_email': comment.user.email}

    related_entities = db.relationship('Entity', secondary='test_entity')

    is_interactive = db.Column(db.Boolean, server_default='FALSE')

    status = db.Column(db.String(20), nullable=False, default=statuses.STARTED)
    status_description = db.Column(db.String(1024), nullable=True)

    skip_reason = db.Column(db.Text(), nullable=True)

    num_errors = db.Column(db.Integer, default=0)
    num_failures = db.Column(db.Integer, default=0)
    num_comments = db.Column(db.Integer, default=0)
    num_warnings = db.Column(db.Integer, nullable=False, server_default="0")
    num_interruptions = db.Column(db.Integer, default=0)

    __table_args__ = (
        Index('ix_test_start_time', start_time.desc()),
        Index('ix_test_session_id_start_time', session_id, start_time),
        Index('ix_test_status_lower_start_time', func.lower(status),
              start_time.desc()),
        Index('ix_test_start_time_status_lower', start_time.desc(),
              func.lower(status)),
        Index('ix_test_test_info_id_start_time', test_info_id,
              start_time.desc()),
        Index('ix_test_timespan', 'timespan', postgresql_using='gist'),
        Index('ix_test_updated_at',
              updated_at.asc(),
              postgresql_where=(updated_at != None)),
        Index('ix_test_updated_at_id', updated_at.asc(), id.asc()),
    )

    @rendered_field
    def duration(self):
        if self.end_time is None or self.start_time is None:
            return None
        return self.end_time - self.start_time

    @rendered_field
    def info(self):
        return {
            attr: getattr(self.test_info, attr)
            for attr in ('file_name', 'class_name', 'name')
        }
Пример #53
0
    def showWerknemer(idx):
        maccountnr = idx.data()
        if idx.column() == 0:
            engine = create_engine('postgresql+psycopg2://postgres@localhost/bisystem')
            conn = engine.connect()
            selwrknmr = select([werknemers, accounts, lonen]).where(and_(werknemers.c.accountID\
                  == maccountnr, werknemers.c.accountID == accounts.c.accountID,\
                  lonen.c.loonID == werknemers.c.loonID))
            rpwrknmr = conn.execute(selwrknmr).first()
                                                   
            class Widget(QDialog):
                 def __init__(self, parent=None):
                    super(Widget, self).__init__(parent)
                    self.setWindowTitle("Werknemersgegevens opvragen")
                    self.setWindowIcon(QIcon('./images/logos/logo.jpg')) 
                                          
                    self.setFont(QFont('Arial', 10))
                        
                    self.Accountnummer = QLabel()
                    q2Edit = QLineEdit(str(maccountnr))
                    q2Edit.setFixedWidth(90)
                    q2Edit.setDisabled(True)
                    q2Edit.setFont(QFont("Arial",10))
        
                    self.Loontabelnummer = QLabel()
                    q4Edit = QLineEdit(str(rpwrknmr[2]))
                    q4Edit.setFixedWidth(30)
                    q4Edit.setFont(QFont("Arial",10))
                    q4Edit.setDisabled(True)
         
                    self.Loontrede = QLabel()
                    q5Edit = QLineEdit(str(rpwrknmr[3]))
                    q5Edit.setFixedWidth(30)
                    q5Edit.setFont(QFont("Arial",10))
                    q5Edit.setDisabled(True)                              
                    
                    self.Reiskostenvergoeding = QLabel()
                    q8Edit = QLineEdit(str(rpwrknmr[12]))
                    q8Edit.setFixedWidth(100)
                    q8Edit.setFont(QFont("Arial",10))
                    q8Edit.setDisabled(True)
                                                         
                    self.Auto = QLabel()
                    q18Edit = QLineEdit(str(round(rpwrknmr[11],2)))
                    q18Edit.setFixedWidth(70)
                    q18Edit.setFont(QFont("Arial",10))
                    q18Edit.setDisabled(True)
                      
                    self.Periodiekeuitkering = QLabel()
                    q12Edit = QLineEdit(str(rpwrknmr[8]))
                    q12Edit.setFixedWidth(100)
                    q12Edit.setFont(QFont("Arial",10))
                    q12Edit.setDisabled(True)
                     
                    self.Overigeinhoudingen = QLabel()
                    q13Edit = QLineEdit(str(rpwrknmr[9]))
                    q13Edit.setFixedWidth(100)
                    q13Edit.setFont(QFont("Arial",10))
                    q13Edit.setDisabled(True)
             
                    self.Overigevergoedingen = QLabel()
                    q19Edit = QLineEdit(str(rpwrknmr[10]))
                    q19Edit.setFixedWidth(100)
                    q19Edit.setFont(QFont("Arial",10))
                    q19Edit.setDisabled(True)
             
                    self.Indienst = QLabel()
                    q14Edit = QLineEdit(rpwrknmr[13])
                    q14Edit.setFixedWidth(100)
                    q14Edit.setFont(QFont("Arial",10))
                    q14Edit.setDisabled(True)
 
                    self.Maandloon = QLabel()
                    q15Edit = QLineEdit(str(round(rpwrknmr[25]*(1+(rpwrknmr[3]*3/100)),2)))
                    q15Edit.setDisabled(True)
                    q15Edit.setFixedWidth(100)
                    q15Edit.setFont(QFont("Arial",10))
                    q15Edit.setDisabled(True)
          
                    self.Verlofsaldo = QLabel()
                    q16Edit = QLineEdit(str(rpwrknmr[14]))
                    q16Edit.setFixedWidth(100)
                    q16Edit.setFont(QFont("Arial",10))
                    q16Edit.setDisabled(True)
         
                    self.ExtraVerlof = QLabel()
                    q17Edit = QLineEdit(str(rpwrknmr[15]))
                    q17Edit.setFixedWidth(100)
                    q17Edit.setFont(QFont("Arial",10))
                    q17Edit.setDisabled(True)
                            
                    grid = QGridLayout()
                    grid.setSpacing(20)
                    
                    lbl = QLabel()
                    pixmap = QPixmap('./images/logos/verbinding.jpg')
                    lbl.setPixmap(pixmap)
                    grid.addWidget(lbl, 1, 0)
                    
                    logo = QLabel()
                    pixmap = QPixmap('./images/logos/logo.jpg')
                    logo.setPixmap(pixmap)
                    grid.addWidget(logo, 1, 3, 1, 1, Qt.AlignRight)
            
                    self.setFont(QFont('Arial', 10))
                    grid.addWidget(QLabel('Opvragen werknemergegevens van\n'+rpwrknmr[18]+\
                    ' '+rpwrknmr[19]+' '+rpwrknmr[20]+'\nGeboren: '+rpwrknmr[21]), 1, 1, 1, 3)
                    
                    grid.addWidget(QLabel('Bruto maandloon'), 3, 2)
                    grid.addWidget(q15Edit, 3, 3) 
                                                        
                    grid.addWidget(QLabel('Accountnummer'), 3, 0)
                    grid.addWidget(q2Edit, 3, 1)
                    
                    grid.addWidget(QLabel('Loontabel'), 6, 0)
                    grid.addWidget(q4Edit, 6, 1)
                     
                    grid.addWidget(QLabel('Loontrede'), 7, 0)
                    grid.addWidget(q5Edit, 7, 1)
                                                              
                    grid.addWidget(QLabel('Reiskostenvergoeding'), 4, 2)
                    grid.addWidget(q8Edit, 4, 3)
                                              
                    grid.addWidget(QLabel('Periodieke uitkering belast'), 5, 0)
                    grid.addWidget(q12Edit, 5, 1) 
                    
                    grid.addWidget(QLabel('Overige inhoudingen onbelast'), 5, 2)
                    grid.addWidget(q13Edit, 5, 3) 
                    
                    grid.addWidget(QLabel('Bijtelling Bedrijfsauto'), 4, 0)
                    grid.addWidget(q18Edit, 4, 1) 
                                   
                    grid.addWidget(QLabel('Overige Vergoedingen\nonbelast'), 6, 2)
                    grid.addWidget(q19Edit, 6, 3) 
               
                    grid.addWidget(QLabel('Datum indiensttreding'), 8, 0)
                    grid.addWidget(q14Edit, 8, 1) 
                    
                    grid.addWidget(QLabel('Verlofsaldo in uren'), 7, 2)
                    grid.addWidget(q16Edit, 7, 3)
                    
                    grid.addWidget(QLabel('Extra verlof in uren'), 8, 2)
                    grid.addWidget(q17Edit, 8, 3)
                    
                    grid.addWidget(QLabel('Uurloon'), 9, 0)
                    grid.addWidget(QLabel(str(round(rpwrknmr[23], 2))), 9, 1, 1, 1, Qt.AlignRight) 
                    grid.addWidget(QLabel('Reisuurloon'), 9, 2)
                    grid.addWidget(QLabel(str(round(rpwrknmr[24], 2))), 9, 3)
                    grid.addWidget(QLabel('Pensioenpremie'), 10, 0)
                    grid.addWidget(QLabel(str(round(rpwrknmr[5], 2))), 10, 1, 1, 1, Qt.AlignRight) 
                    grid.addWidget(QLabel('Reservering vakantietoeslag'), 10, 2)
                    grid.addWidget(QLabel(str(round(rpwrknmr[6], 2))), 10, 3) 
                    grid.addWidget(QLabel('Werkgever pensioenpremie'), 11, 0)
                    grid.addWidget(QLabel(str(round(rpwrknmr[7], 2))), 11, 1, 1, 1, Qt.AlignRight) 
                    grid.addWidget(QLabel('Loonheffing'), 11, 2)
                    grid.addWidget(QLabel(str(round(rpwrknmr[4], 2))), 11, 3) 
                    
                    grid.addWidget(QLabel('\u00A9 2017 all rights reserved [email protected]'), 13, 0, 1, 4, Qt.AlignCenter)
                    self.setLayout(grid)
                    self.setGeometry(500, 200, 350, 300)
                                               
                    cancelBtn = QPushButton('Sluiten')
                    cancelBtn.clicked.connect(self.close)
                
                    grid.addWidget(cancelBtn, 12, 3)
                    cancelBtn.setFont(QFont("Arial",10))
                    cancelBtn.setFixedWidth(100)
                    cancelBtn.setStyleSheet("color: black;  background-color: gainsboro")
            
            window = Widget()
            window.exec_()
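The example ends by running the dialog modally: Widget() builds the form and exec_() blocks until the 'Sluiten' button fires close(). Below is a stripped-down, self-contained sketch of that pattern, assuming PyQt5 (the original snippet does not show its imports, so the import block here is an assumption):

# Minimal modal-dialog sketch (PyQt5 assumed): one read-only field plus a
# close button, mirroring the Widget/exec_() pattern above.
import sys
from PyQt5.QtWidgets import (QApplication, QDialog, QGridLayout, QLabel,
                             QLineEdit, QPushButton)

class DemoDialog(QDialog):
    def __init__(self, parent=None):
        super(DemoDialog, self).__init__(parent)
        self.setWindowTitle('Werknemersgegevens opvragen')
        grid = QGridLayout()
        edit = QLineEdit('12345')
        edit.setDisabled(True)  # display-only, like the q*Edit fields above
        grid.addWidget(QLabel('Accountnummer'), 0, 0)
        grid.addWidget(edit, 0, 1)
        close_btn = QPushButton('Sluiten')
        close_btn.clicked.connect(self.close)
        grid.addWidget(close_btn, 1, 1)
        self.setLayout(grid)

app = QApplication(sys.argv)
window = DemoDialog()
window.exec_()  # modal: blocks here until the dialog is closed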
Example #54
0
def query_comments(request):
    """
    Search for comments matching given search parameters.

    Args:
        request (pyramid.request): The current request.
    Returns:
        dict: A dictionary with the following key-value pairs:
            comments: An iterable with the current page of matched comments.
            page: The current page number.
            pages: The total number of pages.
            rows_per_page: The number of rows per page.
            total: The number of items matching the search terms.
            chrome: A boolean indicating whether to paginate or not.
    """
    db = request.db
    data = request.validated
    query = db.query(Comment)

    like = data.get('like')
    if like is not None:
        query = query.filter(or_(*[Comment.text.like('%%%s%%' % like)]))

    packages = data.get('packages')
    if packages is not None:
        query = query\
            .join(Comment.update)\
            .join(Update.builds)\
            .join(Build.package)
        query = query.filter(or_(*[Build.package == pkg for pkg in packages]))

    since = data.get('since')
    if since is not None:
        query = query.filter(Comment.timestamp >= since)

    updates = data.get('updates')
    if updates is not None:
        query = query.filter(or_(*[Comment.update == u for u in updates]))

    update_owner = data.get('update_owner')
    if update_owner is not None:
        query = query.join(Comment.update)
        query = query.filter(or_(*[Update.user == u for u in update_owner]))

    ignore_user = data.get('ignore_user')
    if ignore_user is not None:
        query = query.filter(and_(*[Comment.user != u for u in ignore_user]))

    # don't show bodhi user comments in the web interface
    if data.get("chrome"):
        query = query.filter(and_(*[Comment.user != User.get('bodhi')]))

    user = data.get('user')
    if user is not None:
        query = query.filter(or_(*[Comment.user == u for u in user]))

    query = query.order_by(Comment.timestamp.desc())

    # We can't use ``query.count()`` here because it is naive with respect to
    # all the joins that we're doing above.
    count_query = query.with_labels().statement\
        .with_only_columns([func.count(distinct(Comment.id))])\
        .order_by(None)
    total = db.execute(count_query).scalar()

    page = data.get('page')
    rows_per_page = data.get('rows_per_page')
    pages = int(math.ceil(total / float(rows_per_page)))
    query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page)

    return dict(
        comments=query.all(),
        page=page,
        pages=pages,
        rows_per_page=rows_per_page,
        total=total,
        chrome=data.get('chrome'),
    )
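As the comment in the function notes, Query.count() would wrap the joined query in a subquery and count one row per join match, over-counting the comments. Here is a self-contained sketch of the distinct-count workaround, assuming SQLAlchemy 1.3-era APIs (with_labels() and the list form of with_only_columns()); the two-table schema is illustrative, not bodhi's:

# Sketch: counting DISTINCT parent rows across a one-to-many join, where
# Query.count() would return one row per child (SQLAlchemy 1.3 APIs assumed).
from sqlalchemy import Column, ForeignKey, Integer, create_engine, distinct, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, relationship

Base = declarative_base()

class Update(Base):
    __tablename__ = 'updates'
    id = Column(Integer, primary_key=True)
    comments = relationship('Comment', backref='update')

class Comment(Base):
    __tablename__ = 'comments'
    id = Column(Integer, primary_key=True)
    update_id = Column(Integer, ForeignKey('updates.id'))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = Session(engine)
session.add(Update(comments=[Comment(), Comment()]))
session.commit()

query = session.query(Update).join(Update.comments)  # join duplicates rows
count_query = query.with_labels().statement \
    .with_only_columns([func.count(distinct(Update.id))]) \
    .order_by(None)  # an ORDER BY is useless in a count query
print(query.count(), session.execute(count_query).scalar())  # 2 vs. 1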
Example #55
0
    'media_count':
        column_property(
            sql.select(
                [sql.func.count(media.c.id)],
                media.c.podcast_id == podcasts.c.id,
            ).label('media_count'),
            deferred=True,
            doc="The total number of :class:`mediacore.model.media.Media` episodes."
        ),
    'media_count_published':
        column_property(
            sql.select(
                [sql.func.count(media.c.id)],
                sql.and_(
                    media.c.podcast_id == podcasts.c.id,
                    media.c.reviewed == True,
                    media.c.encoded == True,
                    media.c.publishable == True,
                    media.c.publish_on <= datetime.now(),
                    sql.or_(
                        media.c.publish_until == None,
                        media.c.publish_until >= datetime.now()
                    ),
                )
            ).label('media_count_published'),
            deferred=True,
            doc="The number of :class:`mediacore.model.media.Media` episodes that are currently published."
        )
})
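Both properties above are correlated scalar subqueries attached to the mapper and loaded lazily (deferred=True), one value per podcast row. A rough declarative-style sketch of the simpler count follows, assuming SQLAlchemy 1.4 (scalar_subquery(); older versions use as_scalar()); Podcast and Media here are illustrative stand-ins, not the mediacore models:

# Sketch: a correlated COUNT as a column_property on a declarative model
# (SQLAlchemy 1.4 API assumed).
from sqlalchemy import Column, ForeignKey, Integer, create_engine, func, select
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, column_property

Base = declarative_base()

class Media(Base):
    __tablename__ = 'media'
    id = Column(Integer, primary_key=True)
    podcast_id = Column(Integer, ForeignKey('podcasts.id'))

class Podcast(Base):
    __tablename__ = 'podcasts'
    id = Column(Integer, primary_key=True)
    media_count = column_property(
        select(func.count(Media.id))
        .where(Media.podcast_id == id)
        .correlate_except(Media)
        .scalar_subquery(),
        deferred=True,  # only queried when the attribute is accessed
    )

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = Session(engine)
podcast = Podcast()
session.add(podcast)
session.commit()
session.add(Media(podcast_id=podcast.id))
session.commit()
print(session.query(Podcast).first().media_count)  # 1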
Example #56
0
def backfix_shard(shard_id, dry_run):
    categories_to_fix = []
    with session_scope_by_shard_id(shard_id) as db_session:
        # 'SELECT id FROM <table> GROUP BY <x>' does not select _all_ of the
        # ids in the group. MySQL chooses one id and returns it. The id chosen
        # is indeterminate. So we find the duplicate
        # (namespace_id, display_name, name) pairs and use them to query
        # for specific Category rows
        category_query = db_session.query(Category.namespace_id,
                                          Category.display_name, Category.name)

        duplicate_attrs = category_query. \
                group_by(Category.display_name,
                         Category.namespace_id,
                         Category.name).having(
                            func.count(Category.id) > 1).all()

    for namespace_id, display_name, name in duplicate_attrs:
        duplicates = db_session.query(Category.id). \
                filter(Category.namespace_id == namespace_id,
                       Category.display_name == display_name,
                       Category.name == name).all()

        # duplicates is an array of tuples where each tuple is
        # (Category.id,). We flatten the tuples here so that each item in
        # categories_to_fix is a list of category ids that are duplicates
        categories_to_fix.append([item for item in chain(*duplicates)])

    categories_affected = 0
    categories_to_delete = []
    # categories_to_fix is a list of lists; each inner list holds the ids
    # of one group of duplicate categories
    for grouped_categories in categories_to_fix:
        # Keep track of categories with associated message categories
        categories_with_messages = []

        # It is possible for Messages to be associated with
        # more than one category. We choose the Category with
        # the lowest pk to be the "master" and all other duplicate
        # categories are deleted and their messages consolidated
        # into the master
        grouped_categories.sort()
        master_id = grouped_categories[0]

        # Iterate over all of the duplicate categories except master
        for category_id in grouped_categories[1:]:
            categories_affected += 1
            with session_scope_by_shard_id(shard_id) as db_session:
                associated_messages = db_session.query(exists().where(
                    MessageCategory.category_id == category_id)).scalar()

                # if category has messages, they need to be de-duped
                # and consolidated
                if associated_messages:
                    log.info('Category has associated messages',
                             category_id=category_id)
                    categories_with_messages.append(category_id)

                # if category does not have messages, it can be deleted
                else:
                    categories_to_delete.append(category_id)
                    log.info('Category does not have associated messages',
                             category_id=category_id)

        if len(categories_with_messages) > 0:
            log.info('Consolidating messages into category',
                     category_id=master_id)

            for category_id in categories_with_messages:
                try:
                    with session_scope_by_shard_id(shard_id) as db_session:
                        messagecategories = db_session.query(MessageCategory).\
                                filter(MessageCategory.category_id == category_id).all()  # noqa

                        for mc in messagecategories:
                            # It's possible for a message to be associated
                            # with both the master category and the
                            # category we want to delete. MessageCategory
                            # has a unique constraint on
                            # (message_id, category_id), so we first query
                            # whether such a row already exists for the
                            # master. If it does, this duplicate row is
                            # deleted; if not, we repoint it at the master
                            mc_exists = db_session.query(exists().where(and_(
                                MessageCategory.category_id == master_id,
                                MessageCategory.message_id == mc.message_id)))\
                                        .scalar()

                            mc_id = mc.id  # capture before any delete
                            if not dry_run:
                                # A MessageCategory linking this message
                                # to the master already exists, so this
                                # duplicate row is redundant
                                if mc_exists:
                                    db_session.delete(mc)
                                else:
                                    # Master does not have a MessageCategory
                                    # for this message. Update this one to
                                    # point to the master
                                    mc.category_id = master_id
                                db_session.commit()

                            log.info('Updated MessageCategory',
                                     mc_id=mc_id,
                                     old_category_id=category_id,
                                     new_category_id=master_id)

                    categories_to_delete.append(category_id)
                except Exception as e:
                    log.critical(
                        'Exception encountered while consolidating'
                        ' messagecategories',
                        e=str(e))
                    raise e

            # We REALLY don't want to delete the category we consolidated all
            # of the messagecategories into
            assert master_id not in categories_to_delete

        for category_id in list(categories_to_delete):  # copy: mutated below
            if dry_run:
                log.info('Delete category', category_id=category_id)
                continue

            with session_scope_by_shard_id(shard_id) as db_session:
                db_session.query(Category).filter_by(id=category_id).delete()
                log.info('Deleted category', category_id=category_id)

            categories_to_delete.remove(category_id)

    log.info('Completed category migration on shard',
             categories_affected=categories_affected,
             shard_id=shard_id)
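The detection step above is the reusable part: GROUP BY the natural key, keep only groups where func.count(id) > 1, then resolve each group against the lowest-pk "master". A self-contained sketch of that detection query follows; the Category model here is an illustrative stand-in, not the sync-engine one:

# Sketch: finding duplicate (namespace_id, name) groups via GROUP BY/HAVING.
from sqlalchemy import Column, Integer, String, create_engine, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session

Base = declarative_base()

class Category(Base):  # illustrative stand-in
    __tablename__ = 'category'
    id = Column(Integer, primary_key=True)
    namespace_id = Column(Integer)
    name = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = Session(engine)
session.add_all([Category(namespace_id=1, name='inbox'),
                 Category(namespace_id=1, name='inbox'),
                 Category(namespace_id=2, name='inbox')])
session.commit()

duplicate_attrs = (session.query(Category.namespace_id, Category.name)
                   .group_by(Category.namespace_id, Category.name)
                   .having(func.count(Category.id) > 1)
                   .all())
print(duplicate_attrs)  # [(1, 'inbox')]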
Example #57
0
def find_agent_func_key_id(agent_id, action):
    query = (sql.select([dest_agent_table.c.func_key_id]).where(
        sql.and_(dest_agent_table.c.agent_id == agent_id,
                 dest_agent_table.c.action == ACTIONS[action])))

    return op.get_bind().execute(query).scalar()
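This helper reads a single value with op.get_bind().execute(...).scalar() inside an Alembic migration. The dest_agent_table and ACTIONS names it relies on are defined elsewhere in that migration; the sketch below shows what they plausibly look like (the column names and mapping values are guesses, not the real xivo schema):

# Assumed context for the helper above (illustrative, not verified).
from sqlalchemy.sql import column, table

dest_agent_table = table(
    'func_key_dest_agent',
    column('func_key_id'),
    column('agent_id'),
    column('action'),
)

ACTIONS = {'login': 'login', 'logout': 'logoff'}  # hypothetical mapping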
Example #58
0
def toonWerknemers(keuze,zoekterm, m_email):
    import validZt
    metadata = MetaData()
    werknemers = Table('werknemers', metadata,
        Column('accountID', None, ForeignKey('accounts.accountID')),
        Column('werknemerID', Integer(), primary_key=True),
        Column('loonID', None, ForeignKey('lonen.loonID')), 
        Column('loontrede', Integer),
        Column('loonheffing', Float),
        Column('pensioenpremie', Float),
        Column('reservering_vakantietoeslag', Float),
        Column('werkgevers_pensioenpremie', Float),
        Column('periodieke_uitkeringen', Float),
        Column('overige_inhoudingen', Float),
        Column('overige_vergoedingen', Float),
        Column('bedrijfsauto', Float),
        Column('reiskosten_vergoeding', Float),
        Column('indienst', String),
        Column('verlofsaldo', Float),
        Column('extraverlof', Float))
    accounts = Table('accounts', metadata,
        Column('accountID', Integer, primary_key=True),
        Column('aanhef', String(8)),
        Column('voornaam', String(30), nullable=False), 
        Column('tussenvoegsel', String(10)),
        Column('achternaam', String(50), nullable=False),
        Column('geboortedatum', String))
    lonen = Table('lonen', metadata,
        Column('loonID', Integer, primary_key=True),
        Column('tabelloon', Float),
        Column('reisuur', Float),
        Column('maandloon', Float))
    
    engine = create_engine('postgresql+psycopg2://postgres@localhost/bisystem')
    conn = engine.connect()
    
    if keuze == 1:
        selwerkn = select([werknemers, accounts, lonen]).where(and_(werknemers.c.accountID == accounts.c.accountID,
                     werknemers.c.loonID == lonen.c.loonID)).order_by(werknemers.c.accountID)
    elif keuze == 2:
       selwerkn = select([werknemers, accounts, lonen]).where(and_(werknemers.c.accountID == accounts.c.accountID,
                     werknemers.c.loonID == lonen.c.loonID, accounts.c.achternaam.\
                     ilike('%'+zoekterm+'%'))).order_by(werknemers.c.accountID)
    elif keuze == 3 and validZt.zt(zoekterm, 13):
       selwerkn = select([werknemers, accounts, lonen]).where(and_(werknemers.c.accountID == accounts.c.accountID,
                     werknemers.c.loonID == lonen.c.loonID, werknemers.c.loonID == int(zoekterm))).\
                        order_by(werknemers.c.loonID, werknemers.c.accountID)
    elif keuze == 4 and validZt.zt(zoekterm, 1):
        selwerkn = select([werknemers, accounts, lonen]).where(and_(werknemers.c.accountID == accounts.c.accountID,
                     werknemers.c.loonID == lonen.c.loonID, werknemers.c.accountID == int(zoekterm)))
    elif keuze == 5 and validZt.zt(zoekterm, 14):
        selwerkn = select([werknemers, accounts, lonen]).where(and_(werknemers.c.accountID == accounts.c.accountID,
                     werknemers.c.loonID == lonen.c.loonID, lonen.c.tabelloon < int(zoekterm))).\
                         order_by(lonen.c.tabelloon)
    elif keuze == 6 and validZt.zt(zoekterm, 14):
        selwerkn = select([werknemers, accounts, lonen]).where(and_(werknemers.c.accountID == accounts.c.accountID,
                     werknemers.c.loonID == lonen.c.loonID, lonen.c.tabelloon > int(zoekterm))).\
                         order_by(lonen.c.tabelloon)
    elif keuze == 7 and validZt.zt(zoekterm, 14):
        selwerkn = select([werknemers, accounts, lonen]).where(and_(werknemers.c.accountID == accounts.c.accountID,
                     werknemers.c.loonID == lonen.c.loonID, lonen.c.maandloon > int(zoekterm))).\
                         order_by(lonen.c.tabelloon)
    elif keuze == 8 and validZt.zt(zoekterm, 10):
        selwerkn = select([werknemers, accounts, lonen]).where(and_(werknemers.c.accountID == accounts.c.accountID,
                     werknemers.c.loonID == lonen.c.loonID, werknemers.c.indienst.\
                     like(zoekterm+'%'))).order_by(werknemers.c.indienst)
    else:
        ongInvoer()
        accKeuze(m_email)
        
    rpwerkn = conn.execute(selwerkn).fetchall()
    if not rpwerkn:
        geenRecord()
        accKeuze(m_email)
        
    class MyWindow(QDialog):
        def __init__(self, data_list, header, *args):
            QWidget.__init__(self, *args)
            self.setGeometry(100, 50, 1600, 900)
            self.setWindowTitle('Werknemergegevens opvragen')
            self.setWindowIcon(QIcon('./images/logos/logo.jpg'))
            self.setWindowFlags(self.windowFlags()| Qt.WindowSystemMenuHint |
                    Qt.WindowMinMaxButtonsHint)
            table_model = MyTableModel(self, data_list, header)
            table_view = QTableView()
            table_view.setModel(table_model)
            font = QFont("Arial", 10)
            table_view.setFont(font)
            table_view.resizeColumnsToContents()
            table_view.setSelectionBehavior(QTableView.SelectRows)
            table_view.setColumnHidden(16, True)
            table_view.clicked.connect(showWerknemer)
            layout = QVBoxLayout(self)
            layout.addWidget(table_view)
            self.setLayout(layout)
    
    class MyTableModel(QAbstractTableModel):
        def __init__(self, parent, mylist, header, *args):
            QAbstractTableModel.__init__(self, parent, *args)
            self.mylist = mylist
            self.header = header
        def rowCount(self, parent):
            return len(self.mylist)
        def columnCount(self, parent):
            return len(self.mylist[0])
        def data(self, index, role):
            if not index.isValid():
                return None
            elif role != Qt.DisplayRole:
                return None
            return str(self.mylist[index.row()][index.column()])
        def headerData(self, col, orientation, role):
            if orientation == Qt.Horizontal and role == Qt.DisplayRole:
                return self.header[col]
            return None
       
    header = ['Accountnummer', 'Werknemernummer', 'Loonschaal', 'Loontrede', 'Loonheffing',\
          'Pensioenpremie', 'Res. Vak.toeslag', 'Werkg. Pensioenpremie', 'Periodieke uitk.',\
          'Overige inhoudingen', 'Overige Vergoedingen','Bedrijfsauto bijtelling',\
          'Reiskostenvergoeding', 'Indienst datum', 'Verlof saldo', 'Extra verlof',\
          'Accountnummer', 'Aanhef', 'Voornaam', 'Tussenvoegsel', 'Achternaam',\
          'Geboortedatum', 'Loonschaal', 'Tabelloon', 'Reisuurloon', 'Maandloon']
        
    data_list = []
    for row in rpwerkn:
        data_list.append(row)
        
    def showWerknemer(idx):
        maccountnr = idx.data()
        if idx.column() == 0:
            engine = create_engine('postgresql+psycopg2://postgres@localhost/bisystem')
            conn = engine.connect()
            selwrknmr = select([werknemers, accounts, lonen]).where(and_(werknemers.c.accountID\
                  == maccountnr, werknemers.c.accountID == accounts.c.accountID,\
                  lonen.c.loonID == werknemers.c.loonID))
            rpwrknmr = conn.execute(selwrknmr).first()
                                                   
            class Widget(QDialog):
                 def __init__(self, parent=None):
                    super(Widget, self).__init__(parent)
                    self.setWindowTitle("Werknemersgegevens opvragen")
                    self.setWindowIcon(QIcon('./images/logos/logo.jpg')) 
                                          
                    self.setFont(QFont('Arial', 10))
                        
                    self.Accountnummer = QLabel()
                    q2Edit = QLineEdit(str(maccountnr))
                    q2Edit.setFixedWidth(90)
                    q2Edit.setDisabled(True)
                    q2Edit.setFont(QFont("Arial",10))
        
                    self.Loontabelnummer = QLabel()
                    q4Edit = QLineEdit(str(rpwrknmr[2]))
                    q4Edit.setFixedWidth(30)
                    q4Edit.setFont(QFont("Arial",10))
                    q4Edit.setDisabled(True)
         
                    self.Loontrede = QLabel()
                    q5Edit = QLineEdit(str(rpwrknmr[3]))
                    q5Edit.setFixedWidth(30)
                    q5Edit.setFont(QFont("Arial",10))
                    q5Edit.setDisabled(True)                              
                    
                    self.Reiskostenvergoeding = QLabel()
                    q8Edit = QLineEdit(str(rpwrknmr[12]))
                    q8Edit.setFixedWidth(100)
                    q8Edit.setFont(QFont("Arial",10))
                    q8Edit.setDisabled(True)
                                                         
                    self.Auto = QLabel()
                    q18Edit = QLineEdit(str(round(rpwrknmr[11],2)))
                    q18Edit.setFixedWidth(70)
                    q18Edit.setFont(QFont("Arial",10))
                    q18Edit.setDisabled(True)
                      
                    self.Periodiekeuitkering = QLabel()
                    q12Edit = QLineEdit(str(rpwrknmr[8]))
                    q12Edit.setFixedWidth(100)
                    q12Edit.setFont(QFont("Arial",10))
                    q12Edit.setDisabled(True)
                     
                    self.Overigeinhoudingen = QLabel()
                    q13Edit = QLineEdit(str(rpwrknmr[9]))
                    q13Edit.setFixedWidth(100)
                    q13Edit.setFont(QFont("Arial",10))
                    q13Edit.setDisabled(True)
             
                    self.Overigevergoedingen = QLabel()
                    q19Edit = QLineEdit(str(rpwrknmr[10]))
                    q19Edit.setFixedWidth(100)
                    q19Edit.setFont(QFont("Arial",10))
                    q19Edit.setDisabled(True)
             
                    self.Indienst = QLabel()
                    q14Edit = QLineEdit(rpwrknmr[13])
                    q14Edit.setFixedWidth(100)
                    q14Edit.setFont(QFont("Arial",10))
                    q14Edit.setDisabled(True)
 
                    self.Maandloon = QLabel()
                    q15Edit = QLineEdit(str(round(rpwrknmr[25]*(1+(rpwrknmr[3]*3/100)),2)))
                    q15Edit.setFixedWidth(100)
                    q15Edit.setFont(QFont("Arial",10))
                    q15Edit.setDisabled(True)
          
                    self.Verlofsaldo = QLabel()
                    q16Edit = QLineEdit(str(rpwrknmr[14]))
                    q16Edit.setFixedWidth(100)
                    q16Edit.setFont(QFont("Arial",10))
                    q16Edit.setDisabled(True)
         
                    self.ExtraVerlof = QLabel()
                    q17Edit = QLineEdit(str(rpwrknmr[15]))
                    q17Edit.setFixedWidth(100)
                    q17Edit.setFont(QFont("Arial",10))
                    q17Edit.setDisabled(True)
                            
                    grid = QGridLayout()
                    grid.setSpacing(20)
                    
                    lbl = QLabel()
                    pixmap = QPixmap('./images/logos/verbinding.jpg')
                    lbl.setPixmap(pixmap)
                    grid.addWidget(lbl, 1, 0)
                    
                    logo = QLabel()
                    pixmap = QPixmap('./images/logos/logo.jpg')
                    logo.setPixmap(pixmap)
                    grid.addWidget(logo, 1, 3, 1, 1, Qt.AlignRight)
            
                    self.setFont(QFont('Arial', 10))
                    grid.addWidget(QLabel('Opvragen werknemergegevens van\n'+rpwrknmr[18]+\
                    ' '+rpwrknmr[19]+' '+rpwrknmr[20]+'\nGeboren: '+rpwrknmr[21]), 1, 1, 1, 3)
                    
                    grid.addWidget(QLabel('Bruto maandloon'), 3, 2)
                    grid.addWidget(q15Edit, 3, 3) 
                                                        
                    grid.addWidget(QLabel('Accountnummer'), 3, 0)
                    grid.addWidget(q2Edit, 3, 1)
                    
                    grid.addWidget(QLabel('Loontabel'), 6, 0)
                    grid.addWidget(q4Edit, 6, 1)
                     
                    grid.addWidget(QLabel('Loontrede'), 7, 0)
                    grid.addWidget(q5Edit, 7, 1)
                                                              
                    grid.addWidget(QLabel('Reiskostenvergoeding'), 4, 2)
                    grid.addWidget(q8Edit, 4, 3)
                                              
                    grid.addWidget(QLabel('Periodieke uitkering belast'), 5, 0)
                    grid.addWidget(q12Edit, 5, 1) 
                    
                    grid.addWidget(QLabel('Overige inhoudingen onbelast'), 5, 2)
                    grid.addWidget(q13Edit, 5, 3) 
                    
                    grid.addWidget(QLabel('Bijtelling Bedrijfsauto'), 4, 0)
                    grid.addWidget(q18Edit, 4, 1) 
                                   
                    grid.addWidget(QLabel('Overige Vergoedingen\nonbelast'), 6, 2)
                    grid.addWidget(q19Edit, 6, 3) 
               
                    grid.addWidget(QLabel('Datum indiensttreding'), 8, 0)
                    grid.addWidget(q14Edit, 8, 1) 
                    
                    grid.addWidget(QLabel('Verlofsaldo in uren'), 7, 2)
                    grid.addWidget(q16Edit, 7, 3)
                    
                    grid.addWidget(QLabel('Extra verlof in uren'), 8, 2)
                    grid.addWidget(q17Edit, 8, 3)
                    
                    grid.addWidget(QLabel('Uurloon'), 9, 0)
                    grid.addWidget(QLabel(str(round(rpwrknmr[23], 2))), 9, 1, 1, 1, Qt.AlignRight) 
                    grid.addWidget(QLabel('Reisuurloon'), 9, 2)
                    grid.addWidget(QLabel(str(round(rpwrknmr[24], 2))), 9, 3)
                    grid.addWidget(QLabel('Pensioenpremie'), 10, 0)
                    grid.addWidget(QLabel(str(round(rpwrknmr[5], 2))), 10, 1, 1, 1, Qt.AlignRight) 
                    grid.addWidget(QLabel('Reservering vakantietoeslag'), 10, 2)
                    grid.addWidget(QLabel(str(round(rpwrknmr[6], 2))), 10, 3) 
                    grid.addWidget(QLabel('Werkgever pensioenpremie'), 11, 0)
                    grid.addWidget(QLabel(str(round(rpwrknmr[7], 2))), 11, 1, 1, 1, Qt.AlignRight) 
                    grid.addWidget(QLabel('Loonheffing'), 11, 2)
                    grid.addWidget(QLabel(str(round(rpwrknmr[4], 2))), 11, 3) 
                    
                    grid.addWidget(QLabel('\u00A9 2017 all rights reserved [email protected]'), 13, 0, 1, 4, Qt.AlignCenter)
                    self.setLayout(grid)
                    self.setGeometry(500, 200, 350, 300)
                                               
                    cancelBtn = QPushButton('Sluiten')
                    cancelBtn.clicked.connect(self.close)
                
                    grid.addWidget(cancelBtn, 12, 3)
                    cancelBtn.setFont(QFont("Arial",10))
                    cancelBtn.setFixedWidth(100)
                    cancelBtn.setStyleSheet("color: black;  background-color: gainsboro")
            
            window = Widget()
            window.exec_()
                                   
    win = MyWindow(data_list, header)
    win.exec_()
    accKeuze(m_email)
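All eight search branches above share one shape: a three-table join expressed as select([...]).where(and_(...)) plus a single extra predicate such as ilike or a comparison. A compact, self-contained sketch of that shape follows, assuming SQLAlchemy 1.x Core and a toy two-table schema (not the bisystem one):

# Sketch: the join-plus-filter shape used by every branch above
# (SQLAlchemy 1.x Core assumed; toy schema for illustration).
from sqlalchemy import (Column, ForeignKey, Integer, MetaData, String, Table,
                        and_, create_engine, select)

metadata = MetaData()
accounts = Table('accounts', metadata,
                 Column('accountID', Integer, primary_key=True),
                 Column('achternaam', String))
werknemers = Table('werknemers', metadata,
                   Column('werknemerID', Integer, primary_key=True),
                   Column('accountID', ForeignKey('accounts.accountID')))

engine = create_engine('sqlite://')
metadata.create_all(engine)
conn = engine.connect()
conn.execute(accounts.insert(), [{'accountID': 1, 'achternaam': 'Jansen'}])
conn.execute(werknemers.insert(), [{'werknemerID': 10, 'accountID': 1}])

zoekterm = 'jans'
sel = select([werknemers, accounts]).where(and_(
    werknemers.c.accountID == accounts.c.accountID,
    accounts.c.achternaam.ilike('%' + zoekterm + '%'),
)).order_by(werknemers.c.accountID)
print(conn.execute(sel).fetchall())  # [(10, 1, 1, 'Jansen')]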
Example #59
0
    else:
        meta = MetaData(engine)
        experiments = Table(table_name,
                            meta,
                            autoload=True,
                            autoload_with=engine)

        slct = experiments.select(
            and_(
                experiments.columns["split"] == split,
                experiments.columns["lambda"] == lambda_value,
                experiments.columns["seed"] == seed,
                experiments.columns["use_quadratic_transform"] ==
                use_quadratic_transform,
                experiments.columns["use_max_inverse_transform"] ==
                use_max_inverse_transform,
                experiments.columns["scale_target_to_unit_interval"] ==
                scale_target_to_unit_interval,
                experiments.columns["use_weighted_samples"] ==
                use_weighted_samples,
                experiments.columns["regularization_param"] ==
                regularization_param,
            )).limit(1)
        rs = connection.execute(slct)
        result = rs.first()
        if result is None:
            pass
        else:
            print(params_string, "Already in DB!")
            rs.close()
            connection.close()
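This snippet is the tail of a guard that skips parameter combinations already stored in a results table. A minimal sketch of the same existence check, assuming SQLAlchemy 1.3-style reflection (autoload) and a pre-existing experiments table; the file name and columns here are illustrative:

# Sketch: reflect an existing table and test for a matching row
# (SQLAlchemy 1.3 legacy APIs assumed; table and columns are illustrative).
from sqlalchemy import MetaData, Table, and_, create_engine

engine = create_engine('sqlite:///results.db')  # hypothetical results DB
connection = engine.connect()
meta = MetaData(engine)
experiments = Table('experiments', meta, autoload=True, autoload_with=engine)

slct = experiments.select(and_(
    experiments.columns['split'] == 0,
    experiments.columns['seed'] == 42,
)).limit(1)
already_done = connection.execute(slct).first() is not None
connection.close()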
Example #60
0
def delete_mapping(func_key_id, template_id):
    query = (func_key_mapping_table.delete().where(
        sql.and_(func_key_mapping_table.c.func_key_id == func_key_id,
                 func_key_mapping_table.c.template_id == template_id)))

    op.get_bind().execute(query)