Example #1
 def get_instance(self, db):
     """
         Decompresses the instance blob if necessary and returns it as a string.
         EDACC can store compressed and uncompressed instances. To distinguish
         between them, we prepend the ASCII characters "LZMA" to a compressed instance.
     """
     table = db.metadata.tables['Instances']
     c_instance = table.c['instance']
     c_id = table.c['idInstance']
     # get prefix
     instance_header = db.session.connection().execute(select([func.substring(c_instance, 1, 4)],
                                                              c_id == self.idInstance).select_from(
         table)).first()[0]
     data_length = db.session.connection().execute(select([func.length(c_instance)],
                                                          c_id == self.idInstance).select_from(
         table)).first()[0]
     if data_length > 32 * 1024 * 1024:
         return "Instance too large for processing. Please use the EDACC GUI application."
     if instance_header == 'LZMA': # compressed instance?
         # get blob without LZMA prefix
         instance_blob = db.session.connection().execute(select([func.substring(c_instance, 5)],
                                                                c_id == self.idInstance).select_from(
             table)).first()[0]
         return utils.lzma_decompress(instance_blob)
     else:
         return self.instance
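
The docstring above describes a simple tagging convention: compressed blobs are stored with an ASCII "LZMA" prefix so readers can tell them apart from plain text. A minimal write-side sketch of that convention, using Python's standard lzma module purely for illustration (EDACC itself goes through its own utils helpers):

import lzma

def pack_instance(data: bytes, compress: bool = True) -> bytes:
    # Prepend the ASCII marker only when the payload is actually compressed.
    if compress:
        return b"LZMA" + lzma.compress(data)
    return data
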
Example #2
    def processBase(self, dbsession, params, thedict, location,  group1, group2):
        bases = dbsession.query(func.substring(Base.location,group1,group2).label('location'), func.count('*').\
         label('base_count'), select(["FROM_UNIXTIME(AVG(UNIX_TIMESTAMP(base.timestamp)))"]).label("average_age")).\
         group_by(func.substring(Base.location,group1, group2)).filter(Base.location.like("%s%%"%location))

        bases = performBaseFilter(bases, params)
        max_count = 0
        min_count = 99999999999

        for base_location,base_count,base_age in bases:
            row = int(base_location[-2])
            col = int(base_location[-1])
            thedict[row][col]['base_count'] = base_count
            if base_count > max_count:
                max_count = base_count
            if base_count < min_count:
                min_count = base_count
#                location_info['base_age'] = getCountColor(base_count)
        legenda = getCountLegenda(min=min_count,max=max_count, single='base', plural='bases')

        for base_location,base_count,base_age in bases:
            row = int(base_location[-2])
            col = int(base_location[-1])
            thedict[row][col]['base_age'] = getCountColor(base_count, min=min_count,max=max_count, single='base', plural='bases')
        def url_generator(**kw):
            new_url = update_params(self.request.url, page=kw['page'])
            return new_url
        bases = dbsession.query(Base).filter(Base.location.like("%s%%"%location))
        bases = performBaseFilter(bases, params)
        bases = paginate.Page(bases, page=int(self.request.params.get('page', 1)), items_per_page=200, url=url_generator, item_count=bases.count())
        return bases, legenda, thedict
Example #3
 def test_substring(self):
     self.assert_compile(
         func.substring('abc', 1, 2),
         'SUBSTRING(%(substring_1)s FROM %(substring_2)s '
         'FOR %(substring_3)s)')
     self.assert_compile(func.substring('abc', 1),
                         'SUBSTRING(%(substring_1)s FROM %(substring_2)s)')
Example #4
 def test_substring(self):
     self.assert_compile(
         func.substring('abc', 1, 2),
         'SUBSTRING(:substring_1 FROM :substring_2 '
         'FOR :substring_3)')
     self.assert_compile(func.substring('abc', 1),
                         'SUBSTRING(:substring_1 FROM :substring_2)')
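
Examples #3 and #4 assert the same SUBSTRING(... FROM ... FOR ...) rendering and differ only in the DBAPI paramstyle (pyformat %(name)s versus named :name). As a rough, version-dependent sketch, the same expression can be compiled against explicit dialects to see both the PostgreSQL-specific rendering and the generic fallback:

from sqlalchemy import func
from sqlalchemy.dialects import postgresql, sqlite

expr = func.substring('abc', 1, 2)
# PostgreSQL has a dedicated compilation rule for substring():
print(expr.compile(dialect=postgresql.dialect()))
# SUBSTRING(%(substring_1)s FROM %(substring_2)s FOR %(substring_3)s)
# Dialects without that rule fall back to a plain function call:
print(expr.compile(dialect=sqlite.dialect()))
# substring(?, ?, ?)
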
Example #5
 def test_substring(self):
     self.assert_compile(
         func.substring("abc", 1, 2),
         "SUBSTRING(:substring_1 FROM :substring_2 " "FOR :substring_3)",
     )
     self.assert_compile(
         func.substring("abc", 1),
         "SUBSTRING(:substring_1 FROM :substring_2)",
     )
Example #6
 def test_substring(self):
     self.assert_compile(
         func.substring("abc", 1, 2),
         "SUBSTRING(:substring_1 FROM :substring_2 "
         "FOR :substring_3)",
     )
     self.assert_compile(
         func.substring("abc", 1),
         "SUBSTRING(:substring_1 FROM :substring_2)",
     )
Example #7
def upgrade():
    op.execute(webservice.insert().values(name=SERVICE,
                                          login=SERVICE,
                                          passwd=func.substring(
                                              func.gen_salt('bf', 4), 8),
                                          acl=ACL,
                                          description=DESCRIPTION))
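
Here substring() trims pgcrypto's gen_salt('bf', 4) output: dropping the first seven characters ('$2a$04$') leaves the 22 random salt characters, which double as a throwaway password. A quick illustration of the slicing, with a made-up salt value:

salt = '$2a$04$sz7OWhWuFEwY1DbFJTzGv.'    # hypothetical gen_salt('bf', 4) output
password = salt[7:]                        # SQL substring(salt, 8) == Python salt[7:]
# password == 'sz7OWhWuFEwY1DbFJTzGv.'
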
Example #8
    def get_published_briefs(self):
        subquery = (
            db
            .session
            .query(
                BriefUser.brief_id,
                func.array_agg(func.substring(User.email_address, '@(.*)')).label('domain')
            )
            .join(User)
            .group_by(BriefUser.brief_id)
            .subquery()
        )
        result = (
            db
            .session
            .query(
                Brief.id,
                Brief.data['organisation'].astext.label('organisation'),
                Brief.published_at,
                Brief.withdrawn_at,
                Brief.data['title'].astext.label('title'),
                Brief.data['sellerSelector'].astext.label('openTo'),
                Brief.data['areaOfExpertise'].astext.label('brief_category'),
                Lot.name.label('brief_type'),
                subquery.columns.domain[1].label('publisher_domain')
            )
            .join(subquery, Brief.id == subquery.columns.brief_id)
            .join(Lot)
            .filter(Brief.published_at.isnot(None))
            .order_by(Brief.id)
            .all()
        )

        return [r._asdict() for r in result]
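
With a string pattern instead of start/length arguments, PostgreSQL's substring(string, pattern) does POSIX regex extraction, so the call above captures the domain after the '@' of each e-mail address. A hedged, self-contained sketch (SQLAlchemy 1.4+ select() style, hypothetical column name):

from sqlalchemy import column, func, select
from sqlalchemy.dialects import postgresql

domain = func.substring(column('email_address'), '@(.*)').label('domain')
print(select(domain).compile(dialect=postgresql.dialect()))
# Renders roughly as: SELECT SUBSTRING(email_address FROM %(substring_1)s) AS domain
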
Example #9
        def keywords_to_query_list(subject, keywords, fallback=False):
            res = []

            for kw in keywords:
                if fallback:
                    if not self.__oi.fuzzy:
                        res.append(or_(
                            func.levenshtein(func.substring(subject, 0, 50), func.substring(kw, 0, 50)) < 3,
                            subject.like("%" + kw.replace(r"%", "\%") + "%")
                        ))
                    else:
                        res.append(subject.like("%" + kw.replace(r"%", "\%") + "%"))
                else:
                    res.append(subject == kw)

            return res
Example #10
def print_model_shards(length=3, video=False, channel=False):
    if channel:
        from rockpack.mainsite.services.video.models import Channel as model
    else:
        from rockpack.mainsite.services.video.models import VideoInstance as model
    for shard, in model.query.distinct().values(func.substring(model.id, 1, length)):
        print shard
Example #11
def get_industry_all(level=1, adjust=True):
    """[summary]
    adjust {bool} -- [CITIC changed its industry classification; whether to adjust for compatibility with older code] (default: {True})
    """
    clss = WIND_DB.ASHAREINDUSTRIESCLASSCITICS
    ind_code = WIND_DB.ASHAREINDUSTRIESCODE
    df = WIND_DB.query(
        clss.S_INFO_WINDCODE, clss.ENTRY_DT, clss.REMOVE_DT,
        ind_code.INDUSTRIESNAME).filter(ind_code.LEVELNUM == (level + 1))
    try:
        df = df.filter(
            func.substring(clss.CITICS_IND_CODE, 1, 2 + 2 * level) == func.
            substring(ind_code.INDUSTRIESCODE, 1, 2 + 2 * level)).to_df()
    except:
        df = df.filter(
            func.substr(clss.CITICS_IND_CODE, 1, 2 + 2 * level) == func.substr(
                ind_code.INDUSTRIESCODE, 1, 2 + 2 * level)).to_df()
    df.columns = ['sid', 'entry_dt', 'out_dt', 'ind']
    # Strip the Roman numerals from the industry names
    p = re.compile(r"[^\u4e00-\u9fa5]")
    df.ind = df.ind.str.replace(p, '', regex=True)
    # Fold comprehensive finance (综合金融) into non-bank finance (非银行金融)
    if adjust:
        def ind_map(x):
            if x == '综合金融':
                return '非银行金融'
            elif x in ('多领域控股', '资产管理', '新兴金融服务'):
                return '多元金融'
            elif x in ('全国性股份制银行', '区域性银行'):
                return '股份制与城商行'
            else:
                return x
        df.ind = df.ind.map(ind_map)
    return df
Example #12
 def path(self):
     return func.concat_ws(
         sql.text("'/'"),
         self.python_version,
         func.substring(self.name, sql.text("1"), sql.text("1")),
         self.name,
         self.filename,
     )
Example #13
 def path(self):
     return func.concat_ws(
         "/",
         self.python_version,
         func.substring(self.name, 1, 1),
         self.name,
         self.filename,
     )
Example #14
 def path(self):
     return func.concat_ws(
         sql.text("'/'"),
         self.python_version,
         func.substring(self.name, sql.text("1"), sql.text("1")),
         self.name,
         self.filename,
     )
Example #15
 def path(self):
     return func.concat_ws(
         "/",
         self.python_version,
         func.substring(self.name, 1, 1),
         self.name,
         self.filename,
     )
Example #16
def get_fund_nav(fund_list=None, begin_dt='20010101', end_dt='20990101'):
    """[获取基金净值]

    Keyword Arguments:
        fund_list {[str or iterable or None]} -- [list of funds or fund] (default: {None})
        begin_dt {str} -- [description] (default: {'20010101'})
        end_dt {str} -- [description] (default: {'20990101'})

    Returns:
        [pd.DataFrame] -- [f_sid|trade_dt|s_close]
    """

    table = WIND_DB.CHINAMUTUALFUNDNAV
    tmp_query = WIND_DB.query(table.F_INFO_WINDCODE, table.PRICE_DATE,
                              table.F_NAV_ADJFACTOR, table.F_NAV_UNIT).filter(
                                  table.PRICE_DATE >= begin_dt,
                                  table.PRICE_DATE <= end_dt).order_by(
                                      table.PRICE_DATE, table.F_INFO_WINDCODE)
    try:
        if isinstance(fund_list, str):
            tmp_query = tmp_query.filter(func.substring(
                table.F_INFO_WINDCODE, 1, 6) == fund_list[:6])
        elif isinstance(fund_list, Iterable):
            tmp_query = tmp_query.filter(func.substring(
                table.F_INFO_WINDCODE, 1, 6).in_([f[:6] for f in fund_list]))
        else:
            pass
    except:
        if isinstance(fund_list, str):
            tmp_query = tmp_query.filter(func.substr(
                table.F_INFO_WINDCODE, 1, 6) == fund_list[:6])
        elif isinstance(fund_list, Iterable):
            tmp_query = tmp_query.filter(func.substr(
                table.F_INFO_WINDCODE, 1, 6).in_([f[:6] for f in fund_list]))
        else:
            pass

    df = tmp_query.to_df()
    df.columns = ['f_sid', 'trade_dt', 'adjfactor', 'unit']
    df['s_close'] = df['adjfactor'] * df['unit']
    df.drop(['unit', 'adjfactor'], axis=1, inplace=True)
    trade_dt_list = CALENDAR.dates
    df = df[df.trade_dt.isin(trade_dt_list)].reset_index(drop=True)
    return df
Example #17
 def get_compressed_instance(self, db):
     table = db.metadata.tables['Instances']
     c_instance = table.c['instance']
     c_id = table.c['idInstance']
     # get prefix
     instance_header = db.session.connection().execute(select([func.substring(c_instance, 1, 4)],
                                                              c_id == self.idInstance).select_from(
         table)).first()[0]
     data_length = db.session.connection().execute(select([func.length(c_instance)],
                                                          c_id == self.idInstance).select_from(
         table)).first()[0]
     if instance_header == 'LZMA': # compressed instance?
         # get blob without LZMA prefix
         instance_blob = db.session.connection().execute(select([func.substring(c_instance, 5)],
                                                                c_id == self.idInstance).select_from(
             table)).first()[0]
         return instance_blob, True
     else:
         return self.instance, False
Example #18
        def keywords_to_query_list(subject, keywords, fallback=False):
            res = []

            for kw in keywords:
                if fallback:
                    if not self.__oi.fuzzy:
                        res.append(
                            or_(
                                func.levenshtein(
                                    func.substring(subject, 0, 50),
                                    func.substring(kw, 0, 50)) < 3,
                                subject.like("%" + kw.replace(r"%", "\%") +
                                             "%")))
                    else:
                        res.append(
                            subject.like("%" + kw.replace(r"%", "\%") + "%"))
                else:
                    res.append(subject == kw)

            return res
Example #19
    def get(self):
        """
        Available tickets query API

        **return**: A JSON dictionary with values:
        - `code`: `int`, always equals 0
        - `result`: `list` of dictionaries of ticket information:
            - `seat_type_id`: `int`
            - `seat_type_name`: `str`
            - `left_cnt`: `int`
            - `price`: `float`
        """
        session = DBSession()
        try:
            train_name = urllib.parse.unquote(request.args.get('train_name'))
            first_interval = int(
                urllib.parse.unquote(request.args.get('first_interval')))
            last_interval = int(
                urllib.parse.unquote(request.args.get('last_interval')))
            interval_list = get_interval_list(train_name, session)
            first_index = session.query(interval_list.c.interval_no) \
                .filter(interval_list.c.interval_id == first_interval) \
                .first() \
                .interval_no
            last_index = session.query(interval_list.c.interval_no) \
                .filter(interval_list.c.interval_id == last_interval) \
                .first() \
                .interval_no
            price_list = session.query(Price.seat_type_id, func.sum(Price.price).label('price')) \
                .join(interval_list, Price.interval_id == interval_list.c.interval_id) \
                .filter(interval_list.c.interval_no <= last_index, interval_list.c.interval_no >= first_index) \
                .group_by(Price.seat_type_id) \
                .subquery()
            seats_left = session.query(Seat.seat_type_id, SeatType.name, func.count().label('left_cnt')) \
                .join(SeatType, SeatType.seat_type_id == Seat.seat_type_id) \
                .join(Train, Train.train_id == Seat.train_id) \
                .filter(Train.train_name == train_name,
                        func.cast(func.substring(Seat.occupied, first_index, last_index - first_index + 1),
                                  BIGINT) == 0) \
                .group_by(Seat.seat_type_id, SeatType.name) \
                .subquery()
            resp = session.query(seats_left.c.seat_type_id, seats_left.c.name.label('seat_type_name'),
                                 seats_left.c.left_cnt, price_list.c.price) \
                .join(price_list, price_list.c.seat_type_id == seats_left.c.seat_type_id) \
                .all()
            resp = list(
                sorted(map(lambda x: dict(zip(x.keys(), x)), resp),
                       key=lambda x: x['seat_type_id']))
            return jsonify(result=resp, code=0)
        except:
            return jsonify(code=10, error='Query error')
        finally:
            session.close()
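
For illustration only, a response matching the shape listed in the docstring (all values are made up):

# Hypothetical response body:
# {
#     "code": 0,
#     "result": [
#         {"seat_type_id": 1, "seat_type_name": "一等座", "left_cnt": 12, "price": 933.0},
#         {"seat_type_id": 2, "seat_type_name": "二等座", "left_cnt": 48, "price": 553.5}
#     ]
# }
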
Example #20
def glottocode(name, conn, codes=None):
    letters = slug(name)[:4].ljust(4, 'a')
    active = select([cast(func.substring(Languoid.id, 5), Integer).label('number')])\
        .where(Languoid.id.startswith(letters))
    legacy = select([cast(func.substring(LegacyCode.id, 5), Integer).label('number')])\
        .where(LegacyCode.id.startswith(letters))
    if not codes:
        known = union_all(active, legacy)
    else:
        dirty = select([cast(func.substring(literal_column('dirty'), 5), Integer).label('number')])\
            .select_from(func.unnest(list(codes)).alias('dirty'))\
            .where(literal_column('dirty').startswith(letters))
        known = union_all(active, legacy, dirty)
    number = conn.execute(select([func.coalesce(func.max(literal_column('number') + 1), 1234)])\
        .select_from(known.alias())).scalar()
    number = str(number)
    assert len(number) == 4
    res = letters + number
    assert GLOTTOCODE_PATTERN.match(res)
    if codes is not None:
        codes[res] = True
    return res
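
The numbering logic above strips the 4-letter prefix with substring(id, 5), takes the maximum of number + 1 over all known codes, and falls back to 1234 when none exist. The same idea in plain Python, with hypothetical codes:

known = ['abcd1234', 'abcd1235']              # hypothetical existing codes for letters 'abcd'
numbers = [int(code[4:]) for code in known]   # SQL substring(id, 5) == Python code[4:]
next_number = max(numbers) + 1 if numbers else 1234
new_code = 'abcd' + str(next_number)          # 'abcd1236'
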
Example #21
def makeLocationDict(dbsession, location, group1, group2, row_index, col_index, do_base=True):
    if do_base:
        bases = dbsession.query(func.substring(Base.location,group1,group2).label('location'), func.count('*').\
         label('base_count'), select(["FROM_UNIXTIME(AVG(UNIX_TIMESTAMP(base.timestamp)))"]).label("average_age")).\
         group_by(func.substring(Base.location,group1,group2)).filter(Base.location.like("%s%%"%location))


    astros = dbsession.query(Location.location,func.count(Location.location).label('astro_count'))
    astros = astros.filter(Location.location.like("%s%%"%location))
    astros = astros.group_by(func.substring(Location.location,group1,group2))

    locations = {}
    for row in range(0,10):
        locations[row] = {}
        for col in range(0,10):
            locations[row][col]={'astro_count':0, 'base_count':0, 'base_age':{}}

    for astro,astro_count in astros:
        row = int(astro[row_index])
        col = int(astro[col_index])
        location_info = locations[row][col]
        location_info['astro_count'] = astro_count
        if do_base:
            location_info['base_count'] = 0
            location_info['base_age'] = {}
        locations[row][col] = location_info

    if do_base:
        for base_location,base_count,base_age in bases:
            row = int(base_location[-2])
            col = int(base_location[-1])
            location_info = locations[row][col]
            location_info['base_count'] = base_count
            location_info['base_age'] = getAgeColorTimedelta(datetime.now() - base_age)
            locations[row][col] = location_info

    return locations
Example #22
    def test_as_comparison(self):

        fn = func.substring("foo", "foobar").as_comparison(1, 2)
        is_(fn.type._type_affinity, Boolean)

        self.assert_compile(
            fn.left, ":substring_1",
            checkparams={'substring_1': 'foo'})
        self.assert_compile(
            fn.right, ":substring_1",
            checkparams={'substring_1': 'foobar'})

        self.assert_compile(
            fn, "substring(:substring_1, :substring_2)",
            checkparams={"substring_1": "foo", "substring_2": "foobar"})
Example #23
def build_seach_equipment_query(items_model, search_value):
    # Probably it is better to make filters like in Django: date_analyse__icontains="value"
    return db.session\
        .query(items_model)\
        .outerjoin(items_model.test_reason)\
        .outerjoin(items_model.test_type)\
        .outerjoin(items_model.test_status)\
        .outerjoin(items_model.equipment)\
        .outerjoin(items_model.campaign)\
        .outerjoin(Campaign.created_by)\
        .filter(or_(
            cast(TestResult.date_analyse, String).ilike("%{}%".format(search_value)),
            cast(TestResult.id, String).ilike("%{}%".format(search_value)),
            concat(cast(TestResult.id, String),
                   func.substring(User.name, r'^([a-zA-Z]{1})'),
                   func.substring(User.name, r'\s([a-zA-Z]){1}'))
                .ilike("%{}%".format(search_value)),
            TestReason.name.ilike("%{}%".format(search_value)),
            TestType.name.ilike("%{}%".format(search_value)),
            TestStatus.name.ilike("%{}%".format(search_value)),
            Equipment.serial.ilike("%{}%".format(search_value)),
            Equipment.equipment_number.ilike("%{}%".format(search_value)),
            ))\
        .all()
Example #24
def build_seach_equipment_query(items_model, search_value):
    # Probably it is better to make filters like in Django: date_analyse__icontains="value"
    return db.session\
        .query(items_model)\
        .outerjoin(items_model.test_reason)\
        .outerjoin(items_model.test_type)\
        .outerjoin(items_model.test_status)\
        .outerjoin(items_model.equipment)\
        .outerjoin(items_model.campaign)\
        .outerjoin(Campaign.created_by)\
        .filter(or_(
            cast(TestResult.date_analyse, String).ilike("%{}%".format(search_value)),
            cast(TestResult.id, String).ilike("%{}%".format(search_value)),
            concat(cast(TestResult.id, String),
                   func.substring(User.name, r'^([a-zA-Z]{1})'),
                   func.substring(User.name, r'\s([a-zA-Z]){1}'))
                .ilike("%{}%".format(search_value)),
            TestReason.name.ilike("%{}%".format(search_value)),
            TestType.name.ilike("%{}%".format(search_value)),
            TestStatus.name.ilike("%{}%".format(search_value)),
            Equipment.serial.ilike("%{}%".format(search_value)),
            Equipment.equipment_number.ilike("%{}%".format(search_value)),
            ))\
        .all()
Example #25
    def get_published_briefs(self):
        subquery = (db.session.query(
            BriefUser.brief_id,
            func.array_agg(
                func.substring(User.email_address,
                               '@(.*)')).label('domain')).join(User).group_by(
                                   BriefUser.brief_id).subquery())
        result = (db.session.query(
            Brief.id, Brief.data['organisation'].astext.label('organisation'),
            Brief.published_at, Brief.withdrawn_at,
            Brief.data['title'].astext.label('title'),
            Brief.data['sellerSelector'].astext.label('openTo'),
            Brief.data['areaOfExpertise'].astext.label('brief_category'),
            Lot.name.label('brief_type'),
            subquery.columns.domain[1].label('publisher_domain')).join(
                subquery,
                Brief.id == subquery.columns.brief_id).join(Lot).filter(
                    Brief.published_at.isnot(None)).order_by(Brief.id).all())

        return [r._asdict() for r in result]
Example #26
    def is_user_block(self, ret, uid):
        try:
            mongo_logger = get_mongo_logger()
            userdao = UserDAO()

            session = Session()
            result = session.query(User) \
                            .filter(User.id == uid) \
                            .one()

            key = userdao.get_user_salt_by_id(result.id)[:16]
            crypto = aes.MyCrypto(key)
            phone = crypto.decodeAES(result.phone)

            row = session.query(BlockUser, User) \
                         .join(User, User.id == BlockUser.user_id) \
                         .filter(func.aes_decrypt(func.from_base64(User.phone), func.substring(User.salt, 1, 16)) == phone) \
                         .all()

            print row

            session.close()
            if len(row) > 0:
                self.set_status(Response.RESULT_OK)
                add_err_ko_message_to_response(
                    ret,
                    '현재 주문량이 많아 신규 예약이 불가능 합니다. 확실한 서비스 품질을 보증을 위해 바로 서비스를 제공해 드릴 수 없는 점 양해 부탁드립니다.'
                )
                mongo_logger.debug('block user', extra={'user_id': uid})
                ret['err_code'] = '4037'  # temporary workaround
                return True

            return False

        except Exception, e:
            session.rollback()

            print_err_detail(e)
            self.set_status(Response.RESULT_SERVERERROR)
            add_err_message_to_response(ret, err_dict['err_mysql'])
            return True
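
The join filter above decrypts on the database side: MySQL's AES_DECRYPT(FROM_BASE64(phone), SUBSTRING(salt, 1, 16)) uses the first 16 characters of the per-user salt as the AES key, mirroring what MyCrypto does in Python. A hedged sketch of that expression on its own (column names as in the query):

from sqlalchemy import func

decrypted_phone = func.aes_decrypt(
    func.from_base64(User.phone),        # stored value is base64(AES(phone))
    func.substring(User.salt, 1, 16),    # first 16 characters of the salt act as the key
)
# e.g. session.query(User).filter(decrypted_phone == phone)
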
Example #27
def getNetlocPrefixes(netloc, length=2):
	# print("Netloc prefixes")
	# SELECT DISTINCT(substring(title for {len})) FROM book_items WHERE lower(title) LIKE %s AND src=%s;

	# The .distinct() operator on a query apparently cannot take a function as a
	# parameter, so we assemble the query we want by hand and return it as an entity.
	substr_chunk  = func.substring(func.upper(database.WebPages.title), 0, length)
	# distinct_hack = func.distinct(substr_chunk)

	query = database.get_session().query(database.WebPages)                 \
		.filter(database.WebPages.is_text == True)                    \
		.filter(database.WebPages.file == None)                       \
		.filter(database.WebPages.state == 'complete')                \
		.filter(database.WebPages.netloc == netloc)                   \
		.group_by(substr_chunk)                                       \
		.order_by(substr_chunk)                                       \
		.with_entities(substr_chunk, func.min(database.WebPages.id), func.min(database.WebPages.netloc))

	vals = query.all()
	return vals
Example #28
def get_dxcc_from_clublog_or_database(callsign):
    response = {}
    dxcc_database = None
    dxcc_clublog = get_dxcc_from_clublog(callsign)

    if not dxcc_clublog:
        # Trying fallback from database
        q = (DxccPrefixes.query.filter(DxccPrefixes.call == func.substring(
            callsign, 1, func.LENGTH(DxccPrefixes.call))).order_by(
                func.length(DxccPrefixes.call).asc()).limit(1).first())
        if q:
            dxcc_database = {
                "CQZ": int(q.cqz),
                "Continent": q.cont,
                "DXCC": q.adif,
                "Lat": q.lat,
                "Lon": q.long,
                "Name": q.entity,
                "PermKomi": False,
            }

    if not dxcc_clublog and not dxcc_database:
        # We have nothing at all :(
        raise InvalidUsage("Error while getting infos from clublog",
                           status_code=500)

    if dxcc_clublog or dxcc_database:
        response["status"] = "ok"
        if dxcc_clublog:
            response.update(dxcc_clublog)
            response["source"] = "clublog"
        else:
            response.update(dxcc_database)
            response["source"] = "database"

    return response
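
The fallback query treats a DxccPrefixes row as a match when its call column equals the leading LENGTH(call) characters of the callsign; in plain Python the condition is roughly:

def prefix_matches(prefix_call: str, callsign: str) -> bool:
    # DxccPrefixes.call == substring(callsign, 1, LENGTH(DxccPrefixes.call))
    return callsign[:len(prefix_call)] == prefix_call
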
Example #29
 def test_substring(self):
     self.assert_compile(func.substring('abc', 1, 2),
                         'SUBSTRING(:substring_1 FROM :substring_2 '
                         'FOR :substring_3)')
     self.assert_compile(func.substring('abc', 1),
                         'SUBSTRING(:substring_1 FROM :substring_2)')
Example #30
    def post(self):
        """
        Train order API, **JWT required**

        **return**: A JSON dictionary with values:
        - `code`: `int`, equals 0 if the order is successful
        - `result`: `dict` with values, shown if `code == 0`:
            - `order_id`: `int`
        - `error`: `str`, shown if `code != 0`
        """
        session = DBSession()
        try:
            user_id = get_jwt_identity()
            body = request.get_json()
            first_interval = int(body.get('first_interval'))
            last_interval = int(body.get('last_interval'))
            seat_class = body.get('seat_class')
            train_name = body.get('train_name')
            # successive_train_rec = get_interval_list(train_name, session)
            interval_list = get_interval_list(train_name, session)
            # interval_list = session.query(successive_train_rec.c.interval_id) \
            #     .order_by(successive_train_rec.c.interval_id) \
            #     .all()
            first_index = session.query(interval_list.c.interval_no) \
                .filter(interval_list.c.interval_id == first_interval) \
                .first() \
                .interval_no
            last_index = session.query(interval_list.c.interval_no) \
                .filter(interval_list.c.interval_id == last_interval) \
                .first() \
                .interval_no
            seat = session.query(Seat) \
                .join(Train, Train.train_id == Seat.train_id) \
                .filter(Train.train_name == train_name,
                        Seat.seat_type_id == seat_class,
                        func.cast(func.substring(Seat.occupied, first_index, last_index - first_index + 1),
                                  BIGINT) == 0) \
                .first()
            if seat is None:
                return jsonify(code=404, error='当前区间无余票!')
            seat.occupied = bin(
                int(seat.occupied, 2)
                | int('1' * (last_index - first_index + 1), 2) <<
                (40 - last_index))[2:].zfill(40)
            new_ticket = Ticket(first_interval=first_interval,
                                last_interval=last_interval,
                                seat_id=seat.seat_id,
                                available=False)
            session.add(new_ticket)
            session.commit()
            session.flush()
            price = session.query(func.sum(Price.price).label('price')) \
                .join(interval_list, Price.interval_id == interval_list.c.interval_id) \
                .filter(Price.seat_type_id == seat_class, interval_list.c.interval_no <= last_index,
                        interval_list.c.interval_no >= first_index) \
                .first()
            new_order = Order(order_status='unpaid',
                              user_id=user_id,
                              ticket_id=new_ticket.ticket_id,
                              price=price)
            session.add(new_order)
            session.commit()
            session.flush()
            return jsonify(code=0, result={'order_id': new_order.order_id})
        except:
            session.rollback()
            return jsonify(code=10, error='订票失败')
        finally:
            session.close()
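
The seat.occupied update above marks a run of intervals inside a 40-character bit string. A small worked example of the same arithmetic, with arbitrarily chosen indices:

occupied = '0' * 40
first_index, last_index = 3, 5           # 1-based interval positions
mask = int('1' * (last_index - first_index + 1), 2) << (40 - last_index)
occupied = bin(int(occupied, 2) | mask)[2:].zfill(40)
# characters 3..5 of the string are now '1'
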
Example #31
    def post(self):
        self.set_header("Content-Type", "application/json")
        self.set_header('Access-Control-Allow-Origin', '*')

        subject = self.get_argument('subject', '')
        content = self.get_argument('content', '')
        content_type = self.get_argument('content_type', '')

        print "subject : " + subject
        print "content : " + content
        print "content_type : " + content_type

        ret = {}
        phone_numbers = ""
        sms_sender = SMS_Sender()

        try:
            session = Session()
            if content_type == 'all_masters':
                print 'into masters'
                masters = session.query(Master) \
                                 .filter(Master.phone != 'out') \
                                 .filter(Master.active == 1) \
                                 .filter(func.length(Master.phone) < 12) \
                                 .all()

                for master in masters:
                    phone_numbers += master.phone + ","

            elif content_type == 'all_users':
                print 'into users'
                result  = session.query(func.group_concat(func.aes_decrypt(func.from_base64(User.phone), func.substring(User.salt, 1, 16)))) \
                                 .filter(func.length(func.aes_decrypt(func.from_base64(User.phone), func.substring(User.salt, 1, 16))) < 12) \
                                 .filter(User.phone != 'out') \
                                 .filter(User.active == 1) \
                                 .filter(not_(User.email.op('regexp')(r'._$'))) \
                                 .all()

                phone_numbers = result[0][0]

            elif content_type == 'booking_users':
                print 'into booking_users'
                result  = session.query(func.aes_decrypt(func.from_base64(User.phone), func.substring(User.salt, 1, 16))) \
                                 .join(Booking, Booking.user_id == User.id) \
                                 .filter(Booking.cleaning_status == 0) \
                                 .distinct() \
                                 .all()

                for row in result:
                    phone_numbers += row[0] + ","

                print phone_numbers

            sms_sender.send2(mtype='lms',
                             to=phone_numbers,
                             subject=subject,
                             text=content)

            ret['response'] = Response.SUCCESS
            self.set_status(Response.RESULT_OK)

        except NoResultFound, e:
            session.close()
            self.set_status(Response.RESULT_OK)
            add_err_ko_message_to_response(ret, '조건에 만족하는 결과가 존재하지 않습니다.')
            return
Example #32
    def post(self):
        self.set_header("Content-Type", "application/json")

        mode        = self.get_argument('mode', 'week') # today, week, all
        search_term = self.get_argument('search_term', '')

        ret = {}

        now = dt.datetime.now().date()
        week_ago = now - dt.timedelta(days = 7)

        try:
            session = Session()

            users = []
            userdao = UserDAO()

            total_cnt = session.query(User).count()

            query = session.query(User, UserAddress, UserMemo) \
                            .outerjoin(UserDefaultAddress, UserDefaultAddress.user_id == User.id) \
                            .outerjoin(UserAddress, and_(UserDefaultAddress.user_id == UserAddress.user_id, UserDefaultAddress.address_idx == UserAddress.user_addr_index)) \
                            .outerjoin(UserMemo, User.id == UserMemo.user_id)

            if mode == 'week':
                query = query.filter(func.date(User.dateofreg) >= week_ago)
            elif mode == 'today':
                query = query.filter(func.date(User.dateofreg) == now)

            if search_term != '':
                print 'searching term : ' + search_term
                #search_term = search_term.lower()

                query = query.filter(or_(func.aes_decrypt(func.from_base64(User.name), func.substring(User.salt, 1, 16)).like('%' + search_term + '%'), 
                                         func.aes_decrypt(func.from_base64(User.phone), func.substring(User.salt, 1, 16)).like(search_term + '%'),
                                         User.email.like(search_term + '%'),
                                         User.id == search_term))

            result = query.order_by(desc(User.dateofreg)).all()

            for row in result:
                key = userdao.get_user_salt(row.User.email)[:16]
                
                if key == None or key == '':
                    print row.User.email
                    continue

                crypto = aes.MyCrypto(key)

                user_info = {}
                user_info['user_id']    = row.User.id
                user_info['dateofreg']  = dt.datetime.strftime(row.User.dateofreg, '%Y-%m-%d %H:%M')
                user_info['devicetype'] = row.User.devicetype
                user_info['name']       = crypto.decodeAES(row.User.name)
                user_info['email']      = row.User.email
                user_info['phone']      = crypto.decodeAES(row.User.phone)
                user_info['gender']     = row.User.gender
                user_info['birthdate']  = crypto.decodeAES(row.User.dateofbirth)
                user_info['default_address'] = crypto.decodeAES(row.UserAddress.address) if row.UserAddress != None else ''
                user_info['default_address_size'] = row.UserAddress.size if row.UserAddress != None else ''
                user_info['default_address_kind'] = row.UserAddress.kind if row.UserAddress != None else ''
                user_info['memo']       = row.UserMemo.memo if row.UserMemo != None else ''

                #if search_term != '':
                #    if search_term.lower() in user_info['name'].lower() or user_info['phone'] == search_term or user_info['email'] == search_term:
                users.append(user_info)

            #if search_term != '':
            #    users = filter(lambda x : search_term.lower() in x['name'].lower() or x['phone'] == search_term or x['email'] == search_term, users)

            ret['response'] = {'count' : total_cnt, 'users' : users}
            self.set_status(Response.RESULT_OK)

        except Exception, e:
            session.rollback()

            print_err_detail(e)
            self.set_status(Response.RESULT_SERVERERROR)
            add_err_message_to_response(ret, err_dict['err_mysql'])
Example #33
            func.sum(Census.pop2008 - Census.pop2000) /
            func.sum(Census.pop2000),
            singular='Population Pct Change',
            # formatters=[
            #     lambda x: "Change is {0:0.1%} percent".format(
            #         x)]
        ),
    }

    # Dimensions are ways to split the data.
    dimension_shelf = {
        # Simplest possible dimension, a SQLAlchemy expression and a label.
        'state':
        Dimension(Census.state, singular='State', plural='States', format=""),
        'first_letter_state':
        Dimension(func.substring(Census.state, 1, 1), label='State'),
        'age':
        Dimension(Census.age, singular='Age', plural='Ages'),
        'age_bands':
        Dimension(case([(Census.age < 21, 'Under 21'),
                        (Census.age < 49, '21-49')],
                       else_='Other'),
                  label='Age Bands'),
        # This will use the lookup to get display values of "M" and "F"
        'sex':
        LookupDimension(Census.sex,
                        singular='Gender',
                        plural='Genders',
                        lookup={
                            'M': 'Menfolk',
                            "F": "Womenfolk"
Example #34
def send_survey_link():
    update_time = dt.datetime(2016, 9, 27)

    session = Session()
    result  = session.query(User) \
                     .filter(func.length(func.aes_decrypt(func.from_base64(User.phone), func.substring(User.salt, 1, 16))) < 12) \
                     .filter(User.phone != 'out') \
                     .filter(User.active == 1) \
                     .filter(not_(User.email.op('regexp')(r'._$'))) \
                     .all()

    sms_sender = Message_Sender()

    for row in result:
        key = row.salt[:16]
        crypto = aes.MyCrypto(key)

        name = crypto.decodeAES(row.name)
        phone = crypto.decodeAES(row.phone)
        print name, phone

        text = '''(광고) 홈마스터 설문조사참여하고 신세계 상품권 받으세요^^
https://goo.gl/kYNti3
~12.31'''
        print sms_sender.send(sender=MAIN_CALL,
                              mtype='lms',
                              to=str(phone),
                              text=str(text))
Example #35
def _archive_access_log(engine, session, default_domain_name):
    try:
        Temp = declarative_base()

        class AccessLogToArchive(Temp):
            __tablename__ = 'accessLogToArchive'
            __table_args__ = {'prefixes': ['TEMPORARY']}
            id = Column(Integer, primary_key=True)
            time_since_epoch = Column(Numeric(15, 3))
            ip_client = Column(CHAR(15))
            http_status_code = Column(String(10))
            http_reply_size = Column(Integer)
            http_url = Column(Text)
            http_username = Column(String(100))
            userId = Column(Integer)
            archived = Column(Boolean)

        Temp.metadata.create_all(bind=engine)

        # Fill temporary table with unarchived chunk of data
        access_log_subquery = session.query(
            AccessLog.id,
            AccessLog.time_since_epoch,
            AccessLog.ip_client,
            AccessLog.http_status_code,
            AccessLog.http_reply_size,
            AccessLog.http_url,
            case([(
                sqlalchemy.or_(
                    AccessLog.http_username.contains('@'),
                    AccessLog.http_username.contains('\\'),
                    AccessLog.http_username == '-'),
                AccessLog.http_username)],
                else_=AccessLog.http_username + '@' + default_domain_name).label('http_username'),
            AccessLog.archived).filter(is_(AccessLog.archived, None)).limit(1000000)  # limit to prevent overload on a huge amount of data

        ins = insert(AccessLogToArchive).from_select([
            'id', 'time_since_epoch', 'ip_client', 'http_status_code', 'http_reply_size',
            'http_url', 'http_username', 'archived'], access_log_subquery)

        session.execute(ins)

        query_result = session.query(AccessLogToArchive.http_username, User.cn).filter(
            and_(User.authMethod == 0, User.userPrincipalName == AccessLogToArchive.http_username)).all()

        # Set user ID field
        session.query(AccessLogToArchive).filter(
            or_(
                and_(User.authMethod == 0, User.userPrincipalName == AccessLogToArchive.http_username),
                and_(User.authMethod == 1, User.ip == AccessLogToArchive.ip_client))).update(
            {AccessLogToArchive.userId: User.id}, synchronize_session=False)

        session.query(AccessLog).filter(AccessLog.id == AccessLogToArchive.id).update(
            {AccessLog.userId: AccessLogToArchive.userId}, synchronize_session=False)

        # Get host from URL: strip protocol
        session.query(AccessLogToArchive).filter(func.locate('://', AccessLogToArchive.http_url) > 0).update(
            {AccessLogToArchive.http_url:
                func.substring(AccessLogToArchive.http_url, func.locate('://', AccessLogToArchive.http_url) + 3)},
            synchronize_session=False)

        # Get host from URL: strip port and the rest
        session.query(AccessLogToArchive).filter(func.locate(':', AccessLogToArchive.http_url) > 0).update(
            {AccessLogToArchive.http_url:
                func.left(AccessLogToArchive.http_url, func.locate(':', AccessLogToArchive.http_url) - 1)},
            synchronize_session=False)

        # Get host from URL: strip everything after the first slash
        session.query(AccessLogToArchive).filter(func.locate('/', AccessLogToArchive.http_url) > 0).update(
            {AccessLogToArchive.http_url:
                func.left(AccessLogToArchive.http_url, func.locate('/', AccessLogToArchive.http_url) - 1)},
            synchronize_session=False)

        # Make summary traffic table
        subquery = session.query(
            func.date(func.from_unixtime(AccessLogToArchive.time_since_epoch)).label('date'),
            AccessLogToArchive.userId,
            AccessLogToArchive.http_url.label('host'),
            func.sum(AccessLogToArchive.http_reply_size).label('traffic')).\
            filter(AccessLogToArchive.http_status_code.like('2%')).\
            group_by(
                func.date(func.from_unixtime(AccessLogToArchive.time_since_epoch)),
                AccessLogToArchive.userId,
                AccessLogToArchive.http_url).\
            having(func.sum(AccessLogToArchive.http_reply_size) > 0).subquery()

        # Update existing rows
        session.query(AccessLogArchive).filter(
            AccessLogArchive.date == subquery.c.date,
            AccessLogArchive.userId == subquery.c.userId,
            AccessLogArchive.host == subquery.c.host).\
            update({AccessLogArchive.traffic: AccessLogArchive.traffic + subquery.c.traffic}, synchronize_session=False)

        # Insert new rows
        access_log_subquery = session.query(subquery).outerjoin(
            AccessLogArchive, and_(
                AccessLogArchive.date == subquery.c.date,
                AccessLogArchive.userId == subquery.c.userId,
                AccessLogArchive.host == subquery.c.host)).\
            filter(AccessLogArchive.id.is_(None), subquery.c.userId.isnot(None))

        ins = insert(AccessLogArchive).from_select(['date', 'userId', 'host', 'traffic'], access_log_subquery)

        session.execute(ins)

        # Mark access log chunk as archived
        session.query(AccessLog).filter(
            AccessLog.id == AccessLogToArchive.id).\
            update({AccessLog.archived: 1}, synchronize_session=False)

        session.commit()

        '''
        # Drop temp table
        AccessLogToArchive.__table__.drop(engine)
        '''
    except:
        session.rollback()
        raise
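
The three UPDATE statements above whittle http_url down to a bare host name with LOCATE/SUBSTRING/LEFT. A pure-Python sketch of the same transformation, for illustration only:

def extract_host(url: str) -> str:
    if '://' in url:
        url = url.split('://', 1)[1]   # strip the protocol
    if ':' in url:
        url = url.split(':', 1)[0]     # strip the port and everything after it
    if '/' in url:
        url = url.split('/', 1)[0]     # strip everything after the first slash
    return url

# extract_host('https://example.org:8443/index.html') -> 'example.org'
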
Example #36
 def test_substring(self):
     self.assert_compile(func.substring('abc', 1, 2),
                         'SUBSTRING(%(substring_1)s FROM %(substring_2)s '
                         'FOR %(substring_3)s)')
     self.assert_compile(func.substring('abc', 1),
                         'SUBSTRING(%(substring_1)s FROM %(substring_2)s)')
Example #37
    def post(self):
        self.set_header("Content-Type", "application/json")
        self.set_header('Access-Control-Allow-Origin', '*')

        search_term = self.get_argument('search_term', '')

        ret = {}

        try:
            session = Session()

            users = []
            userdao = UserDAO()

            if search_term == None or search_term == '' or len(
                    search_term) == 1 or search_term == '010':
                self.set_status(Response.RESULT_OK)
                add_err_ko_message_to_response(ret, '잘못된 파라미터 입니다.')
                return

            total_cnt = session.query(User).count()
            now = dt.datetime.now()

            query = session.query(User, UserAddress, UserMemo) \
                            .outerjoin(UserDefaultAddress, UserDefaultAddress.user_id == User.id) \
                            .outerjoin(UserAddress, and_(UserDefaultAddress.user_id == UserAddress.user_id, UserDefaultAddress.address_idx == UserAddress.user_addr_index)) \
                            .outerjoin(UserMemo, User.id == UserMemo.user_id)

            if search_term != '':
                print 'searching term : ' + search_term
                #search_term = search_term.lower()

                query = query.filter(
                    or_(
                        func.aes_decrypt(
                            func.from_base64(User.name),
                            func.substring(User.salt, 1,
                                           16)).like('%' + search_term + '%'),
                        func.aes_decrypt(func.from_base64(User.phone),
                                         func.substring(User.salt, 1,
                                                        16)).like(search_term +
                                                                  '%'),
                        User.email.like(search_term + '%'),
                        User.id == search_term))

            result = query.order_by(desc(User.dateofreg)).all()

            for row in result:
                key = userdao.get_user_salt(row.User.email)[:16]
                if key == None or key == '':
                    continue

                crypto = aes.MyCrypto(key)

                user_info = {}
                user_info['user_id'] = row.User.id
                user_info['dateofreg'] = dt.datetime.strftime(
                    row.User.dateofreg, '%Y-%m-%d %H:%M')
                user_info['devicetype'] = row.User.devicetype
                user_info['name'] = crypto.decodeAES(row.User.name)
                user_info['email'] = row.User.email
                user_info['authsource'] = row.User.authsource
                user_info['phone'] = crypto.decodeAES(row.User.phone)
                user_info['gender'] = row.User.gender
                user_info['active'] = row.User.active
                user_info['birthdate'] = crypto.decodeAES(row.User.dateofbirth)
                user_info['default_address'] = crypto.decodeAES(
                    row.UserAddress.address) if row.UserAddress != None else ''
                user_info[
                    'default_address_size'] = row.UserAddress.size if row.UserAddress != None else ''
                user_info[
                    'default_address_kind'] = row.UserAddress.kind if row.UserAddress != None else ''
                user_info[
                    'memo'] = row.UserMemo.memo if row.UserMemo != None else ''
                user_info['is_b2b'] = row.User.is_b2b
                #user_info['all_addresses'] = userdao.get_all_user_addresses(row.User.id)

                users.append(user_info)

            ret['response'] = {'count': total_cnt, 'users': users}
            self.set_status(Response.RESULT_OK)

        except Exception, e:
            session.rollback()

            print_err_detail(e)
            self.set_status(Response.RESULT_SERVERERROR)
            add_err_message_to_response(ret, err_dict['err_mysql'])
Example #38
def generate_spreadsheet(context, request):
    date_start = request.params.get('start')
    date_end = request.params.get('end')
    session = DBSession()

    client = request.gclient['DocsClient']
    service = request.gservice['SpreadsheetsService']
    iteration_folder = get_iteration_folder(request)
    settings = get_current_registry().settings

    if not date_start or not date_end:
        params = {'validation_error': 'Please select date range'}
        return manage_iterations(context, request, **params)

    if not iteration_folder:
        params = {'validation_error':
                       'Iteration folder is missing. Please create folder in google docs with title: %s' % \
                       settings.get('penelope.core.iteration_folder')}
        return manage_iterations(context, request, **params)

    users = session.query(User.email, User.fullname)\
                   .group_by(User.email, User.fullname)\
                   .join(User.roles).filter(Role.id.ilike('%developer%'))\
                   .order_by(func.substring(User.fullname, '([^[:space:]]+)(?:,|$)'))

    projects = session.query(Project).join(Customer)\
                      .filter(Project.active)\
                      .order_by(Customer.name, Project.name)


    dockey = settings.get('penelope.core.iteration_template')
    resourceid = 'document%%3A%s' % dockey
    template = client.GetDoc(resourceid)
    entry = client.copy(template, 'Iteration from %s to %s' % (date_start,
                                                               date_end))
    client.move(entry, iteration_folder)

    sp_id = entry.resource_id.text.split(':')[1]
    wk = service.GetWorksheetsFeed(sp_id).entry[0]
    wk.title.text = '%s_%s' % (date_start, date_end)
    wk = service.UpdateWorksheet(wk)
    wk_id = wk.id.text.split('/')[-1]

    query = gdata.spreadsheet.service.CellQuery()
    query['return-empty'] = 'true'
    query['min-col'] = '1'
    query['min-row'] = '1'
    query['max-col'] = '19'
    query['max-row'] = '106'
    cells = service.GetCellsFeed(sp_id, wksht_id=wk_id, query=query)
    batchRequest = gdata.spreadsheet.SpreadsheetsCellsFeed()

    def update_cell(cells, values):
        for cell,value in zip(cells,values):
            cell.cell.inputValue = str(value)
            batchRequest.AddUpdate(cell)

    update_cell([cells.entry[19]],['Iteration from %s to %s' % (date_start, date_end)])
    #Add user fullnames
    update_cell(cells.entry[5:18], [a.fullname for a in users])
    #Add user working days
    dae = _get_calendars_events(users, request)
    dr = [parse(date_start), parse(date_end)]
    update_cell(cells.entry[24:37], [get_working_days(dr,dict(dae).get(u.email,[])) for u in users])
    #Add project names
    update_cell(cells.entry[76::19], [str(a) for a in projects])
    #Add project managers
    update_cell(cells.entry[77::19], [a.manager for a in projects])

    service.ExecuteBatch(batchRequest, cells.GetBatchLink().href)
    return manage_iterations(context, request)
Example #39
def get_dataset_srid_alchemy_expression(md: MetadataType,
                                        default_crs: str = None):
    doc = md.dataset_fields['metadata_doc'].alchemy_expression

    if 'grid_spatial' not in md.definition['dataset']:
        # Non-spatial product
        return None

    projection_offset = md.definition['dataset']['grid_spatial']

    # Most have a spatial_reference field we can use directly.
    spatial_reference_offset = projection_offset + ['spatial_reference']
    spatial_ref = doc[spatial_reference_offset].astext

    # When datasets have no CRS, optionally use this as default.
    default_crs_expression = None
    if default_crs:
        if not default_crs.lower().startswith('epsg:'):
            raise NotImplementedError(
                "CRS expected in form of 'EPSG:1234'. Got: %r" % default_crs)

        auth_name, auth_srid = default_crs.split(':')
        default_crs_expression = select([
            SPATIAL_REF_SYS.c.srid
        ]).where(func.lower(SPATIAL_REF_SYS.c.auth_name) == auth_name.lower(
        )).where(SPATIAL_REF_SYS.c.auth_srid == int(auth_srid)).as_scalar()

    expression = func.coalesce(
        case(
            [(
                # If matches shorthand code: eg. "epsg:1234"
                spatial_ref.op("~")(r"^[A-Za-z0-9]+:[0-9]+$"),
                select([SPATIAL_REF_SYS.c.srid]).where(
                    func.lower(SPATIAL_REF_SYS.c.auth_name) ==
                    func.lower(func.split_part(spatial_ref, ':', 1))).where(
                        SPATIAL_REF_SYS.c.auth_srid == func.split_part(
                            spatial_ref, ':', 2).cast(Integer)).as_scalar())],
            else_=None),
        case(
            [(
                # Plain WKT that ends in an authority code.
                # Extract the authority name and code using regexp. Yuck!
                # Eg: ".... AUTHORITY["EPSG","32756"]]"
                spatial_ref.op("~")
                (r'AUTHORITY\["[a-zA-Z0-9]+", *"[0-9]+"\]\]$'),
                select([SPATIAL_REF_SYS.c.srid]).where(
                    func.lower(SPATIAL_REF_SYS.c.auth_name) == func.lower(
                        func.substring(
                            spatial_ref,
                            r'AUTHORITY\["([a-zA-Z0-9]+)", *"[0-9]+"\]\]$'))
                ).where(SPATIAL_REF_SYS.c.auth_srid == func.substring(
                    spatial_ref, r'AUTHORITY\["[a-zA-Z0-9]+", *"([0-9]+)"\]\]$'
                ).cast(Integer)).as_scalar())],
            else_=None),
        # Some older datasets have datum/zone fields instead.
        # The only remaining ones in DEA are 'GDA94'.
        case([
            (doc[(projection_offset + ['datum'])].astext
             == 'GDA94', select([
                 SPATIAL_REF_SYS.c.srid
             ]).where(func.lower(SPATIAL_REF_SYS.c.auth_name) == 'epsg').where(
                 SPATIAL_REF_SYS.c.auth_srid == ('283' + func.abs(doc[
                     (projection_offset + ['zone'])].astext.cast(Integer))
                                                 ).cast(Integer)).as_scalar())
        ],
             else_=None),
        default_crs_expression,
        # TODO: Handle arbitrary WKT strings (?)
    )
    # print(as_sql(expression))
    return expression
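
The WKT branch above pulls the authority name and SRID out of strings ending in, for example, AUTHORITY["EPSG","32756"]]. The same two regexes applied in plain Python, as a quick check:

import re

wkt_tail = '... AUTHORITY["EPSG","32756"]]'
auth_name = re.search(r'AUTHORITY\["([a-zA-Z0-9]+)", *"[0-9]+"\]\]$', wkt_tail).group(1)
auth_srid = re.search(r'AUTHORITY\["[a-zA-Z0-9]+", *"([0-9]+)"\]\]$', wkt_tail).group(1)
# auth_name == 'EPSG', auth_srid == '32756'
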
Example #40
    def __call__(self):
        dbsession = DBSession()
        fleets = []
        locationlength = 12
        grouper = ''
        params = None
        params = self.request.params
        if "form.submitted" in self.request.params:
            def url_generator(**kw):
                new_url = update_params(self.request.url, page=kw['page'])
                return new_url
            params = self.request.params
#            if 'group_by' in self.request.params and self.request.params.get('group_by') != '':
            fleets = dbsession.query(Fleet,
                                    func.sum(Fleet.size).label("fleet_sum"),
                                    select(["FROM_UNIXTIME(UNIX_TIMESTAMP(fleet.timestamp))"]).label("average_age"),
                                    select(["FROM_UNIXTIME(UNIX_TIMESTAMP(fleet.detailstimestamp))"]).label("average_detail_age"),
                                    select(["fleet.arrival-(unix_timestamp(now())-unix_timestamp(fleet.timestamp))"]).label("absolute_arrival"),
                                    )
#            else:
#                fleets = dbsession.query(Fleet)

           
            if 'location' in self.request.params and self.request.params.get('location') != '':
                locations = self.request.params.get('location').strip().split(',')
                qrs = []
                for location in locations:
                    qrs.append(Fleet.location.like("%s%%" % location))
                fleets = fleets.filter(or_(*qrs))
            fleets = performFleetFilter(fleets, params)
            ##Grouping
            if 'group_by' in self.request.params and self.request.params.get('group_by') != '':
                grouper = self.request.params.get('group_by')
        #                ('owner-01','Location, Owner'),
        #                ('owner-01','Galaxy, Owner'),
        #                ('owner-02','Galaxy-Group, Owner')
        #                ('blob-01','Location, Guild'),
        #                ('blob-02','Galaxy, Guild'),
        #                ('blob-03','Galaxy-Group, Guild'),
                fleets = fleets.join(Fleet.owner)
                fleets = fleets.join(Player.guild)
                if grouper == 'owner-01':
                    fleets = fleets.group_by(Fleet.location, Fleet.ownerid)
                if grouper == 'owner-02':
                    fleets = fleets.group_by(func.substring(Fleet.location,1,3), Fleet.ownerid)
                    locationlength = 3
                if grouper == 'owner-03':
                    fleets = fleets.group_by(func.substring(Fleet.location,1,2), Fleet.ownerid)
                    locationlength = 2
                if grouper == 'blob-01':
                    fleets = fleets.group_by(Fleet.location, GuildInfo.id)
                if grouper == 'blob-02':
                    fleets = fleets.group_by(func.substring(Fleet.location,1,3), GuildInfo.id)
                    locationlength = 3
                if grouper == 'blob-03':
                    fleets = fleets.group_by(func.substring(Fleet.location,1,2), GuildInfo.id)
                    locationlength = 2
            else:
                fleets = fleets.group_by(Fleet.id)
                
            ##Sorting
            if 'sort_field' in self.request.params and self.request.params.get('sort_field') != '':
                fleets = performFleetSort(fleets, self.request.params)

            ##Limits and such
            per_page = int(self.request.params.get('per_page') or 100)
            fleets = paginate.Page(fleets, page=int(self.request.params.get('page', 1)), items_per_page=per_page, url=url_generator, item_count=fleets.count())
#            import pdb; pdb.set_trace()


        return {"datetime":datetime,
                "grouper":grouper,
                "filters": self.filters,
                "fieldfilters":self.fieldfilters,
                "fleets": fleets,
                "params": params,
                "locationlength":locationlength}
Example #41
    def __call__(self):
        dbsession = DBSession()
        returnvalue = {'location':None}
        location = "A"
        location = "%s%s" % (location, self.request.matchdict.get("galaxy"))

        regions = {}
        legenda = None
        bases = None
        params = {}

        base_search = False
        fleet_search = False
        
        if "form.base_submitted" in self.request.params:
            base_search = True
            params = self.request.params
        elif "form.fleet_submitted" in self.request.params:
            fleet_search = True
            params = self.request.params
        elif self.request.cookies.get('search','') == 'base':
            base_search = True
            params = self.request.cookies
        elif self.request.cookies.get('search','') == 'fleet':
            fleet_search = True
            params = self.request.cookies

        # .outerjoin(Base).filter(Location.location.like("%s%%"%location))
        if base_search:
            regions = makeLocationDict(dbsession, location, 1, 6, 4, 5, do_base=False)
            bases, legenda, regions = self.processBase(dbsession, params, regions, location,  1, 6)
            label = ''
            cookie_key = ['owner','occupier','jumpgate','agevalue', 'maxagefield']
            cookie_set= False

            if "form.base_submitted" in self.request.params:
                for key in cookie_key:
                    if self.request.params.get(key,''):
                        cookie_set = True
                        self.request.response.set_cookie(key, self.request.params.get(key), max_age=timedelta(days=1), path='/')
                    else:
                        self.request.response.delete_cookie(key)

                    if cookie_set:
                        self.request.response.set_cookie('search', 'base', max_age=timedelta(days=1), path='/')
        elif fleet_search:
            fleets = dbsession.query(func.substring(Fleet.location,1,6).label('location'),\
                    func.count('*').label('fleet_count'), \
                    func.sum(Fleet.size).label("fleet_sum"), \
                    select(["FROM_UNIXTIME(AVG(UNIX_TIMESTAMP(fleet.timestamp)))"]).label("average_age")).\
                    group_by(func.substring(Fleet.location,1,6)).filter(Fleet.location.like("%s%%"%location))
            fleets = performFleetFilter(fleets, self.request.params)
            regions = makeLocationDict(dbsession, location, 1, 6, 4, 5, do_base=False)

            cookie_key = ['owner','size','flying','agevalue', 'maxagefield']
            cookie_set= False

            if "form.fleet_submitted" in self.request.params:
                for key in cookie_key:
                    if self.request.params.get(key,''):
                        cookie_set = True
                        self.request.response.set_cookie(key, self.request.params.get(key), max_age=timedelta(days=1), path='/')
                    else:
                        self.request.response.delete_cookie(key)

                    if cookie_set:
                        self.request.response.set_cookie('search', 'fleet', max_age=timedelta(days=1), path='/')

            min_count = 1
            max_count = 1000000
            for fleet_location, fleet_count, fleet_sum, fleet_age in fleets:
                fleet_sum = fleet_sum / 1000
                row = int(fleet_location[4])
                col = int(fleet_location[5])
                location_info = regions[row][col]
                location_info['fleet_count'] = fleet_count
                location_info['fleet_sum'] = fleet_sum
#                if fleet_sum > max_count:
#                    max_count = fleet_sum
#                if fleet_sum < min_count or min_count == None:
#                    min_count = fleet_sum
#                location_info['base_age'] = getCountColor(base_count)
                regions[row][col] = location_info
            legenda = getCountLegenda(min=min_count,max=max_count, single='fleet', plural='fleets')

            for fleet_location, fleet_count, fleet_sum, fleet_age in fleets:
                row = int(fleet_location[4])
                col = int(fleet_location[5])
                location_info = regions[row][col]
                location_info['base_age'] = getCountColor(math.trunc(fleet_sum), min=min_count,max=max_count, single='fleet', plural='fleets')
            label = 'Fleet sum:'
        else:
            regions = makeLocationDict(dbsession, location, 1, 6, 4, 5)
            label = 'Avg. age'
            legenda = getAgeLegenda()


        returnvalue = {'base_filters': self.basefilters,
                       'fleet_filters': self.fleetfilters,
                       'label': label,
                       'params': params,
                       'bases':bases,
                       'location':location,
                       'regions': regions,
                       'legenda': legenda,
                       'datetime':datetime}
        returnvalue.update(self.request.matchdict)
        return returnvalue
Example #42
def statistics():
    result = setup_simple_command(
        "statistics",
        "Report several different statistics.",
    )
    if isinstance(result, int):
        return result
    else:
        settings, closer, env, args = result

    try:
        # Get the number of users and passwords
        n_users = Session.query(User).count()
        if n_users == 0:
            return

        n_passwords = Session.query(Password).count()

        # How many users are verified
        n_verified = Session.query(User).filter(
            User.email_verified == true()).count()
        # How many users allow the analytics cookie
        n_allow_cookie = Session.query(User).filter(
            User.allow_google_analytics == true()).count()

        # Identity providers
        by_identity = Session.query(
            ExternalIdentity.provider,
            func.count(ExternalIdentity.provider).label('provider_count')
        ).select_from(
            ExternalIdentity
        ).group_by(ExternalIdentity.provider).order_by(desc('provider_count'))

        # Email providers
        domains_with_counts = select([
            func.substring(User.email, r'.*@(.*)').label('domain'),
            func.count('*').label('count'),
        ]).where(User.email != '').group_by('domain').order_by(desc('count'))
        aliased = domains_with_counts.alias()
        by_email = Session.query(aliased).filter(aliased.c.count > 1)

        without_email = Session.query(User).filter(User.email == '').count()
        with_email = n_users - without_email

        # Top ten users
        most_active_users = Session.query(
            User, func.count(User.id).label('password_count'),
        ).join(
            Password
        ).group_by(User.id).order_by(desc('password_count'))

        users_with_passwords = most_active_users.count()
        most_active_users = most_active_users.limit(10)

        # print the statistics
        safe_print('Number of users: %d' % n_users)
        safe_print('Number of passwords: %d' % n_passwords)
        safe_print('Verified users: %.2f%% (%d)' % (
            (100.0 * n_verified) / n_users, n_verified))
        safe_print('Users that allow Google Analytics cookie: %.2f%% (%d)' % (
            (100.0 * n_allow_cookie) / n_users, n_allow_cookie))

        safe_print('Identity providers:')
        for provider, amount in by_identity:
            safe_print('\t%s: %.2f%% (%d)' % (
                provider, (100.0 * amount) / n_users, amount))

        safe_print('Email providers:')
        others = with_email
        for provider, amount in by_email:
            safe_print('\t%s: %.2f%% (%d)' % (
                provider, (100.0 * amount) / with_email, amount))
            others -= amount
        safe_print('\tOthers: %.2f%% (%d)' % (
            (100.0 * others) / with_email, others))
        safe_print('Users without email: %.2f%% (%d)' % (
            (100.0 * without_email) / n_users, without_email))

        safe_print('Most active users:')
        for user, n_passwords in most_active_users:
            safe_print('\t%s: %s' % (get_user_display_name(user), n_passwords))

        users_no_passwords = n_users - users_with_passwords
        safe_print('Users without passwords: %.2f%% (%d)' % (
            (100 * users_no_passwords) / n_users, users_no_passwords))

    finally:
        closer()
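A brief aside on the domain extraction above: under PostgreSQL, func.substring(User.email, r'.*@(.*)') compiles to the regex form of SUBSTRING, and the parenthesised group is what comes back as the domain. A tiny hedged sketch with an illustrative users table (not the actual User model):

from sqlalchemy import Column, MetaData, String, Table, func
from sqlalchemy.dialects import postgresql

users = Table("users", MetaData(), Column("email", String))
domain = func.substring(users.c.email, r'.*@(.*)')

print(domain.compile(dialect=postgresql.dialect()))
# Roughly: SUBSTRING(users.email FROM %(substring_1)s)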
Example #43
 def test_substring(self):
     self.assert_compile(
         func.substring("abc", 1, 2), "SUBSTRING(%(substring_1)s FROM %(substring_2)s " "FOR %(substring_3)s)"
     )
     self.assert_compile(func.substring("abc", 1), "SUBSTRING(%(substring_1)s FROM %(substring_2)s)")
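A hedged sketch of one more assertion in the same style, covering the two-argument regex form used in several of the examples above; the test name is hypothetical, and it assumes the same PostgreSQL-dialect test fixture that test_substring runs under.

 def test_substring_regex(self):
     # Two-argument form: the second argument (here a regex) still compiles
     # to SUBSTRING(value FROM pattern) under the PostgreSQL dialect.
     self.assert_compile(
         func.substring("user@example.com", r".*@(.*)"),
         "SUBSTRING(%(substring_1)s FROM %(substring_2)s)",
     )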