def to_df(query_set: QuerySet) -> pd.DataFrame:
    if query_set.count() == 0:
        return pd.DataFrame()
    return pd.DataFrame(
        ((d, *vs) for d, vs in query_set.values_list('datetime', 'datas')),
        columns=['datetime', 'macd', 'macdsignal', 'macdhist']
    ).set_index('datetime')
def to_df(query_set: QuerySet) -> pd.DataFrame:
    if query_set.count() == 0:
        return pd.DataFrame()
    return pd.DataFrame(
        ((d, vs) for d, vs in query_set.values_list('datetime', 'datas')),
        columns=['datetime', f'ma{query_set[0].param}']
    ).set_index('datetime')
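# A minimal usage sketch for the two indicator to_df helpers above, assuming
# they live on hypothetical MACD and MA document classes (the trait-aware
# to_df further down calls them as T.to_df(...)); the code and param values
# here are illustrative only.
macd_df = MACD.to_df(MACD.objects(code='000001'))
ma5_df = MA.to_df(MA.objects(code='000001', param=5))  # single 'ma5' column
# Both frames are indexed on 'datetime', so they concatenate cleanly:
combined = pd.concat([macd_df, ma5_df], axis=1)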
def test_ticket_updates(app):
    """
    Creates a ticket, and attempts to use the API to change the status of the
    ticket repeatedly. The ticket is checked against the ticket filtering API
    method, as it should be the sole ticket in the database.
    """
    # Make a ticket and save it
    tick = Ticket(title="Ticket",
                  text="Text",
                  creator="*****@*****.**",
                  assignee="*****@*****.**",
                  status=TicketStatus.Progress.value,
                  create_time=datetime.now())
    tick.save()
    # Change the status of the ticket using the API
    for status in TicketStatus:
        # Update the ticket
        update_resp = app.post("/tickets/{}".format(tick.id),
                               data={"status": status.value})
        first_perspective = Ticket.from_json(update_resp.data)
        # Make sure that ticket closure aligns with close time
        # (close time is marked iff the ticket is closed)
        if status == TicketStatus.Closed:
            assert first_perspective.close_time is not None
        else:
            assert first_perspective.close_time is None
        # Filter by the new status; the ticket should be the only ticket here
        filter_resp = app.get("/tickets", data={"status": status.value})
        filter_result = QuerySet(Ticket, []).from_json(filter_resp.data)
        assert len(filter_result) == 1
        second_perspective = filter_result[0]
        # Make sure the two perspectives are the same
        assert first_perspective == second_perspective
        # Make sure that filtering on OTHER statuses doesn't make anything appear
        for other_stat in TicketStatus:
            if status.value != other_stat.value:
                other_filter_resp = app.get("/tickets",
                                            data={"status": other_stat.value})
                other_filter_result = QuerySet(Ticket, []).from_json(
                    other_filter_resp.data)
                assert len(other_filter_result) == 0
def to_df(query_set: QuerySet, with_trait: List[Tuple] = None) -> pd.DataFrame:
    ohlc = pd.DataFrame(query_set.values_list('datetime', 'code', 'open',
                                              'high', 'low', 'close',
                                              'volume', 'trade_date'),
                        columns=[
                            'datetime', 'code', 'open', 'high', 'low',
                            'close', 'volume', 'trade_date'
                        ]).set_index('datetime', drop=False)
    traits = []
    if with_trait and not ohlc.empty:
        # The index is a DatetimeIndex, so use positional .iloc access rather
        # than [0]/[-1], which pandas treats as label lookups here.
        code = ohlc.code.iloc[0]
        start = ohlc.datetime.iloc[0]
        end = ohlc.datetime.iloc[-1]
        for T, p in with_trait:
            ts = T.to_df(
                T.objects(code=code,
                          datetime__gte=start,
                          datetime__lte=end,
                          param=p))
            traits.append(ts)
    ret = pd.concat([ohlc, *traits], axis=1)
    return ret
def to_df(query_set: QuerySet) -> pd.DataFrame:
    cols = ['datetime', 'open', 'high', 'low', 'close', 'volume', 'trade_date']
    return pd.DataFrame(list(query_set.values_list(*cols)),
                        columns=cols).set_index('datetime', drop=False)
def find_user_exist(self, userId, userCollection):
    """Check whether the user with userId already exists in the collection userCollection."""
    oneUser = UserAccount()
    oneUser.switch_collection(userCollection)
    user_existed = QuerySet(
        UserAccount,
        oneUser._get_collection()).filter(user_id=userId).first()
    return user_existed
def get_paginated_items_from_qs(qs: QuerySet, mapping_fn=default_mapping_fn,
                                *args, **kwargs):
    page = request.args.get('page', default=0)
    limit = request.args.get('limit', default=MAX_PAGINATED_LIMIT)
    try:
        page = int(page)
    except ValueError:
        raise PaginationPageInvalid()
    try:
        limit = int(limit)
    except ValueError:
        raise PaginationLimitInvalid()
    limit = min(limit, MAX_PAGINATED_LIMIT)
    skip = page * limit
    qs = qs.skip(skip).limit(limit)
    no_total_items = qs.count()
    no_items = qs.count(with_limit_and_skip=True)
    no_items_before = max(skip, 0)
    no_items_after = max(no_total_items - skip - no_items, 0)
    return {
        'limit': limit,
        'skip': skip,
        'no_items': no_items,
        'no_total_items': no_total_items,
        'no_items_before': no_items_before,
        'no_items_after': no_items_after,
        'page': page,
        'no_pages': get_page_count(no_total_items, limit),
        'no_pages_before': get_page_count(no_items_before, limit),
        'no_pages_after': get_page_count(no_items_after, limit),
        'items': [mapping_fn(item, *args, **kwargs) for item in qs],
    }
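# A minimal usage sketch for get_paginated_items_from_qs inside a Flask view.
# `app` is assumed to be the module's Flask application and `Entry` a
# hypothetical mongoengine document; the helper reads ?page= and ?limit= from
# the current request itself.
from flask import jsonify

@app.route('/entries')
def list_entries():
    payload = get_paginated_items_from_qs(
        Entry.objects,
        mapping_fn=lambda item: {'id': str(item.id), 'title': item.title})
    return jsonify(payload)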
def to_df(self, query_set: QuerySet) -> pd.DataFrame:
    cols = ['code', 'datetime', 'open', 'high', 'low', 'close', 'volume',
            'outstanding_share', 'turnover']
    data = pd.DataFrame(query_set.as_pymongo())[cols]
    data = data.set_index(['code', 'datetime'], drop=False)
    if not self._adjust:
        return data
    if self._adjust == 'hfq':
        return self._hfq(data)
    if self._adjust == 'qfq':
        return self._qfq(data)
    return data.sort_index(level=['code', 'datetime'])
def paginate(
    query_set: QuerySet,
    page_size: int,
    page_index: int,
    response: Response,
) -> QuerySet:
    """
    Paginates a query set and sets the ``X-Pages`` header in the response.

    Raises a 422 if the page index is invalid.
    """
    max_page = ceil(query_set.count() / page_size)
    if not 1 <= page_index <= max_page:
        raise HTTPException(
            detail="Invalid page index",
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
        )
    response.headers["X-Pages"] = str(max_page)
    return query_set[(page_index - 1) * page_size:page_index * page_size]
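# A minimal usage sketch for paginate in a FastAPI path operation, assuming a
# hypothetical mongoengine `Ticket` document; the route and parameter names
# are illustrative, not part of the original code.
from fastapi import FastAPI, Response

api = FastAPI()

@api.get("/tickets")
def list_tickets(response: Response, page: int = 1, page_size: int = 20):
    page_qs = paginate(Ticket.objects, page_size, page, response)
    return [{"id": str(t.id), "title": t.title} for t in page_qs]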
def to_df(query_set: QuerySet, fq=None) -> pd.DataFrame:
    cols = ['code', 'date', 'open', 'high', 'low', 'close', 'vol', 'amount',
            'date_stamp']
    data = pd.DataFrame(query_set.values_list(*cols), columns=cols)
    data['date'] = pd.to_datetime(data['date'])
    data = data.set_index(['code', 'date'], drop=False)
    if fq:
        codes = data['code'].unique()
        _info = CNMarketData_XDXR.objects(code__in=codes, category=1)
        info_cols = ['code', 'date', 'category', 'fenhong', 'peigu',
                     'peigujia', 'songzhuangu']
        if _info.count() > 0:
            info = pd.DataFrame(_info.values_list(*info_cols),
                                columns=info_cols)
            data = data.assign(is_trade=1)
            info['date'] = pd.to_datetime(info['date'])
            info = info.set_index(['code', 'date'])
            data_ = pd.concat([data, info], axis=1)

            def calc_fq(df):
                # df[cols].ffill(inplace=True) would fill a temporary copy and
                # have no effect, so assign the filled result back explicitly
                df[cols] = df[cols].ffill()
                df.dropna(subset=cols, inplace=True)
                df.fillna(0, inplace=True)
                df['preclose'] = (df['close'].shift(1) * 10
                                  - df['fenhong']
                                  + df['peigu'] * df['peigujia']) \
                    / (10 + df['peigu'] + df['songzhuangu'])
                if fq == 'qfq':
                    df['adj'] = (df['preclose'].shift(-1)
                                 / df['close']).fillna(1)[::-1].cumprod()
                else:
                    df['adj'] = (df['close'] / df['preclose'].shift(-1)) \
                        .cumprod().shift(1).fillna(1)
                df[['open', 'high', 'low', 'close', 'preclose']] = df[
                    ['open', 'high', 'low', 'close', 'preclose']].mul(
                        df['adj'], axis=0)
                return df

            data_ = data_.groupby(level='code').apply(calc_fq).droplevel(0)
            data = data_[data_['is_trade'] == 1].drop(
                ['is_trade', 'category', 'fenhong', 'peigu', 'peigujia',
                 'songzhuangu', 'preclose', 'adj'],
                axis=1)
    return data
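# A minimal usage sketch for the fq-aware to_df above. The CNMarketDataDay
# document name is an assumption; CNMarketData_XDXR is consulted internally
# for dividend/split records. fq='qfq' yields forward-adjusted prices, while
# any other truthy value takes the backward-adjusted branch.
qs = CNMarketDataDay.objects(code='000001', date__gte='2020-01-01')
raw = to_df(qs)            # unadjusted OHLC, MultiIndex (code, date)
qfq = to_df(qs, fq='qfq')  # forward-adjusted OHLC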
def test_view_all_tickets(app):
    """Create a bunch of tickets, and attempt to retrieve them all using the API."""
    for i in range(10):
        tick = Ticket(title="Ticket {}".format(i),
                      text="Text {}".format(i),
                      creator="creator{}@gmail.com".format(i),
                      assignee="assignee{}@gmail.com".format(i),
                      status=TicketStatus.Progress.value,
                      create_time=datetime.now())
        tick.save()
    # Retrieve the tickets from the app
    resp = app.get("/tickets")
    assert resp.status_code == 200
    gotten_tickets = QuerySet(Ticket, []).from_json(resp.data)
    assert compare_ticket_lists(gotten_tickets, list(Ticket.objects))
from mongoengine import connect, Document, StringField, ListField
from pymongo import read_preferences

con = connect(
    'Temp',
    host='10.10.20.37',
    port=27017,
    # alias="reuters_single_class",
    alias="ReutersDataPaul",
    read_preference=read_preferences.ReadPreference.PRIMARY)


class ReutersSingleClass(Document):
    document_id = StringField(required=True, unique=True)
    target = ListField(StringField(required=True))
    description = StringField(required=True)
    data_type_lewis = StringField()
    data_Type_cgi = StringField()

    # The connection above is registered under a non-default alias, so the
    # document must point at it explicitly.
    meta = {'db_alias': 'ReutersDataPaul'}


# Query through the class-level manager rather than accessing `objects` on an
# instance or hand-constructing a QuerySet from the connection object.
for doc in ReutersSingleClass.objects:
    print(doc.target)
def search_entries(string):
    # Equivalent to the unbound QuerySet.search_text(Entry.objects, string)
    # call, written through the manager directly.
    return Entry.objects.search_text(string)
def mongo_2_df(querySet: QuerySet) -> DataFrame:
    '''Convert the documents found by a query into a DataFrame; returns an
    empty DataFrame when there is no data.'''
    return DataFrame.from_dict(json.loads(querySet.to_json())).drop(
        '_id', axis=1, errors='ignore')
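# A minimal usage sketch for mongo_2_df, assuming a hypothetical mongoengine
# `Entry` document; the to_json()/json.loads round-trip is what leaves the
# BSON '_id' column behind and makes dropping it necessary.
df = mongo_2_df(Entry.objects(author='alice'))
print(df.head())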
def all_objects(doc_cls, queryset):
    # Lazily build and cache a QuerySet over the class's own collection;
    # note that the `queryset` argument is not used here.
    if not hasattr(doc_cls, '_all_objs_queryset'):
        doc_cls._all_objs_queryset = QuerySet(doc_cls,
                                              doc_cls._get_collection())
    return doc_cls._all_objs_queryset
def _get_last_n_calls(n, field_name: str,
                      usercalled_objs: mongoengine.QuerySet):
    # Project only the last n elements of the list field via a negative
    # $slice, then read that field off the first matching document.
    return getattr(
        usercalled_objs.fields(**{"slice__" + field_name: -1 * n}).first(),
        field_name)
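# A minimal usage sketch for _get_last_n_calls, assuming a hypothetical
# `UserCalled` document with a ListField named `calls`.
recent = _get_last_n_calls(5, "calls", UserCalled.objects(user_id="alice"))
print(recent)  # the last five entries of `calls`, in stored order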