import discord
from discord.ext import commands
import nest_asyncio
import time

nest_asyncio.apply()

bot = commands.Bot(command_prefix='0_<')
TOKEN = ''


@bot.event
async def on_ready():
    print(bot.user.name, 'has woken up.')  # to check that the bot is running
    await bot.change_presence(status=discord.Status.online,
                              activity=discord.Game('Greeting'))


@bot.event
async def on_message(message):  # @bot.event handlers must be named after the event
    if message.author == bot.user:  # ignore the bot's own messages
        return
    if message.content.startswith('@hello'):  # hello message
        cchannel = message.channel  # cchannel = current channel
        await cchannel.send('Hello World!')

        def check(m):
            return m.content == 'hello' and m.channel == cchannel

        msg = await bot.wait_for('message', check=check)
        await cchannel.send('Hello {.author}!'.format(msg))
    if message.content.startswith('$emoji'):
def patchAsyncio():
    """Patch asyncio to allow nested event loops."""
    import nest_asyncio
    nest_asyncio.apply()
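# A hedged usage sketch (the coroutine below is illustrative, not from the
# source): once patchAsyncio() has run, loop.run_until_complete() may be
# called even from code that the loop itself is already executing, e.g. a
# Jupyter cell.
import asyncio

async def outer():
    # nested call: would raise "This event loop is already running" unpatched
    return asyncio.get_event_loop().run_until_complete(asyncio.sleep(0, result=42))

patchAsyncio()
loop = asyncio.get_event_loop()
print(loop.run_until_complete(outer()))  # prints 42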
def client():
    nest_asyncio.apply()
    with TestClient(app=app) as test_client:
        yield test_client
logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING)
logger.info('Starting BotteBot application...')
# Connect to SQLite3 database
logger.info('Connected to SQLite database!')


def signal_handler():
    loop.stop()
    logger.info("Program exiting gracefully")
    logging.shutdown()


if __name__ == '__main__':
    logger = logging.getLogger()
    loop = asyncio.get_event_loop()
    loop.add_signal_handler(signal.SIGINT, signal_handler)
    loop.add_signal_handler(signal.SIGTERM, signal_handler)
    try:
        main()
        nest_asyncio.apply(loop)
        loop.run_until_complete(
            asyncio.gather(services.start_scheduler(),
                           services.start_slack_client(),
                           services.start_web_server()))
    except Exception as e:
        logger.error(e)
    finally:
        sys.exit(0)
async def remind(self, ctx, who: typing.Union[discord.TextChannel, discord.Member],
                 when: str, *, message: str):
    """Set a reminder for yourself or a channel.

    Time format examples: 1d, 7h, 3h30m, 1d7h, 15m
    """
    d_char = "d"
    h_char = "h"
    m_char = "m"
    s_char = "s"
    if type(who) == discord.Member and not ctx.author == who:
        raise ValueError("You cannot set reminders for anyone other than yourself!")
    elif type(who) == discord.TextChannel and who.id in CHAN_BANNED:
        raise ValueError(f"You cannot set reminders for {who}!")
    today = datetime.today()  # .astimezone(tz=TZ)

    def get_value(which: str, from_when: str):
        import re
        if which in from_when:
            raw = from_when.split(which)[0]
            if raw.isnumeric():
                return int(raw)
            else:
                try:
                    findall = re.findall(r"\D", raw)[-1]
                    return int(raw[raw.find(findall) + 1:])
                except Exception:
                    return 0
        else:
            return 0

    days = get_value(d_char, when)
    hours = get_value(h_char, when)
    minutes = get_value(m_char, when)
    seconds = get_value(s_char, when)
    delta = timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)
    min_timer_allowed = 5
    if delta.total_seconds() < min_timer_allowed:
        raise ValueError(
            f"The duration entered is too short! The minimum allowed timer is {min_timer_allowed} seconds."
        )
    try:
        raw_when = today + delta
    except ValueError:
        raise ValueError("The duration entered is too large!")
    duration = raw_when - today
    alert = today + duration
    await ctx.send(
        f"Setting a timer for [{who}] in [{duration.total_seconds()}] seconds. "
        f"The timer will go off at [{alert.strftime('%x %X')}]."
    )
    author = f"{ctx.author.name}#{ctx.author.discriminator}"
    process_MySQL(sqlRecordTasks, values=(who.id, message, str(alert), 1, author))
    import nest_asyncio
    nest_asyncio.apply()
    asyncio.create_task(
        send_reminder(thread=1,
                      duration=duration.total_seconds(),
                      who=who,
                      message=message,
                      author=ctx.author,
                      flag=str(alert)))
import twint
import nest_asyncio

nest_asyncio.apply()  # allow the event loop to be re-entered (nested)


def get_config(username):
    config = twint.Config()
    config.Hide_output = True
    config.Store_object = True
    config.Username = username
    return config


def get_info(username):
    c = get_config(username)
    twint.output.clean_lists()
    twint.run.Lookup(c)
    try:
        return twint.output.users_list[0].__dict__
    except Exception:
        return []


def get_tweets(username):
    c = get_config(username)
    c.Lang = "en"
    c.Limit = 200
    twint.output.clean_lists()
    twint.run.Search(c)
    try:
def nest():
    import nest_asyncio
    nest_asyncio.apply()
def run_server(
    context: Context = None,
    client: dask.distributed.Client = None,
    host: str = "0.0.0.0",
    port: int = 8080,
    startup=False,
    log_level=None,
    blocking: bool = True,
    jdbc_metadata: bool = False,
):  # pragma: no cover
    """
    Run a HTTP server for answering SQL queries using ``dask-sql``.

    It uses the `Presto Wire Protocol <https://github.com/prestodb/presto/wiki/HTTP-Protocol>`_
    for communication. This means it has a single POST endpoint `/v1/statement`, which answers
    SQL queries (as string in the body) with the output as a JSON
    (in the format described in the documentation above).
    Every SQL expression that ``dask-sql`` understands can be used here.

    See :ref:`server` for more information.

    Note:
        The Presto protocol also includes some statistics on the query in the response.
        These statistics are currently only filled with placeholder variables.

    Args:
        context (:obj:`dask_sql.Context`): If set, use this context instead of an empty one.
        client (:obj:`dask.distributed.Client`): If set, use this dask client instead of a new one.
        host (:obj:`str`): The host interface to listen on (defaults to all interfaces)
        port (:obj:`int`): The port to listen on (defaults to 8080)
        startup (:obj:`bool`): Whether to wait until Apache Calcite was loaded
        log_level (:obj:`str`): The log level of the server and dask-sql
        blocking (:obj:`bool`): If running in an environment with an event loop (e.g. a jupyter
            notebook), do not block. The server can be stopped with ``context.stop_server()``
            afterwards.
        jdbc_metadata (:obj:`bool`): If enabled, create JDBC metadata tables using the schemas
            and tables in the current dask_sql context.

    Example:
        It is possible to run an SQL server by using the CLI script ``dask-sql-server``
        or by calling this function directly in your user code:

        .. code-block:: python

            from dask_sql import run_server

            # Create your pre-filled context
            c = Context()
            ...

            run_server(context=c)

        After starting the server, it is possible to send queries to it, e.g. with the
        `presto CLI <https://prestosql.io/docs/current/installation/cli.html>`_ or via sqlalchemy
        (e.g. using the `PyHive <https://github.com/dropbox/PyHive#sqlalchemy>`_ package):

        .. code-block:: python

            from sqlalchemy.engine import create_engine
            engine = create_engine('presto://localhost:8080/')

            import pandas as pd
            pd.read_sql_query("SELECT 1 + 1", con=engine)

        Of course, it is also possible to call the usual ``CREATE TABLE`` commands.

        If in a jupyter notebook, you should run the following code:

        .. code-block:: python

            from dask_sql import Context

            c = Context()
            c.run_server(blocking=False)

            ...

            c.stop_server()

        Note:
            When running in a jupyter notebook without blocking, it is not possible to access
            the SQL server from within the notebook, e.g. using sqlalchemy.
            Doing so will deadlock infinitely.
    """
    _init_app(app, context=context, client=client)

    if jdbc_metadata:
        create_meta_data(context)

    if startup:
        app.c.sql("SELECT 1 + 1").compute()

    config = Config(app, host=host, port=port, log_level=log_level)
    server = Server(config=config)

    loop = asyncio.get_event_loop()
    if blocking:
        if loop and loop.is_running():
            apply(loop=loop)
        server.run()
    else:
        if not loop or not loop.is_running():
            raise AttributeError(
                "blocking=False needs a running event loop (e.g. in a jupyter notebook)"
            )
        loop.create_task(server.serve())
        context.sql_server = server
def event_loop(event_loop) -> AbstractEventLoop:
    nest_asyncio.apply(event_loop)
    return event_loop
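# A hedged usage sketch (assumes the function above is a pytest fixture
# overriding pytest-asyncio's `event_loop`; the test below is illustrative):
# patching lets a coroutine that is already running on the loop call
# run_until_complete() again.
import asyncio
import pytest

@pytest.mark.asyncio
async def test_nested_call(event_loop):
    # the inner run_until_complete() is legal only because the fixture
    # applied nest_asyncio to this loop
    result = event_loop.run_until_complete(asyncio.sleep(0, result=1))
    assert result == 1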
# created by Alex Oraibi.
# note: some functions are taken from courses on Udemy.com
import numpy as np
import pandas as pd
import sys
from datetime import datetime, timedelta, time, timezone
import talib as ta
from talib import MA_Type
from stocktrends import Renko
from ib_insync import *
import statsmodels.api as sm
import nest_asyncio

nest_asyncio.apply()  # enable nested event loops
sys.setrecursionlimit(10**9)  # set recursion limit to 1,000,000,000
pd.options.mode.chained_assignment = None  # suppress the pandas chained-assignment warning


def slope(ser, n):
    "function to calculate the slope of n consecutive points on a plot"
    slopes = [i * 0 for i in range(n - 1)]
    for i in range(n, len(ser) + 1):
        y = ser[i - n:i]
        x = np.array(range(n))
        y_scaled = (y - y.min()) / (y.max() - y.min())
        x_scaled = (x - x.min()) / (x.max() - x.min())
        x_scaled = sm.add_constant(x_scaled)
        model = sm.OLS(y_scaled, x_scaled)
        results = model.fit()
        slopes.append(results.params[-1])
def run_from_notebook():
    """If the server is started from a Jupyter notebook, the user must call
    run_from_notebook() before calling the start() or start_from_colab() function.
    """
    import nest_asyncio
    nest_asyncio.apply()
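# A hedged usage sketch; start() is assumed from the surrounding module,
# since the docstring above references it:
run_from_notebook()  # patch the notebook's already-running loop first
start()              # then the server can be started as usual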
def DSU_main():
    nest_asyncio.apply()
    asyncio.get_event_loop().run_until_complete(DSU_online())
def notify_tagged_user(application, userTags, project, note, category, tagger, timestamp):
    view_result = {}
    time_in_format = datetime.datetime.strptime(
        timestamp, '%Y-%m-%d %H:%M:%S').strftime("%a %b %d %Y, %I:%M:%S %p")
    for row in application.gs_users_db.view('authorized/users'):
        if row.key != 'genstat_defaults':
            view_result[row.key.split('@')[0]] = row.key
    if category:
        category = ' - ' + category
    for user in userTags:
        if user[1] in view_result:
            user = user[1]
            option = PresetsHandler.get_user_details(
                application, view_result[user]).get('notification_preferences', 'Both')
            # Send a Slack IM to the tagged user with the running note
            if option == 'Slack' or option == 'Both':
                nest_asyncio.apply()
                client = slack.WebClient(token=application.slack_token)
                blocks = [{
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": ("_You have been tagged by *{}* in a running note for the project_ "
                                 "<{}/project/{}|{}>! :smile: \n_The note is as follows:_ \n\n\n"
                                 ).format(
                                     tagger,
                                     application.settings['redirect_uri'].rsplit('/', 1)[0],
                                     project, project)
                    }
                }, {
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": ">*{} - {}{}*\n>{}\n\n\n\n _(Please do not respond to this message here in Slack."
                                " It will only be seen by you.)_".format(
                                    tagger, time_in_format, category,
                                    note.replace('\n', '\n>'))
                    }
                }]
                try:
                    userid = client.users_lookupByEmail(email=view_result[user])
                    channel = client.conversations_open(users=userid.data['user']['id'])
                    client.chat_postMessage(channel=channel.data['channel']['id'],
                                            blocks=blocks)
                    client.conversations_close(channel=channel.data['channel']['id'])
                except Exception:
                    # falling back to email
                    option = 'E-mail'
            # default is email
            if option == 'E-mail' or option == 'Both':
                msg = MIMEMultipart('alternative')
                msg['Subject'] = '[GenStat] Running Note:{}'.format(project)
                msg['From'] = 'genomics-status'
                msg['To'] = view_result[user]
                text = 'You have been tagged by {} in a running note in the project {}! The note is as follows\n\
>{} - {}{}\
>{}'.format(tagger, project, tagger, time_in_format, category, note)
                html = '<html>\
                <body>\
                <p> \
                You have been tagged by {} in a running note in the project <a href="{}/project/{}">{}</a>! The note is as follows</p>\
                <blockquote>\
                <div class="panel panel-default" style="border: 1px solid #e4e0e0; border-radius: 4px;">\
                <div class="panel-heading" style="background-color: #f5f5f5; padding: 10px 15px;">\
                <a href="#">{}</a> - <span>{}</span> <span>{}</span>\
                </div>\
                <div class="panel-body" style="padding: 15px;">\
                <p>{}</p>\
                </div></div></blockquote></body></html>'.format(
                    tagger,
                    application.settings['redirect_uri'].rsplit('/', 1)[0],
                    project, project, tagger, time_in_format, category,
                    markdown.markdown(note))
                msg.attach(MIMEText(text, 'plain'))
                msg.attach(MIMEText(html, 'html'))
                s = smtplib.SMTP('localhost')
                s.sendmail('*****@*****.**', msg['To'], msg.as_string())
                s.quit()
# region imports
import requests, os, time, asyncio, json, discord, datetime, io, platform, logging
from decouple import config
from random import *
from discord.utils import get
from discord.ext import commands
from data import DATACENTRE as dat
from data import download_auctions
from collections import defaultdict
from re import search
from aiohttp import ClientSession
from data import ExitForLoop
import nest_asyncio as nasync
from autocorrect import *

nasync.apply()

LOG = './tmp/ccd.log'
logging.basicConfig(filename=LOG, filemode="w", level=logging.DEBUG)
# console handler
console = logging.StreamHandler()
console.setLevel(logging.ERROR)
logging.getLogger("").addHandler(console)
logging.debug("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
logging.debug("STARTED LOGGING SESSION")
logging.debug("MonkeyBot by Void Moment#8152 (c)")
logging.debug("DO `m!help` IN DISCORD CLIENT FOR COMMAND INFO")
logging.debug("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")

pages = dat.pages
val_channels = dat.val_channels
def loop_tk(kernel):
    """Start a kernel with the Tk event loop."""
    from tkinter import Tk, READABLE

    app = Tk()

    # Capability detection:
    # per https://docs.python.org/3/library/tkinter.html#file-handlers
    # file handlers are not available on Windows
    if hasattr(app, 'createfilehandler'):
        # A basic wrapper for structural similarity with the Windows version
        class BasicAppWrapper:
            def __init__(self, app):
                self.app = app
                self.app.withdraw()

        def process_stream_events(stream, *a, **kw):
            """fall back to main loop when there's a socket event"""
            if stream.flush(limit=1):
                app.tk.deletefilehandler(stream.getsockopt(zmq.FD))
                app.quit()

        # For Tkinter, we create a Tk object and call its withdraw method.
        kernel.app_wrapper = BasicAppWrapper(app)

        notifier = partial(process_stream_events, kernel.shell_stream)
        # seems to be needed for tk
        notifier.__name__ = "notifier"
        app.tk.createfilehandler(kernel.shell_stream.getsockopt(zmq.FD),
                                 READABLE, notifier)
        # schedule initial call after start
        app.after(0, notifier)

        app.mainloop()
    else:
        import asyncio
        import nest_asyncio
        nest_asyncio.apply()

        doi = kernel.do_one_iteration
        # Tk uses milliseconds
        poll_interval = int(1000 * kernel._poll_interval)

        class TimedAppWrapper:
            def __init__(self, app, func):
                self.app = app
                self.app.withdraw()
                self.func = func

            def on_timer(self):
                loop = asyncio.get_event_loop()
                try:
                    loop.run_until_complete(self.func())
                except Exception:
                    kernel.log.exception("Error in message handler")
                self.app.after(poll_interval, self.on_timer)

            def start(self):
                self.on_timer()  # Call it once to get things going.
                self.app.mainloop()

        kernel.app_wrapper = TimedAppWrapper(app, doi)
        kernel.app_wrapper.start()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 22 11:38:18 2020

#GST_DEBUG=3 gst-launch-1.0 -v videotestsrc is-live=true ! video/x-raw,width=120,height=80 ! videoconvert ! omxh264enc ! capsfilter caps="video/x-h264,profile=(string)baseline" ! rtph264pay name=videopay ! fakesink

GST_DEBUG=3 gst-launch-1.0 -v v4l2src device=/dev/video0 ! video/x-raw,width=120,height=80,framerate=30/1 ! videoconvert ! omxh264enc ! capsfilter caps="video/x-h264,profile=(string)baseline, level=(string)1" ! rtph264pay name=videopay ! fakesink

@author: klaus
"""
import nest_asyncio
nest_asyncio.apply()  # required to run this program in Spyder

import PipelineBuilder
import jwt_token as jwt
import ssl
import websockets
import asyncio
import sys
import json
import argparse
import time

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
# gi.require_version('GstPbutils', '1.0')
# from gi.repository import GstPbutils
gi.require_version('GstWebRTC', '1.0')
from gi.repository import GstWebRTC
gi.require_version('GstSdp', '1.0')
from gi.repository import GstSdp

import logging as logger
logger.basicConfig(format='%(asctime)s %(message)s', level=logger.INFO)
async def test_update_every_x_seconds(self, mocker, monkeypatch):
    # workaround for the exception "This event loop is already running"
    import nest_asyncio
    nest_asyncio.apply()

    settings.graph_update_every = 2

    def mock_get_deployments(*args, **kwargs):
        return [
            get_streaming_app_deployment("streaming-app1", "input-topic1",
                                         "output-topic1", "error-topic1"),
            get_streaming_app_deployment("streaming-app2", "input-topic2",
                                         "output-topic2", "error-topic2"),
            get_streaming_app_deployment(
                "streaming-app3",
                "input-topic3",
                "output-topic3",
                "error-topic3",
                pipeline="pipeline2",
            ),
        ]

    def mock_get_stateful_sets(*args, **kwargs):
        return []

    def mock_get_cron_jobs(*args, **kwargs):
        return [V1beta1CronJob(metadata=V1ObjectMeta(name="test"))]

    monkeypatch.setattr(StreamsExplorer, "get_deployments", mock_get_deployments)
    monkeypatch.setattr(StreamsExplorer, "get_stateful_sets", mock_get_stateful_sets)
    monkeypatch.setattr(StreamsExplorer, "get_cron_jobs", mock_get_cron_jobs)
    monkeypatch.setattr(StreamsExplorer, "setup", lambda _: None)
    mocker.patch(
        "streams_explorer.core.services.kafkaconnect.KafkaConnect.get_connectors",
        lambda: ["connector1", "connector2"],
    )

    def get_connector_info(connector_name: str):
        if connector_name == "connector1":
            return {
                "config": {
                    "connector.class": "io.confluent.connect.elasticsearch.ElasticsearchSinkConnector",
                    "topics": "output-topic1,output-topic2",
                    "test": "test_value",
                },
                "type": "sink",
            }
        return {
            "config": {
                "connector.class": "io.confluent.connect.elasticsearch.ElasticsearchSinkConnector",
                "topics": "output-topic3",
                "transforms.changeTopic.replacement": "test-index",
            },
            "type": "sink",
        }

    mocker.patch(
        "streams_explorer.core.services.kafkaconnect.KafkaConnect.get_connector_info",
        get_connector_info,
    )
    mocker.patch(
        "streams_explorer.extractors.load_extractors",
        lambda: None,
    )

    from main import app

    with TestClient(app) as client:
        await asyncio.sleep(0.1)
        response = client.get(f"{API_PREFIX}/graph")
        assert len(response.json().get("nodes")) == 15

        def mock_get_deployments(*args, **kwargs):
            return [
                get_streaming_app_deployment("streaming-app1", "input-topic1",
                                             "output-topic1", "error-topic1"),
                get_streaming_app_deployment("streaming-app2", "input-topic2",
                                             "output-topic2", "error-topic2"),
            ]

        monkeypatch.setattr(StreamsExplorer, "get_deployments", mock_get_deployments)
        monkeypatch.setattr(StreamsExplorer, "get_stateful_sets", mock_get_stateful_sets)

        await asyncio.sleep(2)
        response = client.get(f"{API_PREFIX}/graph")
        assert len(response.json().get("nodes")) == 12

        mocker.patch(
            "streams_explorer.core.services.kafkaconnect.KafkaConnect.get_connectors",
            lambda: ["connector1"],
        )

        await asyncio.sleep(2)
        response = client.get(f"{API_PREFIX}/graph")
        assert len(response.json().get("nodes")) == 9
def run_app(self, app, workers=1):
    self._start_server()
    nest_asyncio.apply()
    uvicorn.run(app, host="127.0.0.1", port=self.port, workers=workers)
def __init__(self, client: '_client.Userbot', **kwargs) -> None:
    super().__init__(**kwargs)
    self._channel = self.getCLogger(__name__)
    Conv.init(client)
    nest_asyncio.apply()
# WindowsProactorEventLoopPolicy raises an exception in tornado (used by Jupyter)
# and causes a hang with websockets.
if sys.platform.startswith('win') and sys.version_info[:3] >= (3, 8, 0) and \
        isinstance(asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy):
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

# `asyncio` by design does not allow event loops to be nested. Jupyter (really
# tornado) has its own event loop already, so we need to patch it.
# Patch asyncio to allow nested use of `loop.run_until_complete()`.
# Before applying the patch, check whether an event loop is available;
# otherwise create one and set it active, and also register cleanup for the end.
try:
    LOOP = asyncio.get_event_loop()
except RuntimeError:
    LOOP = asyncio.new_event_loop()
nest_asyncio.apply(LOOP)


class WebsocketMessage(ABC):
    """Container for a message sent or received via websockets."""

    def __init__(self, type_: str) -> None:
        """WebsocketMessage constructor.

        Args:
            type_: Message type.
        """
        self.type_ = type_

    @abstractmethod
    def get_data(self) -> Union[str, Dict[str, str]]:
def Search(
    query: Text,
    from_date: datetime.datetime = None,
    to_date: datetime.datetime = None,
    number_of_results: int = 100,
) -> pandas.DataFrame:
    """Search tweets.

    Args:
        query: the search query.
        from_date: search from this datetime.
        to_date: search till this datetime.
        number_of_results: number of results to return.

    Returns:
        A dataframe of tweets. For columns, reference:
        {
            'id': 1371248526085226496,
            'conversation_id': '1371248036563795969',
            'created_at': '2021-03-14 23:54:59 UTC',
            'date': '2021-03-14',
            'time': '23:54:59',
            'timezone': '+0000',
            'user_id': 1233956153656332291,
            'username': '******',
            'name': 'funy guy sbungbob',
            'place': '',
            'tweet': '@Zer0Priv And stock up on Bitcoin and GameStop stocks',
            'language': 'en',
            'mentions': [],
            'urls': [],
            'photos': [],
            'replies_count': 0,
            'retweets_count': 0,
            'likes_count': 2,
            'hashtags': [],
            'cashtags': [],
            'link': 'https://twitter.com/je4ia/status/1371248526085226496',
            'retweet': False,
            'quote_url': '',
            'video': 0,
            'thumbnail': '',
            'near': '',
            'geo': '',
            'source': '',
            'user_rt_id': '',
            'user_rt': '',
            'retweet_id': '',
            'reply_to': [{'screen_name': 'Zer0Priv', 'name': 'Zer0', 'id': '1256485417744031747'}],
            'retweet_date': '',
            'translate': '',
            'trans_src': '',
            'trans_dest': '',
        },
    """
    nest_asyncio.apply()
    c = twint.Config()
    c.Search = query
    if from_date:
        c.Since = from_date.strftime('%Y-%m-%d %H:%M:%S')
    if to_date:
        c.Until = to_date.strftime('%Y-%m-%d %H:%M:%S')
    c.Limit = number_of_results
    c.Pandas = True
    c.Hide_output = True
    twint.run.Search(c)
    return twint.storage.panda.Tweets_df
def main(self):
    nest_asyncio.apply()
    warnings.simplefilter('ignore', ResourceWarning)
    print('Starting the crawl...', time.time() - self.time_start)
    if self.limit_num_pages > 1:
        num_pages = int(
            etree.HTML(
                requests.get(self.l_url_search[0],
                             headers=self.headers).text).xpath(
                                 './/*[@id="srp-pagination"]/li/text()')
            [0].split('of')[-1].strip())
    else:
        num_pages = 1
    print('There are {} pages of search results in total'.format(
        str(min(num_pages, self.limit_num_pages))))
    num_batch_search = (min(self.limit_num_pages, num_pages) - 1) // 20 + 1
    for i in range(num_batch_search):
        # await asyncio.sleep(self.sleep_search)
        time.sleep(self.sleep_search)
        print('finish sleeping, start searching', time.time() - self.time_start)
        l_task_search = []
        for url in self.l_url_search[20 * i:min(20 * (i + 1), num_pages,
                                                self.limit_num_pages)]:
            # await asyncio.sleep(self.sleep_search)
            task_search = asyncio.ensure_future(self.parse_search_result(url))
            # task_search.add_done_callback(self.parse_extract_info)
            l_task_search.append(task_search)
        print('there are {} search tasks in total in batch_{}'.format(
            str(len(l_task_search)), str(i + 1)))
        loop_search = asyncio.get_event_loop()
        loop_search.run_until_complete(asyncio.wait(l_task_search))
        self.df_url_extract.to_csv('./url_extract_restore_{}.csv'.format(str(i + 1)),
                                   encoding='utf-8')
    print('Search finished! {} search results in total'.format(
        str(len(self.l_url_extract))), time.time() - self.time_start)
    self.df_url_extract = self.df_url_extract.reset_index().drop(0, axis=0).T
    self.df_url_extract.to_csv('./url_extract.csv', encoding='utf-8')
    print('self.df_url_extract:', self.df_url_extract)
    print('shape of self.df_url_extract: {}'.format(self.df_url_extract.shape))
    num_theses = min(self.limit_num_theses, len(self.l_url_extract))
    num_batch_extract = (num_theses - 1) // 20 + 1
    for i in range(num_batch_extract):
        # await asyncio.sleep(self.sleep_search)
        time.sleep(self.sleep_extract)
        print('finish sleeping, start extracting', time.time() - self.time_start)
        l_task_extract = []
        for url in self.l_url_extract[20 * i:min(20 * (i + 1), num_theses)]:
            # await asyncio.sleep(self.sleep_extract)
            task_extract = asyncio.ensure_future(self.parse_extract_info(url))
            l_task_extract.append(task_extract)
        print('there are {} extract tasks in total in batch_{}'.format(
            str(len(l_task_extract)), str(i + 1)))
        loop_search = asyncio.get_event_loop()
        loop_search.run_until_complete(asyncio.wait(l_task_extract))
    print('Crawl finished! Collected and parsed detailed information for {} theses'.format(num_theses))
import nest_asyncio
nest_asyncio.apply()

import discord
from discord.ext import commands

client = commands.Bot(command_prefix='++')


@client.event
async def on_ready():
    print('We have logged in as {0.user}'.format(client))


@client.event
async def on_message(message):
    if message.author == client.user:
        return
    if message.content.startswith('$hello'):
        await message.channel.send('Hello!')


client.run('TOKEN')
def __del__(self):
    if hasattr(self, "session"):
        nest_asyncio.apply()
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.session.close())
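# A hedged sketch of the kind of class this destructor could belong to (the
# class name and the use of aiohttp are assumptions, not from the source):
# the patch lets run_until_complete() close the session even if __del__
# fires while a loop is already running, e.g. inside a notebook.
import asyncio
import aiohttp
import nest_asyncio

class ApiClient:
    def __init__(self):
        self.session = aiohttp.ClientSession()

    def __del__(self):
        if hasattr(self, "session"):
            nest_asyncio.apply()
            loop = asyncio.get_event_loop()
            loop.run_until_complete(self.session.close())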
import nest_asyncio
from car_market.domain.models import Car

nest_asyncio.apply()  # https://github.com/encode/starlette/issues/440#issuecomment-524613908

import pytest
from fastapi.testclient import TestClient
from main import app
from fastapi.encoders import jsonable_encoder

client = TestClient(app)


@pytest.mark.asyncio
async def test_market_resolver_WhenSalesEvenThenMiddleMean():
    response = client.post(
        "/market",
        json=jsonable_encoder(Car(brand=2, model=1, city=1)),
    )
    assert response.status_code == 200
    assert response.text == '{"car":{"brand":2,"model":1,"city":1},' \
                            '"median_price":102.5,' \
                            '"sales":[{"id":4,"car":{"brand":2,"model":1,"city":1},"selling_price":100},' \
                            '{"id":5,"car":{"brand":2,"model":1,"city":1},"selling_price":105}]}'


@pytest.mark.asyncio
async def test_market_resolver_WhenSalesOddThenMiddle():
    response = client.post(
def __init__(self, bot):
    self.bot = bot
    nest_asyncio.apply()
def setUp(self):
    self.loop = asyncio.new_event_loop()
    nest_asyncio.apply(self.loop)
    asyncio.set_event_loop(self.loop)
    self.loop.set_debug(True)
    self.loop.set_exception_handler(exception_handler)
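# A hedged companion sketch (class name and test body are illustrative, not
# from the source): it shows why patching the loop in setUp matters, since
# the inner run_until_complete() call below would otherwise raise
# "This event loop is already running".
import asyncio
import unittest
import nest_asyncio

class NestedLoopExample(unittest.TestCase):
    def setUp(self):
        self.loop = asyncio.new_event_loop()
        nest_asyncio.apply(self.loop)
        asyncio.set_event_loop(self.loop)

    def test_nested_run_until_complete(self):
        async def outer():
            # legal only on a nest_asyncio-patched loop
            return self.loop.run_until_complete(asyncio.sleep(0, result='ok'))

        self.assertEqual(self.loop.run_until_complete(outer()), 'ok')

    def tearDown(self):
        self.loop.close()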
async def backtest_run(
    start: datetime,
    duration: timedelta,
    ref_run_id: int,
    specific_symbols: List[str] = None,
) -> None:
    if specific_symbols:
        symbols_and_start_time: List = []
        for symbol in specific_symbols:
            symbols_and_start_time.append((symbol, start))
        num_symbols = len(specific_symbols)
    elif not strict:
        symbols_and_start_time = await TrendingTickers.load(batch_id)
        num_symbols = len(symbols_and_start_time)
    else:
        print("strict mode selected, loading symbols from trades")
        nest_asyncio.apply()
        _df = load_trades_by_batch_id(batch_id)
        symbols = _df.symbol.unique().tolist()
        num_symbols = len(symbols)
        est = pytz.timezone("America/New_York")
        start_time = pytz.utc.localize(_df.start_time.min()).astimezone(est)
        symbols_and_start_time = list(
            zip(symbols, [start_time for x in range(num_symbols)]))

    print(f"loaded {len(symbols_and_start_time)} symbols")

    if num_symbols > 0:
        est = pytz.timezone("America/New_York")
        start_time = pytz.utc.localize(start).astimezone(est)
        config.market_open = start_time.replace(hour=9, minute=30, second=0,
                                                microsecond=0)
        config.market_close = start_time.replace(hour=16, minute=0, second=0,
                                                 microsecond=0)
        print(f"market_open {config.market_open}")
        await create_strategies(
            conf_dict,
            duration,
            ref_run_id,
            uid,
            start,
            bypass_duration is not None,
        )
        for symbol_and_start_time in symbols_and_start_time:
            symbol = symbol_and_start_time[0]
            await backtest_symbol(
                data_api=data_api,
                portfolio_value=portfolio_value,
                symbol=symbol,
                start=start,
                duration=duration,
                scanner_start_time=symbol_and_start_time[1],
                debug_symbol=True if symbol in debug_symbols else False,
            )
import pandas as pd
from requests_html import AsyncHTMLSession
from bs4 import BeautifulSoup as bs  # importing BeautifulSoup
import nest_asyncio
import json
import re
import time

nest_asyncio.apply()  # needed when running in a Jupyter notebook

import pymysql
from tqdm import tqdm

df_cid['channel_id']  # list of ~17,000 YouTube channel IDs

url = "https://www.youtube.com/channel/"
session = AsyncHTMLSession()
mail_pattern = r"([\w\.-]+)@([\w\.-]+)(\.[\w\.]+)"  # email-pattern regex
results = {}
cnt = 0
for cid in tqdm(df_cid['channel_id']):
    result = {}
    video_url = url + cid
    response = await session.get(video_url)  # top-level await: works in IPython/Jupyter
    soup = bs(response.html.html, "html.parser")
    if soup.find("meta", itemprop="description") is not None:
        description = soup.find("meta", itemprop="description")['content']
    else:
        description = ''
""" Seamless: framework for data-driven and live programming Copyright 2016-2018, Sjoerd de Vries """ import sys import time import functools import traceback import nest_asyncio nest_asyncio.apply() import asyncio #asyncio.get_event_loop().set_debug(True) from abc import abstractmethod class Wrapper: @abstractmethod def _unwrap(self): pass #Dependencies of seamless # 1. hard dependencies; without these, "import seamless" will fail. # Still, if necessary, some of these dependencies could be removed, but seamless would have to be more minimalist in loading its lib import numpy as np if np.dtype(np.object).itemsize != 8: raise ImportError("Seamless requires a 64-bit system")
def run_list(args):
    nest_asyncio.apply()
    asyncio.run(show_bots())
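# A hedged usage sketch (show_bots is assumed to be a coroutine defined
# elsewhere; the stub below is illustrative): nest_asyncio also patches
# asyncio.run(), so run_list() can be invoked even while another loop is
# already running, e.g. from a Jupyter cell or inside another coroutine.
import asyncio
import nest_asyncio

async def show_bots():
    # stub standing in for the real implementation
    await asyncio.sleep(0)
    print("bot-1, bot-2")

nest_asyncio.apply()

async def cli_entry():
    run_list(args=None)  # nested asyncio.run(): safe only because of the patch

asyncio.run(cli_entry())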