class Client(FunctionalChannel):
    @remote
    async def reconfigure(self):
        utils.set_id(None)
        utils.set_secret(None)
        utils.set_name(None)
        utils.save_data()
        await authenticate()

    sync_lock = asyncio.Lock()
    sync_executor = ThreadPoolExecutor(max_workers=4)

    @remote
    async def sync_queue_folder(self, server_files):
        async with Client.sync_lock:
            folder = Path(utils.get_queue_sync_folder())
            folder.mkdir(exist_ok=True, parents=True)
            md5_futures = []
            files = []
            names = []

            # build the list of files we already have
            for file in folder.iterdir():
                if not file.is_file():
                    continue
                name = file.name
                try:
                    index, gcode_name = name.split(" ", 1)
                    int(index)  # validate the numeric prefix
                except ValueError:
                    file.unlink()
                    continue
                # hash in the thread pool so the event loop is not blocked
                md5_future = asyncio.get_event_loop().run_in_executor(
                    Client.sync_executor, utils.file_md5, str(file))

                files.append(file)
                md5_futures.append(md5_future)
                names.append(gcode_name)
            md5s = await asyncio.gather(*md5_futures)
            local_files = [{"md5": md5s[i], "name": names[i], "file": files[i]}
                           for i in range(len(files))]

            # work out which files must be deleted, renamed or downloaded
            to_remove = {local["file"] for local in local_files}
            to_download = set()
            to_rename = set()

            if server_files:
                zero_pad = int(math.log10(len(server_files)) + 1)
            for i, server_file in enumerate(server_files):
                name = server_file["name"]
                filename = "%s %s" % (str(i + 1).zfill(zero_pad), name)
                server_md5 = server_file["md5"]
                for local_file in local_files[:]:
                    if local_file["name"] == name and local_file["md5"] == server_md5:
                        file = local_file["file"]
                        to_remove.discard(file)
                        if file.name != filename:
                            to_rename.add((file, file.with_name(filename)))

                        # if there are duplicates we cannot move the same file
                        # more than once; copying duplicates instead of
                        # re-downloading would be nicer... but meh
                        local_files.remove(local_file)
                        break
                else:
                    to_download.add((server_file["url"], folder / filename))

            # delete
            for file in to_remove:
                file.unlink()

            # rename in two passes through a temporary prefix so swapped
            # positions do not overwrite each other
            temporal_prefix = "tempname"
            for current, new in to_rename:
                current.rename(current.with_name(temporal_prefix + current.name))
            for current, new in to_rename:
                current.with_name(temporal_prefix + current.name).rename(new)

            # download
            down_futs = []
            for url, file in to_download:
                down_futs.append(asyncio.get_event_loop().run_in_executor(
                    Client.sync_executor, urllib.request.urlretrieve,
                    SERVER_HOST + url, str(file)))
            await asyncio.gather(*down_futs)

    async def send_ac_message(self, m):
        await _ws.send(m)
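
The snippet above combines two patterns: a class-level asyncio.Lock so only one folder sync runs at a time, and run_in_executor to keep blocking work (MD5 hashing, urlretrieve) off the event loop. A minimal, self-contained sketch of that combination (hash_folder and the helper names are illustrative, not from the code above):

import asyncio
import hashlib
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path

_sync_lock = asyncio.Lock()              # serializes whole sync runs
_executor = ThreadPoolExecutor(max_workers=4)

def _md5(path: Path) -> str:
    # blocking work, executed in the thread pool
    return hashlib.md5(path.read_bytes()).hexdigest()

async def hash_folder(folder: Path) -> dict:
    async with _sync_lock:
        loop = asyncio.get_event_loop()
        files = [f for f in folder.iterdir() if f.is_file()]
        digests = await asyncio.gather(
            *(loop.run_in_executor(_executor, _md5, f) for f in files))
        return dict(zip((f.name for f in files), digests))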
Example #2
_LOG = logging.getLogger(__name__)
_LOG_STR = "<<<!  :::::  %s  :::::  !>>>"

_PYROFUNC = Callable[['types.bound.Message'], Any]
_TASK_1_START_TO = time.time()
_TASK_2_START_TO = time.time()

_B_CMN_CHT: List[int] = []
_B_AD_CHT: Dict[int, ChatMember] = {}
_B_NM_CHT: Dict[int, ChatMember] = {}

_U_AD_CHT: Dict[int, ChatMember] = {}
_U_NM_CHT: Dict[int, ChatMember] = {}

_CH_LKS: Dict[str, asyncio.Lock] = {}
_CH_LKS_LK = asyncio.Lock()
_INIT_LK = asyncio.Lock()


async def _update_u_cht(r_m: RawMessage) -> Optional[ChatMember]:
    if r_m.chat.id not in {**_U_AD_CHT, **_U_NM_CHT}:
        try:
            user = await r_m.chat.get_member(RawClient.USER_ID)
        except UserNotParticipant:
            return None
        user.can_all = None
        if user.status == "creator":
            user.can_all = True
        if user.status in ("creator", "administrator"):
            _U_AD_CHT[r_m.chat.id] = user
        else:
            _U_NM_CHT[r_m.chat.id] = user

Example #3
# Pick one problem from the dictionary
def get_problem(dictionary):
    return dictionary[random.randrange(len(dictionary))]

# init
dictionary = init_dictionary('dictionary_list')
print('dictionary size : ', len(dictionary))
if len(dictionary) == 0:
    print('problem not found')
    sys.exit()
extra_dictionary = init_dictionary('extra_dictionary_list')
print('extra_dictionary size : ', len(extra_dictionary))
token = read_token()
active_channel_id = read_active_channel_id()
lock = asyncio.Lock()


# status
question_solving = False
contest_solving = False
contest_problem_num = 0
contest_solving_num = 0
problem = ''
answer = ''
others = []

# initialization
def hard_reset():
    global question_solving
    question_solving = False
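
The module-level lock above presumably guards the mutable quiz state (question_solving, problem, answer) when message handlers run concurrently. A hypothetical handler continuing this snippet, not part of the original bot:

async def on_answer(text):
    global question_solving
    async with lock:
        # read and mutate the shared quiz state atomically with respect
        # to other handlers awaiting the same lock
        if question_solving and text == answer:
            question_solving = False
            return True
    return False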
Example #4
def __init__(self, connection):
    self.connection = weakref.ref(connection)
    # NOTE: the loop argument to asyncio primitives is deprecated since
    # Python 3.8 and was removed in Python 3.10
    self.reconnecting = asyncio.Lock(loop=connection.loop)
    self.close_called = asyncio.Event(loop=connection.loop)
Example #5
    def __init__(self,
                 session,
                 api_id,
                 api_hash,
                 *,
                 connection=ConnectionTcpFull,
                 use_ipv6=False,
                 proxy=None,
                 timeout=timedelta(seconds=10),
                 request_retries=5,
                 connection_retries=5,
                 auto_reconnect=True,
                 report_errors=True,
                 device_model=None,
                 system_version=None,
                 app_version=None,
                 lang_code='en',
                 system_lang_code='en',
                 loop=None):
        if not api_id or not api_hash:
            raise ValueError("Your API ID or Hash cannot be empty or None. "
                             "Refer to telethon.rtfd.io for more information.")

        self._use_ipv6 = use_ipv6
        self._loop = loop or asyncio.get_event_loop()

        # Determine what session object we have
        if isinstance(session, str) or session is None:
            session = SQLiteSession(session)
        elif not isinstance(session, Session):
            raise TypeError(
                'The given session must be a str or a Session instance.')

        # ':' in session.server_address is True if it's an IPv6 address
        if (not session.server_address
                or (':' in session.server_address) != use_ipv6):
            session.set_dc(
                DEFAULT_DC_ID,
                DEFAULT_IPV6_IP if self._use_ipv6 else DEFAULT_IPV4_IP,
                DEFAULT_PORT)

        session.report_errors = report_errors
        self.session = session
        self.api_id = int(api_id)
        self.api_hash = api_hash

        self._request_retries = request_retries or sys.maxsize
        self._connection_retries = connection_retries or sys.maxsize
        self._auto_reconnect = auto_reconnect

        if isinstance(connection, type):
            connection = connection(proxy=proxy,
                                    timeout=timeout,
                                    loop=self._loop)

        # Used on connection. Capture the variables in a lambda since
        # exporting clients need to create this InvokeWithLayerRequest.
        system = platform.uname()
        self._init_with = lambda x: functions.InvokeWithLayerRequest(
            LAYER,
            functions.InitConnectionRequest(
                api_id=self.api_id,
                device_model=device_model or system.system or 'Unknown',
                system_version=system_version or system.release or '1.0',
                app_version=app_version or self.__version__,
                lang_code=lang_code,
                system_lang_code=system_lang_code,
                lang_pack='',  # "langPacks are for official apps only"
                query=x))

        state = MTProtoState(self.session.auth_key)
        self._connection = connection
        self._sender = MTProtoSender(state,
                                     connection,
                                     self._loop,
                                     retries=self._connection_retries,
                                     auto_reconnect=self._auto_reconnect,
                                     update_callback=self._handle_update)

        # Cache :tl:`ExportedAuthorization` as ``dc_id: MTProtoState``
        # to easily import them when getting an exported sender.
        self._exported_auths = {}

        # Save whether the user is authorized here (a.k.a. logged in)
        self._authorized = None  # None = We don't know yet

        # Default PingRequest delay
        self._last_ping = datetime.now()
        self._ping_delay = timedelta(minutes=1)

        self._updates_handle = None
        self._last_request = time.time()
        self._channel_pts = {}

        # Start with invalid state (-1) so we can have somewhere to store
        # the state, but also be able to determine if we are authorized.
        self._state = types.updates.State(-1, 0, datetime.now(), 0, -1)

        # Some further state for subclasses
        self._event_builders = []
        self._events_pending_resolve = []
        self._event_resolve_lock = asyncio.Lock()

        # Default parse mode
        self._parse_mode = markdown

        # Some fields to easy signing in. Let {phone: hash} be
        # a dictionary because the user may change their mind.
        self._phone_code_hash = {}
        self._phone = None
        self._tos = None

        # Sometimes we need to know who we are, cache the self peer
        self._self_input_peer = None
Example #6
    def __init__(self, network, config: SimpleConfig = None):
        global INSTANCE
        INSTANCE = self

        Logger.__init__(self)

        if constants.net.TESTNET:
            self.default_port = 18321
            self.start_str = b'\x0B\x11\x09\x07'
            self.spork_address = 'movuntE9Cn6zgxtzabQbgqDVQKUNPv49RJ' #02ba07bdd2ec80a1836102c4a496f6e6e09cb969aa69e98b727040b4d96a382972
            self.dns_seeds = ['testnetseed.terracoin.io']
        else:
            self.default_port = 13333
            self.start_str = b'\x42\xBA\xBE\x56'
            self.spork_address = '13GHJVztHpyoaoPqakuXnmwjUvLYp469VN' #02f1b4c2d95dee0f02de365173ed859b8604f9ce3653ef1f9c7d4723a2b3458b30
            self.dns_seeds = ['seed.terracoin.io',
                              'dnsseed.southofheaven.ca']
        self.network = network
        self.proxy = None
        self.loop = network.asyncio_loop
        self._loop_thread = network._loop_thread
        self.config = network.config

        if config.path:
            self.data_dir = os.path.join(config.path, 'terracoin_net')
            make_dir(self.data_dir)
        else:
            self.data_dir = None

        self.main_taskgroup = None  # type: TaskGroup

        # locks
        self.restart_lock = asyncio.Lock()
        self.callback_lock = threading.Lock()
        self.banlist_lock = threading.RLock()            # <- re-entrant
        self.peers_lock = threading.Lock()  # for mutating/iterating self.peers

        # callbacks set by the GUI
        self.callbacks = defaultdict(list)  # note: needs self.callback_lock

        # set of peers we have an ongoing connection with
        self.peers = {}  # type: Dict[str, TerracoinPeer]
        self.connecting = set()
        self.peers_queue = None
        self.banlist = self._read_banlist()
        self.found_peers = set()

        self.is_cmd_terracoin_peers = not config.is_modifiable('terracoin_peers')
        self.read_conf()

        self._max_peers = self.config.get('terracoin_max_peers', MAX_PEERS_DEFAULT)
        # sporks manager
        self.sporks = TerracoinSporks()

        # Recent islocks and chainlocks data
        self.recent_islock_invs = deque([], 200)
        self.recent_islocks_lock = threading.Lock()
        self.recent_islocks_clear = time.time()
        self.recent_islocks = list()

        # Activity data
        self.read_bytes = 0
        self.read_time = 0
        self.write_bytes = 0
        self.write_time = 0
        self.set_spork_time = 0

        # Dump network messages. Set at runtime from the console.
        self.debug = False
Example #7
def __init__(self):
    self.async_conn = None
    self.get_conn_lock = asyncio.Lock()
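
A holder like this usually pairs the lock with a create-if-missing check so concurrent callers end up sharing a single connection. A hedged sketch of such a pattern (the _connect stub is a stand-in for the real setup, not part of the snippet):

import asyncio

class ConnectionHolder:
    def __init__(self):
        self.async_conn = None
        self.get_conn_lock = asyncio.Lock()

    async def get_conn(self):
        async with self.get_conn_lock:
            # the first caller creates the connection; later callers reuse it
            if self.async_conn is None:
                self.async_conn = await self._connect()
        return self.async_conn

    async def _connect(self):
        await asyncio.sleep(0)  # stand-in for the real connection setup
        return object()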
Example #8
    def __init__(self, config=None):
        global INSTANCE
        INSTANCE = self
        if config is None:
            config = {}  # Do not use mutables as default values!
        self.config = SimpleConfig(config) if isinstance(config,
                                                         dict) else config
        self.num_server = 10 if not self.config.get('oneserver') else 0
        blockchain.blockchains = blockchain.read_blockchains(self.config)
        self.print_error("blockchains", list(blockchain.blockchains.keys()))
        self.blockchain_index = config.get('blockchain_index', 0)
        if self.blockchain_index not in blockchain.blockchains.keys():
            self.blockchain_index = 0
        # Server for addresses and transactions
        self.default_server = self.config.get('server', None)
        # Sanitize default server
        if self.default_server:
            try:
                deserialize_server(self.default_server)
            except:
                self.print_error(
                    'Warning: failed to parse server-string; falling back to random.'
                )
                self.default_server = None
        if not self.default_server:
            self.default_server = pick_random_server()

        # locks: if you need to take multiple ones, acquire them in the order they are defined here!
        self.bhi_lock = asyncio.Lock()
        self.interface_lock = threading.RLock()  # <- re-entrant
        self.callback_lock = threading.Lock()
        self.recent_servers_lock = threading.RLock()  # <- re-entrant

        self.server_peers = {}  # returned by interface (servers that the main interface knows about)
        self.recent_servers = self.read_recent_servers()  # note: needs self.recent_servers_lock

        self.banner = ''
        self.donation_address = ''
        self.relay_fee = None
        # callbacks set by the GUI
        self.callbacks = defaultdict(list)  # note: needs self.callback_lock

        dir_path = os.path.join(self.config.path, 'certs')
        util.make_dir(dir_path)

        # retry times
        self.server_retry_time = time.time()
        self.nodes_retry_time = time.time()
        # kick off the network.  interface is the main server we are currently
        # communicating with.  interfaces is the set of servers we are connecting
        # to or have an ongoing connection with
        self.interface = None  # note: needs self.interface_lock
        self.interfaces = {}  # note: needs self.interface_lock
        self.auto_connect = self.config.get('auto_connect', True)
        self.connecting = set()
        self.server_queue = None
        self.server_queue_group = None
        self.asyncio_loop = asyncio.get_event_loop()
        self.start_network(
            deserialize_server(self.default_server)[2],
            deserialize_proxy(self.config.get('proxy')))
Example #9
def __init__(self, rpc_user, rpc_password):
    Logger.__init__(self)
    self.rpc_user = rpc_user
    self.rpc_password = rpc_password
    self.auth_lock = asyncio.Lock()
    self._methods = {}  # type: Dict[str, Callable]
Example #10
def __init__(self, result):
    self._stopped = asyncio.Event()
    self._task = asyncio.ensure_future(self._stopped.wait())
    self._result = result
    self._mutex = asyncio.Lock()
Example #11
def __init__(self, coresys: CoreSys):
    """Initialize Docker base wrapper."""
    self.coresys: CoreSys = coresys
    self._meta: Optional[Dict[str, Any]] = None
    self.lock: asyncio.Lock = asyncio.Lock()
Example #12
    async def setup(self, name, controller_name, _do_long_pull=True):
        """Setup the router state

        Args:
            name(str): Used to identify the router when reporting queue
                lengths to the controller.
            controller_name(str): The actor name for the controller.
            _do_long_pull(bool): Used by unit testing.
        """

        # Note: Several queues are used in the router
        # - When a request comes in, it's placed inside its corresponding
        #   endpoint_queue.
        # - The endpoint_queue is dequeued during the flush operation, which
        #   moves the queries to the backend buffer_queue. Here we match a
        #   request for an endpoint to a backend given some policy.
        # - The worker_queue is used to collect idle actor handles. These
        #   handles are dequeued during the second stage of the flush
        #   operation, which assigns queries in buffer_queue to actor handles.

        self.name = name

        # -- Queues -- #

        # endpoint_name -> request queue
        # We use FIFO (left to right) ordering. The new items should be added
        # using appendleft. Old items should be removed via pop().
        self.endpoint_queues: DefaultDict[str, Deque[Query]] = defaultdict(deque)
        # backend_name -> worker replica tag queue
        self.worker_queues: DefaultDict[str, Deque[str]] = defaultdict(deque)
        # backend_name -> worker payload queue
        self.backend_queues = defaultdict(deque)

        # -- Metadata -- #

        # endpoint_name -> traffic_policy
        self.traffic = dict()
        # backend_name -> backend_config
        self.backend_info = dict()
        # replica tag -> worker_handle
        self.replicas = dict()
        # backend_name -> replica_tag -> concurrent queries counter
        self.queries_counter = defaultdict(lambda: defaultdict(int))

        # -- Synchronization -- #

        # This lock guarantees that only one flush operation can happen at a
        # time. Without the lock, multiple flush operations could pop from the
        # same buffer_queue and worker_queue and create a deadlock, e.g. one
        # operation holding the only query and another holding the only idle
        # replica. Additionally, allowing only one flush operation at a time
        # simplifies the design of custom queuing and batching policies.
        self.flush_lock = asyncio.Lock()

        # -- State Restoration -- #
        # Fetch the replica handles, traffic policies, and backend configs from
        # the controller. We use a "pull-based" approach instead of pushing
        # them from the controller so that the router can transparently recover
        # from failure.
        self.controller = ray.get_actor(controller_name)

        # -- Metrics Registration -- #
        self.num_router_requests = metrics.Count(
            "num_router_requests",
            description="Number of requests processed by the router.",
            tag_keys=("endpoint", ))
        self.num_error_endpoint_requests = metrics.Count(
            "num_error_endpoint_requests",
            description=(
                "Number of requests that errored when getting results "
                "for the endpoint."),
            tag_keys=("endpoint", ))
        self.num_error_backend_requests = metrics.Count(
            "num_error_backend_requests",
            description=("Number of requests that errored when getting result "
                         "from the backend."),
            tag_keys=("backend", ))

        self.backend_queue_size = metrics.Gauge(
            "backend_queued_queries",
            description=("Current number of queries queued "
                         "in the router for a backend"),
            tag_keys=("backend", ))

        asyncio.get_event_loop().create_task(self.report_queue_lengths())

        if _do_long_pull:
            self.long_poll_client = LongPollerAsyncClient(
                self.controller, {
                    "traffic_policies": self.update_traffic_policies,
                    "worker_handles": self.update_worker_handles,
                    "backend_configs": self.update_backend_configs
                })
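
The flush_lock above is the classic one-flusher-at-a-time pattern: a single lock around the whole drain step, so two flushes can never each hold one half of a matching pair. A reduced sketch (queue and handler names are illustrative, not from the router):

import asyncio
from collections import deque

class Flusher:
    def __init__(self):
        self.buffer_queue = deque()
        self.flush_lock = asyncio.Lock()

    async def flush(self):
        async with self.flush_lock:
            # only one coroutine drains at a time, avoiding the deadlock
            # described in the comment above
            while self.buffer_queue:
                await self.assign(self.buffer_queue.pop())

    async def assign(self, query):
        await asyncio.sleep(0)  # stand-in for matching the query to a replica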
Example #13
def __init__(self, writer, loop):
    self.loop = loop
    self.writer = writer
    self.closed = False
    self.write_lock = asyncio.Lock(loop=loop)
Example #14
import middlewared.sqlalchemy as sa

from middlewared.schema import accepts, Bool, Dict, Int, List, Patch, Str, ValidationErrors
from middlewared.service import CallError, CRUDService, item_method, private
from middlewared.validators import Range
from middlewared.utils import osc

from .vm_supervisor import VMSupervisorMixin

BOOT_LOADER_OPTIONS = {
    'UEFI': 'UEFI',
    'UEFI_CSM': 'Legacy BIOS',
}
if osc.IS_FREEBSD:
    BOOT_LOADER_OPTIONS['GRUB'] = 'Grub bhyve (specify grub.cfg)'
LIBVIRT_LOCK = asyncio.Lock()
RE_NAME = re.compile(r'^[a-zA-Z_0-9]+$')


class VMModel(sa.Model):
    __tablename__ = 'vm_vm'

    id = sa.Column(sa.Integer(), primary_key=True)
    name = sa.Column(sa.String(150))
    description = sa.Column(sa.String(250))
    vcpus = sa.Column(sa.Integer(), default=1)
    memory = sa.Column(sa.Integer())
    autostart = sa.Column(sa.Boolean(), default=False)
    time = sa.Column(sa.String(5), default='LOCAL')
    grubconfig = sa.Column(sa.Text(), nullable=True)
    bootloader = sa.Column(sa.String(50), default='UEFI')
Example #15
def _init_asgi_lock(self) -> None:
    self.async_lock = asyncio.Lock()
Example #16
    def __init__(
        self,
        opp: OpenPeerPowerType,
        broker: str,
        port: int,
        client_id: Optional[str],
        keepalive: Optional[int],
        username: Optional[str],
        password: Optional[str],
        certificate: Optional[str],
        client_key: Optional[str],
        client_cert: Optional[str],
        tls_insecure: Optional[bool],
        protocol: Optional[str],
        will_message: Optional[Message],
        birth_message: Optional[Message],
        tls_version: Optional[int],
    ) -> None:
        """Initialize Open Peer Power MQTT client."""
        # We don't import them on the top because some integrations
        # should be able to optionally rely on MQTT.
        # pylint: disable=import-outside-toplevel
        import paho.mqtt.client as mqtt

        self.opp = opp
        self.broker = broker
        self.port = port
        self.keepalive = keepalive
        self.subscriptions: List[Subscription] = []
        self.birth_message = birth_message
        self.connected = False
        self._mqttc: mqtt.Client = None
        self._paho_lock = asyncio.Lock()

        if protocol == PROTOCOL_31:
            proto: int = mqtt.MQTTv31
        else:
            proto = mqtt.MQTTv311

        if client_id is None:
            self._mqttc = mqtt.Client(protocol=proto)
        else:
            self._mqttc = mqtt.Client(client_id, protocol=proto)

        if username is not None:
            self._mqttc.username_pw_set(username, password)

        if certificate is not None:
            self._mqttc.tls_set(
                certificate,
                certfile=client_cert,
                keyfile=client_key,
                tls_version=tls_version,
            )

            if tls_insecure is not None:
                self._mqttc.tls_insecure_set(tls_insecure)

        self._mqttc.on_connect = self._mqtt_on_connect
        self._mqttc.on_disconnect = self._mqtt_on_disconnect
        self._mqttc.on_message = self._mqtt_on_message

        if will_message is not None:
            self._mqttc.will_set(  # pylint: disable=no-value-for-parameter
                *attr.astuple(will_message))
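
_paho_lock typically serializes calls into the synchronous paho client from async code. A sketch of how a publish helper might use it (this is an assumption about usage, not the integration's actual method; async_add_executor_job is assumed to be the usual executor hook in this codebase):

async def async_publish(self, topic, payload):
    async with self._paho_lock:
        # paho-mqtt is not asyncio-aware: run the blocking call in the
        # executor while holding the lock so client calls never interleave
        await self.opp.async_add_executor_job(
            self._mqttc.publish, topic, payload)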
Example #17
from tortoise.functions import Count

from PyQt5.QtCore import QUrl

from galacteek import log
from galacteek import AsyncSignal
from galacteek.core import iptags
from galacteek.core.asynclib import loopTime
from galacteek.ipfs.cidhelpers import IPFSPath

from galacteek.database.models import *  # noqa

from galacteek.database.ops.bm import *  # noqa
from galacteek.database.ops.pinning import *  # noqa

databaseLock = asyncio.Lock()


def dbLock(func):
    @functools.wraps(func)
    async def wrapper(*args, **kw):
        async with databaseLock:
            return await func(*args, **kw)

    return wrapper


async def initOrm(dbpath):
    log.debug('ORM init: {}'.format(dbpath))

    try:
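
dbLock turns the module-level databaseLock into a decorator, so every decorated coroutine runs with exclusive access to the database. A hypothetical decorated operation (the name and body are illustrative, not from this module):

@dbLock
async def addHashmark(path, title):
    # runs with databaseLock held; concurrent writers queue up here
    await Hashmark.create(path=path, title=title)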
Example #18
    def __init__(self, config: SimpleConfig = None):
        global INSTANCE
        INSTANCE = self

        self.asyncio_loop = asyncio.get_event_loop()
        assert self.asyncio_loop.is_running(), "event loop not running"
        self._loop_thread = None  # type: threading.Thread  # set by caller; only used for sanity checks

        if config is None:
            config = {}  # Do not use mutables as default values!
        self.config = SimpleConfig(config) if isinstance(
            config, dict) else config  # type: SimpleConfig
        blockchain.read_blockchains(self.config)
        self.print_error(
            "blockchains",
            list(map(lambda b: b.forkpoint, blockchain.blockchains.values())))
        self._blockchain_preferred_block = self.config.get(
            'blockchain_preferred_block', None)  # type: Optional[Dict]
        self._blockchain = blockchain.get_best_chain()
        # Server for addresses and transactions
        self.default_server = self.config.get('server', None)
        # Sanitize default server
        if self.default_server:
            try:
                deserialize_server(self.default_server)
            except:
                self.print_error(
                    'Warning: failed to parse server-string; falling back to random.'
                )
                self.default_server = None
        if not self.default_server:
            self.default_server = pick_random_server()

        self.main_taskgroup = None  # type: TaskGroup

        # locks
        self.restart_lock = asyncio.Lock()
        self.bhi_lock = asyncio.Lock()
        self.callback_lock = threading.Lock()
        self.recent_servers_lock = threading.RLock()  # <- re-entrant
        self.interfaces_lock = threading.Lock()  # for mutating/iterating self.interfaces

        self.server_peers = {}  # returned by interface (servers that the main interface knows about)
        self.recent_servers = self._read_recent_servers()  # note: needs self.recent_servers_lock

        self.banner = ''
        self.donation_address = ''
        self.relay_fee = None  # type: Optional[int]
        # callbacks set by the GUI
        self.callbacks = defaultdict(list)  # note: needs self.callback_lock

        dir_path = os.path.join(self.config.path, 'certs')
        util.make_dir(dir_path)

        # retry times
        self.server_retry_time = time.time()
        self.nodes_retry_time = time.time()
        # the main server we are currently communicating with
        self.interface = None  # type: Interface
        # set of servers we have an ongoing connection with
        self.interfaces = {}  # type: Dict[str, Interface]
        self.auto_connect = self.config.get('auto_connect', True)
        self.connecting = set()
        self.server_queue = None
        self.proxy = None

        self._set_status('disconnected')
Example #19
def __init__(self, url, number=0):
    self.url = url
    self.db_number = number
    self.started = False
    self.connection = None
    self.startup_lock = asyncio.Lock()
Example #20
class TokenAuthHttpDownloader(HttpDownloader):
    """
    Custom Downloader that automatically handles Token Authentication.

    Additionally, use custom headers from DeclarativeArtifact.extra_data['headers']
    """

    token = {'token': None}
    token_lock = asyncio.Lock()

    def __init__(self, *args, **kwargs):
        """
        Initialize the downloader.
        """
        self.remote = kwargs.pop('remote')
        super().__init__(*args, **kwargs)

    @backoff.on_exception(backoff.expo, ClientResponseError, max_tries=10, giveup=http_giveup)
    async def _run(self, handle_401=True, extra_data=None):
        """
        Download, validate, and compute digests on the `url`. This is a coroutine.

        This method is decorated with a backoff-and-retry behavior to retry HTTP 429 errors. It
        retries with exponential backoff 10 times before allowing a final exception to be raised.

        This method provides the same return object type and documented in
        :meth:`~pulpcore.plugin.download.BaseDownloader._run`.

        Args:
            handle_401 (bool): If true, catch 401, request a new token and retry.
            extra_data (dict): Optional; custom request headers are read from
                extra_data['headers'].
        """
        headers = {}
        if extra_data is not None:
            headers = extra_data.get('headers', headers)
        this_token = self.token['token']
        auth_headers = self.auth_header(this_token)
        headers.update(auth_headers)
        async with self.session.get(self.url, headers=headers) as response:
            try:
                response.raise_for_status()
            except ClientResponseError as e:
                response_auth_header = response.headers.get('www-authenticate')
                # Need to retry request
                if handle_401 and e.status == 401 and response_auth_header is not None:
                    # Token has not been updated during request
                    if self.token['token'] is None or \
                       self.token['token'] == this_token:

                        self.token['token'] = None
                        await self.update_token(response_auth_header, this_token)
                    return await self._run(handle_401=False, extra_data=extra_data)
                else:
                    raise
            to_return = await self._handle_response(response)
            await response.release()
            self.response_headers = response.headers

        if self._close_session_on_finalize:
            await self.session.close()
        return to_return

    async def update_token(self, response_auth_header, used_token):
        """
        Update the Bearer token to be used with all requests.
        """
        async with self.token_lock:
            # another coroutine already refreshed the token while we waited
            if self.token['token'] is not None and self.token['token'] != used_token:
                return
            log.info("Updating bearer token")
            bearer_info_string = response_auth_header[len("Bearer "):]
            bearer_info_list = re.split(',(?=[^=,]+=)', bearer_info_string)

            # The remaining string consists of comma-separated key=value pairs
            auth_query_dict = {}
            for key, value in (item.split('=') for item in bearer_info_list):
                # The value is a string within a string, ex: '"value"'
                auth_query_dict[key] = json.loads(value)
            try:
                token_base_url = auth_query_dict.pop('realm')
            except KeyError:
                raise IOError(_("No realm specified for token auth challenge."))

            # Construct a url with query parameters containing token auth challenge info
            parsed_url = parse.urlparse(token_base_url)
            # Add auth query params to query dict and urlencode into a string
            new_query = parse.urlencode({**parse.parse_qs(parsed_url.query), **auth_query_dict})
            updated_parsed = parsed_url._replace(query=new_query)
            token_url = parse.urlunparse(updated_parsed)

            async with self.session.get(token_url, raise_for_status=True) as token_response:
                token_data = await token_response.text()

            self.token['token'] = json.loads(token_data)['token']

    @staticmethod
    def auth_header(token):
        """
        Create an auth header that optionally includes a bearer token.

        Args:
            token (str): Bearer token to use in header

        Returns:
            dictionary: containing Authorization headers or {} if token is None.

        """
        if token is not None:
            return {'Authorization': 'Bearer {token}'.format(token=token)}
        return {}
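
All downloader instances share one token dict and one class-level lock, and update_token re-checks the token under the lock so a burst of 401s triggers a single refresh. A stripped-down sketch of that double-checked refresh (fetch_new_token is a stand-in for the realm request, not part of the class above):

import asyncio

class TokenCache:
    def __init__(self, fetch_new_token):
        self._fetch = fetch_new_token
        self._lock = asyncio.Lock()
        self.token = None

    async def refresh(self, used_token):
        async with self._lock:
            # another coroutine may have refreshed while we waited
            if self.token is not None and self.token != used_token:
                return self.token
            self.token = await self._fetch()
            return self.token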
Example #21
    async def request(self, route, *, files=None, form=None, **kwargs):
        bucket = route.bucket
        method = route.method
        url = route.url

        lock = self._locks.get(bucket)
        if lock is None:
            lock = asyncio.Lock()
            if bucket is not None:
                self._locks[bucket] = lock

        # header creation
        headers = {
            'User-Agent': self.user_agent,
            'X-Ratelimit-Precision': 'millisecond',
        }

        if self.token is not None:
            headers['Authorization'] = 'Bot ' + self.token if self.bot_token else self.token
        # some checking if it's a JSON request
        if 'json' in kwargs:
            headers['Content-Type'] = 'application/json'
            kwargs['data'] = utils.to_json(kwargs.pop('json'))

        try:
            reason = kwargs.pop('reason')
        except KeyError:
            pass
        else:
            if reason:
                headers['X-Audit-Log-Reason'] = _uriquote(reason, safe='/ ')

        kwargs['headers'] = headers

        # Proxy support
        if self.proxy is not None:
            kwargs['proxy'] = self.proxy
        if self.proxy_auth is not None:
            kwargs['proxy_auth'] = self.proxy_auth

        if not self._global_over.is_set():
            # wait until the global lock is complete
            await self._global_over.wait()

        await lock.acquire()
        with MaybeUnlock(lock) as maybe_lock:
            for tries in range(5):
                if files:
                    for f in files:
                        f.reset(seek=tries)

                if form:
                    form_data = aiohttp.FormData()
                    for params in form:
                        form_data.add_field(**params)
                    kwargs['data'] = form_data

                try:
                    async with self.__session.request(method, url, **kwargs) as r:
                        log.debug('%s %s with %s has returned %s', method, url, kwargs.get('data'), r.status)

                        # even errors have text involved in them so this is safe to call
                        data = await json_or_text(r)

                        # check if we have rate limit header information
                        remaining = r.headers.get('X-Ratelimit-Remaining')
                        if remaining == '0' and r.status != 429:
                            # we've depleted our current bucket
                            delta = utils._parse_ratelimit_header(r, use_clock=self.use_clock)
                            log.debug('A rate limit bucket has been exhausted (bucket: %s, retry: %s).', bucket, delta)
                            maybe_lock.defer()
                            self.loop.call_later(delta, lock.release)

                        # the request was successful so just return the text/json
                        if 300 > r.status >= 200:
                            log.debug('%s %s has received %s', method, url, data)
                            return data

                        # we are being rate limited
                        if r.status == 429:
                            if not r.headers.get('Via'):
                                # Banned by Cloudflare more than likely.
                                raise HTTPException(r, data)

                            fmt = 'We are being rate limited. Retrying in %.2f seconds. Handled under the bucket "%s"'

                            # sleep a bit
                            retry_after = data['retry_after'] / 1000.0
                            log.warning(fmt, retry_after, bucket)

                            # check if it's a global rate limit
                            is_global = data.get('global', False)
                            if is_global:
                                log.warning('Global rate limit has been hit. Retrying in %.2f seconds.', retry_after)
                                self._global_over.clear()

                            await asyncio.sleep(retry_after)
                            log.debug('Done sleeping for the rate limit. Retrying...')

                            # release the global lock now that the
                            # global rate limit has passed
                            if is_global:
                                self._global_over.set()
                                log.debug('Global rate limit is now over.')

                            continue

                        # we've received a 500 or 502, unconditional retry
                        if r.status in {500, 502}:
                            await asyncio.sleep(1 + tries * 2)
                            continue

                        # the usual error cases
                        if r.status == 403:
                            raise Forbidden(r, data)
                        elif r.status == 404:
                            raise NotFound(r, data)
                        elif r.status == 503:
                            raise DiscordServerError(r, data)
                        else:
                            raise HTTPException(r, data)

                # This is handling exceptions from the request
                except OSError as e:
                    # Connection reset by peer
                    if tries < 4 and e.errno in (54, 10054):
                        continue
                    raise

            # We've run out of retries, raise.
            if r.status >= 500:
                raise DiscordServerError(r, data)

            raise HTTPException(r, data)
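
The method lazily creates one asyncio.Lock per rate-limit bucket, so requests in the same bucket serialize while different buckets proceed concurrently. A reduced sketch of the per-bucket locking (do_request is a placeholder for the HTTP call, not part of the client above):

import asyncio

_locks = {}

async def do_request(bucket):
    await asyncio.sleep(0.1)  # placeholder for the actual HTTP call

async def request(bucket):
    lock = _locks.setdefault(bucket, asyncio.Lock())
    async with lock:
        # one in-flight request per bucket; other buckets are unaffected
        await do_request(bucket)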
Example #22
    def __init__(self,
                 *,
                 loop=None,
                 bootstrap_servers='localhost',
                 client_id='aiokafka-' + __version__,
                 metadata_max_age_ms=300000,
                 request_timeout_ms=40000,
                 retry_backoff_ms=100,
                 ssl_context=None,
                 security_protocol='PLAINTEXT',
                 api_version='auto',
                 connections_max_idle_ms=540000,
                 sasl_mechanism='PLAIN',
                 sasl_plain_username=None,
                 sasl_plain_password=None,
                 sasl_kerberos_service_name='kafka',
                 sasl_kerberos_domain_name=None,
                 sasl_oauth_token_provider=None):
        if loop is None:
            loop = get_running_loop()

        if security_protocol not in ('SSL', 'PLAINTEXT', 'SASL_PLAINTEXT',
                                     'SASL_SSL'):
            raise ValueError(
                "`security_protocol` should be SSL, PLAINTEXT, "
                "SASL_PLAINTEXT or SASL_SSL")
        if security_protocol in ["SSL", "SASL_SSL"] and ssl_context is None:
            raise ValueError(
                "`ssl_context` is mandatory if security_protocol is "
                "SSL or SASL_SSL")
        if security_protocol in ["SASL_SSL", "SASL_PLAINTEXT"]:
            if sasl_mechanism not in ("PLAIN", "GSSAPI", "SCRAM-SHA-256",
                                      "SCRAM-SHA-512", "OAUTHBEARER"):
                raise ValueError("only `PLAIN`, `GSSAPI`, `SCRAM-SHA-256`, "
                                 "`SCRAM-SHA-512` and `OAUTHBEARER`"
                                 "sasl_mechanism are supported "
                                 "at the moment")
            if sasl_mechanism == "PLAIN" and \
               (sasl_plain_username is None or sasl_plain_password is None):
                raise ValueError(
                    "sasl_plain_username and sasl_plain_password required for "
                    "PLAIN sasl")

        self._bootstrap_servers = bootstrap_servers
        self._client_id = client_id
        self._metadata_max_age_ms = metadata_max_age_ms
        self._request_timeout_ms = request_timeout_ms
        if api_version != "auto":
            api_version = parse_kafka_version(api_version)
        self._api_version = api_version
        self._security_protocol = security_protocol
        self._ssl_context = ssl_context
        self._retry_backoff = retry_backoff_ms / 1000
        self._connections_max_idle_ms = connections_max_idle_ms
        self._sasl_mechanism = sasl_mechanism
        self._sasl_plain_username = sasl_plain_username
        self._sasl_plain_password = sasl_plain_password
        self._sasl_kerberos_service_name = sasl_kerberos_service_name
        self._sasl_kerberos_domain_name = sasl_kerberos_domain_name
        self._sasl_oauth_token_provider = sasl_oauth_token_provider

        self.cluster = ClusterMetadata(metadata_max_age_ms=metadata_max_age_ms)

        self._topics = set()  # empty set will fetch all topic metadata
        self._conns = {}
        self._loop = loop
        self._sync_task = None

        self._md_update_fut = None
        self._md_update_waiter = create_future()
        self._get_conn_lock = asyncio.Lock()
Example #23
def __init__(self, db):
    super().__init__()
    self._db = db
    self._lock = asyncio.Lock()
    self._predict_tasks = asyncio.Queue()
Example #24
class DiskService(Service):
    smartctl_args_for_disk = {}
    smartctl_args_for_device_lock = asyncio.Lock()

    @private
    async def update_smartctl_args_for_disks(self):
        await self.smartctl_args_for_device_lock.acquire()
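        # Note: the lock is released by the background task below, not in
        # this coroutine, so smartctl_args() callers block until the
        # refresh completes.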

        async def update():
            try:
                disks = [
                    disk['devname'] for disk in await self.middleware.call(
                        'disk.query',
                        [
                            ['devname', '!=', None],
                        ],
                    )
                ]

                devices = await camcontrol_list()

                self.smartctl_args_for_disk = dict(
                    zip(
                        disks, await asyncio_map(
                            functools.partial(get_smartctl_args,
                                              self.middleware, devices), disks,
                            8)))
            except Exception:
                self.logger.error("update_smartctl_args_for_disks failed",
                                  exc_info=True)
            finally:
                self.smartctl_args_for_device_lock.release()

        asyncio.ensure_future(update())

    @private
    async def smartctl_args(self, disk):
        async with self.smartctl_args_for_device_lock:
            return self.smartctl_args_for_disk.get(disk)

    @accepts(
        Str('disk'),
        List('args', items=[Str('arg')]),
        Dict(
            'options',
            Bool('cache', default=True),
            Bool('silent', default=False),
        ),
    )
    @private
    async def smartctl(self, disk, args, options):
        try:
            if options['cache']:
                smartctl_args = await self.middleware.call(
                    'disk.smartctl_args', disk)
            else:
                devices = await camcontrol_list()
                smartctl_args = await get_smartctl_args(
                    self.middleware, devices, disk)

            if smartctl_args is None:
                raise CallError(f'S.M.A.R.T. is unavailable for disk {disk}')

            cp = await smartctl(smartctl_args + args,
                                check=False,
                                stderr=subprocess.STDOUT,
                                encoding='utf8',
                                errors='ignore')
            if (cp.returncode & 0b11) != 0:
                raise CallError(
                    f'smartctl failed for disk {disk}:\n{cp.stdout}')
        except CallError:
            if options['silent']:
                return None

            raise

        return cp.stdout
Example #25
def __init__(self):
    self._lock = asyncio.Lock()
Example #26
    def __init__(self, bot):
        super().__init__(bot)

        self.guesses_lock = asyncio.Lock()
        self.guesses = LoadPickleFile(ot_datafile)
        self.standings = LoadPickleFile(otstandings_datafile)
Example #27
async def _get_lock(key: str) -> asyncio.Lock:
    async with _CH_LKS_LK:
        if key not in _CH_LKS:
            _CH_LKS[key] = asyncio.Lock()
    return _CH_LKS[key]
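
_get_lock guards its registry with a second module-level lock (_CH_LKS_LK) so two coroutines can't race to create different locks for the same key. A hypothetical caller, not part of the original module:

async def send_to_channel(key, payload):
    lock = await _get_lock(key)
    async with lock:
        ...  # per-channel critical section, serialized by key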

Example #28

async def lock_manager():
    # share one Lock across ten concurrent tasks; func1 is assumed to be
    # defined elsewhere and to take (index, lock)
    lock = asyncio.Lock()
    await asyncio.gather(*[func1(i, lock) for i in range(10)])
Example #29
    def __init__(self, *args, loop=None, **kwargs):
        super().__init__(*args, **kwargs)
        # don't call asyncio.get_event_loop() in the signature: a default
        # argument is evaluated once at definition time, not per instance
        self._loop = loop or asyncio.get_event_loop()

        self._write_lock = asyncio.Lock()
        self._read_lock = asyncio.Lock()

Example #30

def __init__(self, credential, *scopes, **kwargs):
    super().__init__(credential, *scopes, **kwargs)
    self._lock = asyncio.Lock()