Example #1
    def test_ttl(self):
        cache = TTLCache(maxsize=2, ttl=2, timer=Timer())
        self.assertEqual(0, cache.timer())
        self.assertEqual(2, cache.ttl)

        cache[1] = 1
        self.assertEqual(1, cache[1])
        self.assertEqual(1, len(cache))
        self.assertEqual({1}, set(cache))

        cache.timer.tick()
        self.assertEqual(1, cache[1])
        self.assertEqual(1, len(cache))
        self.assertEqual({1}, set(cache))

        cache[2] = 2
        self.assertEqual(1, cache[1])
        self.assertEqual(2, cache[2])
        self.assertEqual(2, len(cache))
        self.assertEqual({1, 2}, set(cache))

        cache.timer.tick()
        self.assertNotIn(1, cache)
        self.assertEqual(2, cache[2])
        self.assertEqual(1, len(cache))
        self.assertEqual({2}, set(cache))

        cache[3] = 3
        self.assertNotIn(1, cache)
        self.assertEqual(2, cache[2])
        self.assertEqual(3, cache[3])
        self.assertEqual(2, len(cache))
        self.assertEqual({2, 3}, set(cache))

        cache.timer.tick()
        self.assertNotIn(1, cache)
        self.assertNotIn(2, cache)
        self.assertEqual(3, cache[3])
        self.assertEqual(1, len(cache))
        self.assertEqual({3}, set(cache))

        cache.timer.tick()
        self.assertNotIn(1, cache)
        self.assertNotIn(2, cache)
        self.assertNotIn(3, cache)

        with self.assertRaises(KeyError):
            del cache[1]
        with self.assertRaises(KeyError):
            cache.pop(2)
        with self.assertRaises(KeyError):
            del cache[3]

        self.assertEqual(0, len(cache))
        self.assertEqual(set(), set(cache))
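
These tests drive expiry with a Timer helper from cachetools' own test suite rather than the wall clock. A minimal sketch consistent with the assertions above (an assumption about the test fixture, not public cachetools API); auto=True, used in the atomic test below, advances the clock on every read:

class Timer:
    def __init__(self, auto=False):
        self.time = 0
        self.auto = auto

    def __call__(self):
        # TTLCache reads the current time through this callable
        if self.auto:
            self.time += 1
        return self.time

    def tick(self):
        # Tests advance time manually between assertions
        self.time += 1

cache.timer.tick() works because TTLCache wraps the supplied timer but delegates unknown attributes to it.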
Example #2
File: test_ttl.py  Project: tkem/cachetools
    def test_ttl(self):
        cache = TTLCache(maxsize=2, ttl=1, timer=Timer())
        self.assertEqual(0, cache.timer())
        self.assertEqual(1, cache.ttl)

        cache[1] = 1
        self.assertEqual({1}, set(cache))
        self.assertEqual(1, len(cache))
        self.assertEqual(1, cache[1])

        cache.timer.tick()
        self.assertEqual({1}, set(cache))
        self.assertEqual(1, len(cache))
        self.assertEqual(1, cache[1])

        cache[2] = 2
        self.assertEqual({1, 2}, set(cache))
        self.assertEqual(2, len(cache))
        self.assertEqual(1, cache[1])
        self.assertEqual(2, cache[2])

        cache.timer.tick()
        self.assertEqual({2}, set(cache))
        self.assertEqual(1, len(cache))
        self.assertNotIn(1, cache)
        self.assertEqual(2, cache[2])

        cache[3] = 3
        self.assertEqual({2, 3}, set(cache))
        self.assertEqual(2, len(cache))
        self.assertNotIn(1, cache)
        self.assertEqual(2, cache[2])
        self.assertEqual(3, cache[3])

        cache.timer.tick()
        self.assertEqual({3}, set(cache))
        self.assertEqual(1, len(cache))
        self.assertNotIn(1, cache)
        self.assertNotIn(2, cache)
        self.assertEqual(3, cache[3])

        cache.timer.tick()
        self.assertEqual(set(), set(cache))
        self.assertEqual(0, len(cache))
        self.assertNotIn(1, cache)
        self.assertNotIn(2, cache)
        self.assertNotIn(3, cache)

        with self.assertRaises(KeyError):
            del cache[1]
        with self.assertRaises(KeyError):
            cache.pop(2)
        with self.assertRaises(KeyError):
            del cache[3]
Example #3
from cachetools import TTLCache


class MemoryCache:
    #
    # Constructor
    #
    def __init__(self):
        self._cache = TTLCache(maxsize=1024, ttl=3600)

    #
    # set
    #
    def set(self, key, value):
        self._cache[key] = value

    #
    # get
    #
    def get(self, key):
        if key in self._cache: return self._cache[key]
        return None

    #
    # delete
    #
    def delete(self, key):
        # Raises KeyError if the key is missing or already expired
        return self._cache.pop(key)
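
A quick usage sketch of the wrapper above (key and value are illustrative):

cache = MemoryCache()
cache.set("user:1", {"name": "Alice"})
assert cache.get("user:1") == {"name": "Alice"}
assert cache.get("missing") is None   # absent or expired keys yield None
cache.delete("user:1")                # would raise KeyError once expired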
Example #4
    def test_atomic(self):
        cache = TTLCache(maxsize=1, ttl=1, timer=Timer(auto=True))
        cache[1] = 1
        self.assertEqual(1, cache[1])
        cache[1] = 1
        self.assertEqual(1, cache.get(1))
        cache[1] = 1
        self.assertEqual(1, cache.pop(1))
        cache[1] = 1
        self.assertEqual(1, cache.setdefault(1))
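
With auto=True every timer read advances the clock, so this test exercises the expiry boundary: TTLCache samples the timer once per operation, and with ttl=1 a value written at tick t is still visible when read back at tick t+1. Reusing the Timer sketch from Example #1:

cache = TTLCache(maxsize=1, ttl=1, timer=Timer(auto=True))
cache[1] = 1           # timer read -> t; entry expires after t + 1
assert cache[1] == 1   # timer read -> t + 1, still within the ttl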
Example #5
File: base.py  Project: Catstyle/applied
class BaseBackend:

    TIMEOUT = 30 * 1000

    def __init__(self, ttl):
        self.ttl = ttl
        self.values = TTLCache(maxsize=1024, ttl=ttl / 1000)

    def load_data(self, key: str, data):
        try:
            self.values[key] = json.loads(data)
        except (TypeError, ValueError):
            self.values[key] = data

    def fetch_value(self, key: str):
        # Hook for concrete backends: fetch the value for ``key`` and load it
        pass

    def get(self, key: str) -> Union[bytes, dict]:
        if key not in self.values:
            self.fetch_value(key)
        try:
            # still maybe empty
            return self.values[key]
        except KeyError:
            return MISSING

    def clear(self, key: str):
        self.values.pop(key, None)

    def save(self, key: str, value: Any, ttl: int = None):
        # Note: the per-call ttl is ignored; entries use the cache-wide ttl
        self.values[key] = value

    def wait(self, key: str, timeout: int):
        # same as get in BaseBackend
        return self.get(key)

    def request_renew(self, key: str, value: str, timeout: int):
        return True

    def finish_renew(self, key: str, value: str):
        pass
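
The class above assumes module-level imports and a MISSING sentinel from the surrounding project. A hedged, self-contained sketch of a concrete backend (DictBackend and its source dict are illustrative; MISSING is defined here only to make the snippet runnable):

import json
from typing import Any, Union

from cachetools import TTLCache

MISSING = object()  # assumed sentinel: "no value available"

class DictBackend(BaseBackend):
    # Toy backend that "fetches" values from a plain dict
    def __init__(self, source, ttl=30 * 1000):
        super().__init__(ttl)
        self.source = source

    def fetch_value(self, key: str):
        if key in self.source:
            self.load_data(key, self.source[key])

backend = DictBackend({"cfg": '{"retries": 3}'})
assert backend.get("cfg") == {"retries": 3}  # JSON strings are decoded
assert backend.get("nope") is MISSING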
Example #6
class FuturesTrader:
    def __init__(self):
        self.client: AsyncClient = None
        self.state: dict = None
        self.prices: dict = {}
        self.symbols: dict = {}
        self.price_streamer = None
        self.clocks = NamedLock()
        self.olock = asyncio.Lock()  # lock to place only one order at a time
        self.slock = asyncio.Lock()  # lock for stream subscriptions
        self.order_queue = asyncio.Queue()
        # cache to disallow orders with same symbol, entry and first TP for 10 mins
        self.sig_cache = TTLCache(maxsize=100, ttl=600)
        self.balance = 0
        self.results_handler = None

    async def init(self, api_key, api_secret, state=None, test=False, loop=None):
        self.state = state if state is not None else {}  # avoid a shared mutable default
        self.client = await AsyncClient.create(api_key=api_key,
                                               api_secret=api_secret,
                                               testnet=test,
                                               loop=loop)
        self.manager = BinanceSocketManager(self.client, loop=loop)
        self.user_stream = UserStream(api_key, api_secret, test=test)
        if not self.state.get("streams"):
            self.state["streams"] = []
        if not self.state.get("orders"):
            self.state["orders"] = {}
        await self._gather_orders()
        await self._watch_orders()
        await self._subscribe_futures_user()
        resp = await self.client.futures_exchange_info()
        for info in resp["symbols"]:
            self.symbols[info["symbol"]] = info
        resp = await self.client.futures_account_balance()
        for item in resp:
            if item["asset"] == "USDT":
                self.balance = float(item["balance"])
        logging.info(f"Account balance: {self.balance} USDT", on="blue")

    async def queue_signal(self, signal: Signal):
        await self.order_queue.put(signal)

    async def close_trades(self, tag, coin=None):
        if coin is None:
            logging.info(
                f"Attempting to close all trades associated with channel {tag}",
                color="yellow")
        else:
            logging.info(
                f"Attempting to close {coin} trades associated with channel {tag}",
                color="yellow")
        async with self.olock:
            removed = []
            for order_id, order in self.state["orders"].items():
                if order.get("tag") != tag:
                    continue
                if coin is not None and order["sym"] != f"{coin}USDT":
                    continue
                children = [] + order["t_ord"]
                if order.get("s_ord"):
                    children.append(order["s_ord"])
                removed += children
                for oid in children:
                    await self._cancel_order(oid, order["sym"])
                quantity = 0
                for tid, q in zip(order["t_ord"], order["t_q"]):
                    if not self.state["orders"].get(tid, {}).get("filled"):
                        quantity += q
                try:
                    if quantity > 0:
                        resp = await self.client.futures_create_order(
                            symbol=order["sym"],
                            positionSide="LONG"
                            if order["side"] == "BUY" else "SHORT",
                            side="SELL" if order["side"] == "BUY" else "BUY",
                            type=OrderType.MARKET,
                            quantity=self._round_qty(order["sym"], quantity),
                        )
                    else:
                        resp = await self.client.futures_cancel_order(
                            symbol=order["sym"],
                            origClientOrderId=order_id,
                        )
                    logging.info(
                        f"Closed position for order {order}, resp: {resp}",
                        color="yellow")
                except Exception as err:
                    logging.error(
                        f"Failed to close position for order {order}, err: {err}"
                    )
            for oid in removed:
                self.state["orders"].pop(oid, None)
            if not removed:
                logging.info(
                    f"Didn't find any matching positions for {tag} to close",
                    color="yellow")

    async def _gather_orders(self):
        async def _gatherer():
            logging.info("Waiting for orders to be queued...")
            while True:
                signal = await self.order_queue.get()
                if self.symbols.get(f"{signal.coin}USDT") is None:
                    logging.info(f"Unknown symbol {signal.coin} in signal",
                                 color="yellow")
                    continue

                async def _process(signal):
                    # Process one order at a time for each symbol
                    async with self.clocks.lock(signal.coin):
                        registered = await self._register_order_for_signal(
                            signal)
                        if not registered:
                            logging.info(
                                f"Ignoring signal from {signal.tag} because order exists "
                                f"for {signal.coin}",
                                color="yellow")
                            return
                        for i in range(ORDER_MAX_RETRIES):
                            try:
                                await self._place_order(signal)
                                return
                            except PriceUnavailableException:
                                logging.info(
                                    f"Price unavailable for {signal.coin}",
                                    color="red")
                            except EntryCrossedException as err:
                                logging.info(
                                    f"Price went too fast ({err.price}) for signal {signal}",
                                    color="yellow")
                            except InsufficientQuantityException as err:
                                logging.info(
                                    f"Allocated ${round(err.alloc_funds, 2)} for {err.alloc_q} {signal.coin} "
                                    f"but requires ${round(err.est_funds, 2)} for {err.est_q} {signal.coin}",
                                    color="red")
                            except Exception as err:
                                logging.error(
                                    f"Failed to place order: {traceback.format_exc()} {err}"
                                )
                                break  # unknown error - don't block future signals
                            if i < ORDER_MAX_RETRIES - 1:
                                await asyncio.sleep(ORDER_RETRY_SLEEP)
                        await self._unregister_order(signal)

                asyncio.ensure_future(_process(signal))

        asyncio.ensure_future(_gatherer())

    async def _place_order(self, signal: Signal):
        await self._subscribe_futures(signal.coin)
        for _ in range(10):
            if self.prices.get(signal.coin) is not None:
                break
            logging.info(f"Waiting for {signal.coin} price to be available")
            await asyncio.sleep(1)
        if self.prices.get(signal.coin) is None:
            raise PriceUnavailableException()

        symbol = f"{signal.coin}USDT"
        logging.info(f"Modifying leverage to {signal.leverage}x for {symbol}",
                     color="green")
        await self.client.futures_change_leverage(symbol=symbol,
                                                  leverage=signal.leverage)
        price = self.prices[signal.coin]
        signal.correct(price)
        alloc_funds = self.balance * signal.fraction
        quantity = alloc_funds / (price / signal.leverage)
        logging.info(f"Corrected signal: {signal}", color="cyan")
        qty = self._round_qty(symbol, quantity)
        est_funds = qty * signal.entry / signal.leverage
        if (est_funds / alloc_funds) > PRICE_SLIPPAGE:
            raise InsufficientQuantityException(quantity, alloc_funds, qty,
                                                est_funds)

        order_id = OrderID.wait() if signal.wait_entry else OrderID.market()
        params = {
            "symbol": symbol,
            "positionSide": "LONG" if signal.is_long else "SHORT",
            "side": "BUY" if signal.is_long else "SELL",
            "type": OrderType.MARKET,
            "quantity": qty,
            "newClientOrderId": order_id,
        }

        if (signal.is_long and price > signal.max_entry) or (
                signal.is_short and price < signal.max_entry):
            raise EntryCrossedException(price)

        if signal.wait_entry:
            logging.info(
                f"Placing stop limit order for {signal.coin} (price @ {price}, entry @ {signal.entry})"
            )
            params["type"] = OrderType.STOP
            params["stopPrice"] = self._round_price(symbol, signal.entry)
            params["price"] = self._round_price(symbol, signal.max_entry)
        else:
            logging.info(
                f"Placing market order for {signal.coin} (price @ {price}, entry @ {signal.entry}"
            )

        async with self.olock:  # Lock only for interacting with orders
            try:
                resp = await self.client.futures_create_order(**params)
                self.state["orders"][order_id] = {
                    "id": resp["orderId"],
                    "qty": float(resp["origQty"]),
                    "sym": symbol,
                    "side": params["side"],
                    "ent": signal.entry if signal.wait_entry else price,
                    "sl": signal.sl,
                    "tgt": signal.targets,
                    "fnd": alloc_funds,
                    "lev": signal.leverage,
                    "tag": signal.tag,
                    "crt": int(time.time()),
                    "t_ord": [],
                    "t_q": [],
                }
                logging.info(f"Created order {order_id} for signal: {signal}, "
                             f"params: {json.dumps(params)}, resp: {resp}")
            except Exception as err:
                logging.error(
                    f"Failed to create order for signal {signal}: {err}, "
                    f"params: {json.dumps(params)}")
                if isinstance(err, BinanceAPIException) and err.code == -2021:
                    raise EntryCrossedException(price)

    async def _place_collection_orders(self, order_id):
        await self._place_sl_order(order_id)
        async with self.olock:
            odata = self.state["orders"][order_id]
            await self.results_handler(
                Trade.entry(odata["tag"], odata["sym"], odata["ent"],
                            odata["qty"], odata["lev"], odata["side"]))
            if odata.get("t_ord"):
                logging.warning(
                    f"TP order(s) already exist for parent {order_id}")
                return

            targets = odata["tgt"][:MAX_TARGETS]
            remaining = quantity = odata["qty"]
            for i, tgt in enumerate(targets):
                quantity *= 0.5
                # NOTE: Don't close position (as it'll affect other orders)
                if i == len(targets) - 1:
                    quantity = self._round_qty(odata["sym"], remaining)
                else:
                    quantity = self._round_qty(odata["sym"], quantity)
                tgt_order_id = await self._create_target_order(
                    order_id, odata["sym"], odata["side"], tgt, quantity)
                if tgt_order_id is None:
                    continue
                odata["t_ord"].append(tgt_order_id)
                odata["t_q"].append(quantity)
                self.state["orders"][tgt_order_id] = {
                    "parent": order_id,
                    "filled": False,
                }
                remaining -= quantity

    async def _create_target_order(self, order_id, symbol, side, tgt_price,
                                   rounded_qty):
        tgt_order_id = OrderID.target()
        params = {
            "symbol": symbol,
            "type": OrderType.LIMIT,
            "timeInForce": "GTC",
            "positionSide": "LONG" if side == "BUY" else "SHORT",
            "side": "SELL" if side == "BUY" else "BUY",
            "newClientOrderId": tgt_order_id,
            "price": self._round_price(symbol, tgt_price),
            "quantity": rounded_qty,
        }
        try:
            resp = await self.client.futures_create_order(**params)
            logging.info(
                f"Created limit order {tgt_order_id} for parent {order_id}, "
                f"resp: {resp}, params: {json.dumps(params)}")
            return tgt_order_id
        except Exception as err:
            logging.error(
                f"Failed to create target order for parent {order_id}: {err}, "
                f"params: {json.dumps(params)}")

    async def _subscribe_futures_user(self):
        async def _handler():
            while True:
                async with self.user_stream.message() as msg:
                    try:
                        data = msg
                        event = msg['e']
                        if event == UserEventType.AccountUpdate:
                            data = msg["a"]
                        elif event == UserEventType.OrderTradeUpdate:
                            data = msg["o"]
                        elif event == UserEventType.AccountConfigUpdate:
                            data = msg.get("ac", msg.get("ai"))
                        logging.info(f"{event}: {data}")
                        await self._handle_event(msg)
                    except Exception as err:
                        logging.exception(
                            f"Failed to handle event {msg}: {err}")

        asyncio.ensure_future(_handler())

    async def _handle_event(self, msg: dict):
        if msg["e"] == UserEventType.AccountUpdate:
            for info in msg["a"]["B"]:
                if info["a"] == "USDT":
                    self.balance = float(info["cw"])
                    logging.info(f"Account balance: {self.balance} USDT",
                                 on="blue")
        elif msg["e"] == UserEventType.OrderTradeUpdate:
            info = msg["o"]
            order_id = info["c"]
            async with self.olock:
                o = self.state["orders"].get(order_id)
                if o is None:
                    logging.warning(
                        f"Received order {order_id} but missing in state")
                    return
            if info["X"] == "FILLED":
                if OrderID.is_wait(order_id) or OrderID.is_market(order_id):
                    entry = float(info["ap"])
                    logging.info(
                        f"Placing TP/SL orders for fulfilled order {order_id} (entry: {entry})",
                        color="green")
                    async with self.olock:
                        self.state["orders"][order_id]["ent"] = entry
                    await self._place_collection_orders(order_id)
                elif OrderID.is_stop_loss(order_id):
                    async with self.olock:
                        logging.info(
                            f"Order {order_id} hit stop loss. Removing TP orders...",
                            color="red")
                        sl = self.state["orders"].pop(order_id)
                        parent = self.state["orders"].pop(sl["parent"])
                        for oid in parent["t_ord"]:
                            self.state["orders"].pop(
                                oid, None)  # It might not exist
                            await self._cancel_order(oid, parent["sym"])
                        await self.results_handler(
                            Trade.target(parent["tag"],
                                         parent["sym"],
                                         parent["ent"],
                                         parent["qty"],
                                         parent["lev"],
                                         float(info["ap"]),
                                         float(info["q"]),
                                         is_long=parent["side"] == "BUY",
                                         is_sl=True))
                elif OrderID.is_target(order_id):
                    logging.info(f"TP order {order_id} hit.", color="green")
                    await self._move_stop_loss(order_id)

    async def _move_stop_loss(self, tp_id: str):
        async with self.olock:
            tp = self.state["orders"][tp_id]
            tp["filled"] = True
            parent = self.state["orders"][tp["parent"]]
            targets = parent["t_ord"]
            if tp_id not in targets:
                if parent.get("s_ord") is None:
                    logging.warning(f"SL doesn't exist for order {parent}")
                    return
                logging.warning(
                    f"Couldn't find TP order {tp_id} in parent {parent}, closing trade",
                    color="red")
                await self.close_trades(parent["tag"],
                                        parent["sym"].replace("USDT", ""))
                return

            idx = targets.index(tp_id)
            await self.results_handler(
                Trade.target(parent["tag"],
                             parent["sym"],
                             parent["ent"],
                             parent["qty"],
                             parent["lev"],
                             parent["tgt"][idx],
                             parent["t_q"][idx],
                             is_long=parent["side"] == "BUY"))

            if tp_id == targets[-1]:
                logging.info(f"All TP orders hit. Removing parent {parent}")
                parent = self.state["orders"].pop(tp["parent"])
                for oid in parent["t_ord"]:
                    self.state["orders"].pop(oid, None)  # It might not exist
                self.state["orders"].pop(parent["s_ord"])
                await self._cancel_order(parent["s_ord"], parent["sym"])
                return
            else:
                new_price = parent["ent"]
                quantity = sum(parent["t_q"][idx + 1:])

        await self._place_sl_order(tp["parent"], new_price, quantity)

    async def _place_sl_order(self,
                              parent_id: str,
                              new_price=None,
                              quantity=None):
        async with self.olock:
            odata = self.state["orders"][parent_id]
            symbol = odata["sym"]
            sl_order_id = OrderID.stop_loss()
            if odata.get("s_ord") is not None:
                logging.info(
                    f"Moving SL order for {parent_id} to new price {new_price}"
                )
                await self._cancel_order(odata["s_ord"], symbol)
            params = {
                "symbol": symbol,
                "positionSide": "LONG" if odata["side"] == "BUY" else "SHORT",
                "side": "SELL" if odata["side"] == "BUY" else "BUY",
                "type": OrderType.STOP_MARKET,
                "newClientOrderId": sl_order_id,
                "stopPrice": self._round_price(
                    symbol, new_price if new_price is not None else odata["sl"]),
                "quantity": self._round_qty(
                    symbol, quantity if quantity is not None else odata["qty"]),
            }
            for _ in range(2):
                try:
                    resp = await self.client.futures_create_order(**params)
                    odata["s_ord"] = sl_order_id
                    self.state["orders"][sl_order_id] = {
                        "parent": parent_id,
                        "filled": False,
                    }
                    logging.info(
                        f"Created SL order {sl_order_id} for parent {parent_id}, "
                        f"resp: {resp}, params: {json.dumps(params)}")
                    break
                except Exception as err:
                    logging.error(
                        f"Failed to create SL order for parent {parent_id}: {err}, "
                        f"params: {json.dumps(params)}")
                    if isinstance(
                            err, BinanceAPIException
                    ) and err.code == -2021:  # price is around SL now
                        logging.info(
                            f"Placing market order for parent {parent_id} "
                            "after attempt to create SL order",
                            color="yellow")
                        params.pop("stopPrice")
                        params["type"] = OrderType.MARKET

    async def _watch_orders(self):
        async def _watcher():
            while True:
                try:
                    open_symbols = await self._expire_outdated_orders_and_get_open_symbols()
                except Exception as err:
                    logging.exception(f"Failed to expire outdated orders: {err}")
                    await asyncio.sleep(ORDER_WATCH_INTERVAL)
                    continue  # open_symbols would be undefined below
                async with self.slock:
                    redundant = set(
                        self.state["streams"]).difference(open_symbols)
                    if redundant:
                        logging.warning(
                            f"Resetting price streams to {open_symbols}",
                            color="yellow")
                        self.state["streams"] = open_symbols
                await self._subscribe_futures(resub=redundant)
                for sym in redundant:
                    self.prices.pop(f"{sym}USDT", None)

                now = time.time()
                async with self.olock:
                    removed = []
                    for order_id, order in self.state["orders"].items():
                        if not OrderID.is_wait(order_id):
                            continue
                        if order.get("t_ord"):
                            continue
                        if now < (order["crt"] + WAIT_ORDER_EXPIRY):
                            continue
                        logging.warning(
                            f"Wait order {order_id} has expired. Removing...",
                            color="yellow")
                        removed.append(order_id)
                        await self._cancel_order(order_id, order["sym"])
                    for order_id in removed:
                        self.state["orders"].pop(order_id)
                await asyncio.sleep(ORDER_WATCH_INTERVAL)

        asyncio.ensure_future(_watcher())

    async def _expire_outdated_orders_and_get_open_symbols(self):
        open_symbols = []
        open_orders, positions = {}, {}
        sl_orders = []
        async with self.olock:
            resp = await self.client.futures_account()
            for pos in resp["positions"]:
                amount, side = float(pos["positionAmt"]), pos["positionSide"]
                if side == "BOTH" or amount == 0:
                    continue
                positions[pos["symbol"] + side] = amount
            resp = await self.client.futures_get_open_orders()
            for order in resp:
                open_orders[order["clientOrderId"]] = order
            logging.info(
                f"Checking {len(open_orders)} orders for {len(positions)} positions: {positions}"
            )
            for oid, order in open_orders.items():
                odata = self.state["orders"].get(oid)
                if OrderID.is_market(oid) or OrderID.is_wait(oid):
                    open_symbols.append(order["symbol"][:-4])
                elif odata and (OrderID.is_target(oid)
                                or OrderID.is_stop_loss(oid)):
                    odata["filled"] = False

            removed = []
            for oid, odata in list(self.state["orders"].items()):
                if (OrderID.is_target(oid) or OrderID.is_stop_loss(oid)):
                    if self.state["orders"].get(odata["parent"]) is None:
                        logging.warning(
                            f"Order {oid} is now an orphan. Flagging for removal"
                        )
                        removed.append(oid)
                if not (OrderID.is_wait(oid) or OrderID.is_market(oid)):
                    continue
                if open_orders.get(oid) is not None:  # only filled orders
                    continue
                if (time.time() -
                        odata["crt"]) < NEW_ORDER_TIMEOUT:  # must be old
                    continue
                if odata.get("s_ord") is None:  # must have stop loss order
                    continue
                side = "LONG" if odata["side"] == "BUY" else "SHORT"
                if not positions.get(odata["sym"] + side):
                    logging.warning(
                        f"Order {oid} missing in open positions. Flagging for removal"
                    )
                    removed.append(oid)
                    continue
                children = [odata["s_ord"]] + odata["t_ord"]
                for cid in children:
                    if open_orders.get(cid) is not None:  # ignore open orders
                        continue
                    is_filled = False
                    try:
                        cdata = await self.client.futures_get_order(
                            symbol=odata["sym"], origClientOrderId=cid)
                        is_filled = cdata["status"] == "FILLED"
                    except Exception as err:
                        logging.warning(
                            f"Error fetching order {cid} for parent {odata}: {err}"
                        )
                    if is_filled:
                        self.state["orders"][cid] = {
                            "parent": oid,
                            "filled": True,
                        }
                    else:
                        logging.info(
                            f"Missing order {cid} detected for parent {oid}",
                            color="yellow")
                        self.state["orders"].pop(cid, None)
                order_hit = []
                for i in range(len(children)):
                    cdata = self.state["orders"].get(children[i])
                    order_hit.append(cdata["filled"] if cdata else False)
                    if cdata is not None:
                        continue
                    if i == 0:
                        sl_orders.append({"id": oid})
                        continue
                    tgt_id = await self._create_target_order(
                        oid, odata["sym"], odata["side"], odata["tgt"][i - 1],
                        odata["t_q"][i - 1])
                    if tgt_id is not None:
                        odata["t_ord"][i - 1] = tgt_id
                        self.state["orders"][tgt_id] = {
                            "parent": oid,
                            "filled": False,
                        }
                if order_hit[0] or all(order_hit[1:]):  # All TPs or SL hit
                    removed.append(oid)

            for oid in removed:
                logging.warning(f"Removing outdated order {oid}",
                                color="yellow")
                parent = self.state["orders"].pop(oid, None)
                if not (OrderID.is_wait(oid) or OrderID.is_market(oid)):
                    continue
                if parent is None:
                    continue
                sym = parent["sym"]
                for cid in [parent["s_ord"]] + parent["t_ord"]:
                    logging.warning(f"Removing outdated child {cid}",
                                    color="yellow")
                    c = self.state["orders"].pop(cid, None)
                    if c and not c["filled"]:
                        await self._cancel_order(cid, sym)

            for o in sl_orders:
                oid = o["id"]
                for i, tid in enumerate(self.state["orders"][oid]["t_ord"]):
                    if self.state["orders"][tid]["filled"]:
                        o["tp"] = tid
                        break

        for o in sl_orders:
            if o.get("tp"):
                await self._move_stop_loss(o["tp"])
            else:
                await self._place_sl_order(o["id"])

        return open_symbols

    async def _subscribe_futures(self, coin: str = None, resub=False):
        async with self.slock:
            num_streams = len(set(self.state["streams"]))
            resub = resub or (self.price_streamer is None)
            if coin is not None:
                coin = coin.upper()
                # We should have duplicates because it should be possible to long/short
                # on top of an existing long/short.
                self.state["streams"].append(coin)

            if num_streams != len(set(self.state["streams"])):
                resub = True

            if resub and self.price_streamer is not None:
                logging.info("Cancelling ongoing ws stream for resubscribing")
                self.price_streamer.cancel()

            symbols = set(self.state["streams"])
            if not resub or not symbols:
                return

        async def _streamer():
            subs = list(map(lambda s: s.lower() + "usdt@aggTrade", symbols))
            logging.info(
                f"Spawning listener for {len(symbols)} symbol(s): {symbols}",
                color="magenta")
            async with self.manager.futures_multiplex_socket(subs) as stream:
                while True:
                    msg = await stream.recv()
                    if msg is None:
                        logging.warning("Received 'null' in price stream",
                                        color="red")
                        continue
                    try:
                        symbol = msg["stream"].split("@")[0][:-4].upper()
                        self.prices[symbol.upper()] = float(msg["data"]["p"])
                    except Exception as err:
                        logging.error(
                            f"Failed to get price for {msg['stream']}: {err}")

        self.price_streamer = asyncio.ensure_future(_streamer())

    async def _cancel_order(self, oid: str, symbol: str):
        try:
            resp = await self.client.futures_cancel_order(
                symbol=symbol, origClientOrderId=oid)
            logging.info(f"Cancelled order {oid}: {resp}")
        except Exception as err:
            logging.error(f"Failed to cancel order {oid}: {err}")

    async def _register_order_for_signal(self, signal: Signal):
        async with self.olock:
            self.sig_cache.expire()
            # Reject a repeat signal for the same coin and first target within the cache TTL (10 minutes)
            key = self._cache_key(signal)
            if self.sig_cache.get(key) is not None:
                return False
            self.sig_cache[key] = ()
            return True

    async def _unregister_order(self, signal: Signal):
        async with self.olock:
            self.sig_cache.pop(self._cache_key(signal), None)

    def _cache_key(self, signal: Signal):
        return f"{signal.coin}_{signal.targets[0]}"  # coin and first target for filter

    # MARK: Rounding for min quantity and min price for symbols

    def _round_price(self, symbol: str, price: float):
        info = self.symbols[symbol]
        for f in info["filters"]:
            if f["filterType"] == "PRICE_FILTER":
                # Precision: number of decimals implied by the tick size
                decimals = int(round(math.log(1 / float(f["tickSize"]), 10), 0))
                return round(price, decimals)
        return price

    def _round_qty(self, symbol: str, qty: float):
        info = self.symbols[symbol]
        for f in info["filters"]:
            if f["filterType"] == "LOT_SIZE":
                decimals = int(round(math.log(1 / float(f["minQty"]), 10), 0))
                return round(qty, decimals)
        return qty
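
Two patterns in this class are worth isolating. The signal registration above is a TTL-based dedup gate, a TTLCache used as a set with expiry (a standalone sketch; names are illustrative):

from cachetools import TTLCache

sig_cache = TTLCache(maxsize=100, ttl=600)  # 10-minute dedup window

def register(key: str) -> bool:
    sig_cache.expire()       # evict expired entries eagerly
    if key in sig_cache:
        return False         # duplicate within the TTL window
    sig_cache[key] = ()
    return True

assert register("BTC_40000")
assert not register("BTC_40000")  # rejected until the entry expires

And the rounding helpers derive decimal precision from the exchange filter's step size; worked through for a tick size of 0.001:

import math

tick_size = 0.001
decimals = int(round(math.log(1 / tick_size, 10), 0))  # log10(1000) -> 3
assert decimals == 3
assert round(123.45678, decimals) == 123.457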
Example #7
class CustomThreatService(BaseController):
    """
    Implements a custom threat service for Resilient.
    The root path (/xxx/ below) is configurable.
    The service provides the following URLs:
        OPTIONS /<root_path>/<any_sub_path>
        POST    /<root_path>/<any_sub_path>
        GET     /<root_path>/<any_sub_path>/<id>
    """

    # Arbitrary constant
    namespace = UUID('18222d9c-adf0-409c-aa19-beb27130ba12')

    def __init__(self, opts):
        super(CustomThreatService, self).__init__(**_make_args(opts))

        # Configurable options
        self.options = opts.get(CONFIG_SECTION, {})

        # Do we support "file-content" artifacts?  Default is no.
        # TODO add implementation support to parse the file content
        self.support_upload_file = strtobool(
            self.options.get(CONFIG_UPLOAD_FILE.key,
                             CONFIG_UPLOAD_FILE.default))

        # Default time that this service will tell Resilient to retry
        self.first_retry_secs = int(
            self.options.get(CONFIG_FIRST_RETRY_SECS.key,
                             CONFIG_FIRST_RETRY_SECS.default)) or 5
        self.later_retry_secs = int(
            self.options.get(CONFIG_LATER_RETRY_SECS.key,
                             CONFIG_LATER_RETRY_SECS.default)) or 60

        # Size of the request cache
        self.cache_size = int(
            self.options.get(CONFIG_CACHE_SIZE.key, CONFIG_CACHE_SIZE.default))
        # TTL of the request cache (millis before we give up on a request lookup)
        self.cache_ttl = int(
            self.options.get(CONFIG_CACHE_TTL.key, CONFIG_CACHE_TTL.default))

        # Limit to the number of queries we'll answer for unfinished searchers (count before giving up on them)
        self.max_retries = int(
            self.options.get(CONFIG_MAX_RETRIES.key,
                             CONFIG_MAX_RETRIES.default))

        # IDs and their results are maintained in a cache so that we can set
        # an upper bound on the number of in-progress and recent lookups.
        self.cache = TTLCache(maxsize=self.cache_size, ttl=self.cache_ttl)

        # Helper component does event dispatch work
        self.async_helper = CustomThreatServiceHelper(self)
        (self.helper_thread, self.bridge) = self.async_helper.start()

        urls = ["{0}/{1}".format(self.channel, e) for e in self.events()]
        LOG.info("Web handler for %s", ", ".join(urls))

        self.auth_user = self.options.get(CONFIG_AUTH_USER.key,
                                          CONFIG_AUTH_USER.default)
        self.auth_password = self.options.get(CONFIG_AUTH_PASSWORD.key,
                                              CONFIG_AUTH_PASSWORD.default)

    # Web endpoints

    @exposeWeb("OPTIONS")
    def _options_request(self, event, *args, **kwargs):
        """
        Options indicate to Resilient whether file upload is supported.
        """
        LOG.info(event.args[0])
        options = {"upload_file": bool(self.support_upload_file)}
        return options

    @exposeWeb("POST")
    def _post_request(self, event, *args, **kwargs):
        LOG.info(event.args[0])

        if not self.check_authentication(event.args[0]):
            LOG.error("Custom Threat Service Authentication Error")
            response = event.args[1]
            response.status = 500
            return {"id": None, "hits": []}

        result = self._handle_post_request(event, *args, **kwargs)
        LOG.info("%s: %s", event.args[1].status, json.dumps(result))
        return result

    def _handle_post_request(self, event, *args, **kwargs):
        """
        Responds to POST /cts/<anything>

        The URL below /cts/ is specific to this threat service. For example,
        /cts/one and /cts/two can be registered as two separate threat sources.
        The string 'one' or 'two' becomes the channel that searcher events are dispatched on.

        Request is a ThreatServiceArtifactDTO containing the artifact to be scanned
        Response is a ResponseDTO containing the response, or 'please retry' (HTTP status 303).
        """
        request = event.args[0]
        response = event.args[1]

        # The channel that searchers listen on for events
        cts_channel = searcher_channel(*args)

        value = request.body.getvalue()

        if not value:
            err = "Empty request"
            LOG.warning(err)
            return {"id": str(uuid4()), "hits": []}

        # Resilient sends artifacts in two formats: multi-part MIME, or plain JSON.
        # The server may send either, even when there is no file content,
        # so check the content-type and decode appropriately.
        try:
            if request.headers and "form-data" in request.headers.get(
                    "Content-Type", ""):
                multipart_data = decoder.MultipartDecoder(
                    value, request.headers["Content-Type"])
                body = json.loads(multipart_data.parts[0].text)
                LOG.debug(body)
            else:
                body = json.loads(value.decode("utf-8"))
                LOG.debug(body)
        except (ValueError, NonMultipartContentTypeException) as e:
            err = "Can't handle request: {}".format(e)
            LOG.warning(err)
            LOG.debug(value)
            return {"id": str(uuid4()), "hits": []}

        if not isinstance(body, dict):
            # Valid JSON but not a valid request.
            err = "Invalid request: {}".format(json.dumps(body))
            LOG.warning(err)
            return {"id": str(uuid4()), "hits": []}

        # Generate a request ID, derived from the artifact being requested.
        request_id = str(uuid5(self.namespace, json.dumps(body)))
        artifact_type = body.get("type", "unknown")
        artifact_value = body.get("value")
        response_object = {"id": request_id, "hits": []}
        cache_key = (cts_channel, request_id)

        if artifact_type == "net.name" and artifact_value == "localhost":
            # Hard-coded response to 'net.name' of 'localhost'
            # because this is used in 'resutil threatservicetest'
            # and we want to return an immediate (not async) response
            return response_object

        # If we already have a completed query for this key, return it immediately
        request_data = self.cache.get(cache_key)
        if request_data and request_data.get("complete"):
            response_object["hits"] = request_data.get("hits", [])
            return response_object

        response.status = 303
        response_object["retry_secs"] = self.first_retry_secs

        # Add the request to the cache, then notify searchers that there's a new request
        self.cache.setdefault(cache_key, {
            "id": request_id,
            "artifact": body,
            "hits": [],
            "complete": False
        })
        evt = ThreatServiceLookupEvent(request_id=request_id,
                                       name=artifact_type,
                                       artifact=body,
                                       channel=cts_channel)
        self.async_helper.fire(evt, HELPER_CHANNEL)

        return response_object
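
The request ID above is derived deterministically with uuid5, so identical artifact bodies map to the same cache entry and repeated POSTs share one in-progress lookup instead of spawning duplicates. In isolation (the namespace is the class constant; the body is illustrative):

import json
from uuid import UUID, uuid5

namespace = UUID('18222d9c-adf0-409c-aa19-beb27130ba12')
body = {"type": "net.ip", "value": "192.0.2.1"}
request_id = str(uuid5(namespace, json.dumps(body)))
assert request_id == str(uuid5(namespace, json.dumps(body)))  # stable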

    @exposeWeb("GET")
    def _get_request(self, event, *args, **kwargs):
        LOG.info(event.args[0])

        if not self.check_authentication(event.args[0]):
            LOG.error("Custom Threat Service Authentication Error")
            response = event.args[1]
            response.status = 500
            return {"id": None, "hits": []}

        result = self._handle_get_request(event, *args, **kwargs)
        LOG.info("%s: %s", event.args[1].status, json.dumps(result))
        return result

    def _handle_get_request(self, event, *args, **kwargs):
        """
        Responds to GET /cts/<anything>/<request-id>

        The URL below /cts/ is specific to this threat service. For example,
        /cts/one and /cts/two are considered two separate threat sources.

        Response is a ResponseDTO containing the response, or 'please retry'
        """
        LOG.info(event.args[0])
        response = event.args[1]
        request_id = None
        if not args:
            return {"id": request_id, "hits": []}

        # The ID of the lookup request
        request_id = args[-1]
        # The channel that searchers listen on for events
        cts_channel = searcher_channel(*args[:-1])

        response_object = {"id": request_id, "hits": []}

        cache_key = (cts_channel, request_id)
        request_data = self.cache.get(cache_key)
        if not request_data:
            # There's no record of this request in our cache, return empty hits
            response.status = 200
            return response_object

        response_object["hits"] = request_data["hits"]
        if not request_data["complete"]:
            # The searchers haven't finished yet, return partial hits if available
            response.status = 303
            response_object["retry_secs"] = self.later_retry_secs

            # Update the counter, so we can detect "stale" failures
            request_data["count"] = request_data.get("count", 0) + 1
            if request_data["count"] > self.max_retries:
                LOG.info("Exceeded max retries for {}".format(cache_key))
                try:
                    self.cache.pop(cache_key)
                except KeyError:
                    pass
                response.status = 200
                return response_object

            return response_object

        # Remove the result from cache
        # self.cache.pop(cache_key)

        return response_object

    @handler(channel=LOOKUP_COMPLETE_CHANNEL)
    def _lookup_complete(self, event, *args, **kwargs):
        """
        A lookup event was completed
        """
        if not isinstance(event.parent, ThreatServiceLookupEvent):
            return
        results = event.parent.value.getValue()
        artifact = event.parent.artifact
        cts_channel = event.parent.cts_channel
        request_id = event.parent.request_id

        LOG.info("Lookup complete: %s, %s", event.parent, results)

        # Depending on how many components handled this lookup event,
        # the results can be a single value (dict), or an array, or None,
        # or an exception, or a tuple (type, exception, traceback)
        hits = []
        complete = True
        if isinstance(results, list):
            for result in results:
                if result:
                    if isinstance(result,
                                  (tuple, ThreatLookupIncompleteException)):
                        LOG.info("Retry later!")
                        complete = False
                    elif isinstance(result, (tuple, Exception)):
                        LOG.error("No hits due to exception")
                    else:
                        hits.append(result)
        elif results:
            if isinstance(results, (tuple, ThreatLookupIncompleteException)):
                LOG.info("Retry later!")
                complete = False
            elif isinstance(results, (tuple, Exception)):
                LOG.error("No hits due to exception")
            else:
                hits.append(results)

        # Store the result and mark as complete (or not)
        cache_key = (cts_channel, request_id)
        self.cache[cache_key] = {
            "id": request_id,
            "artifact": artifact,
            "hits": hits,
            "complete": complete
        }

    def _get_authentication_headers(self, request):
        """[extract user/password info in http header: Authentication Basic into a list]"""
        if request.headers and "Basic" in request.headers.get(
                "Authorization", ""):
            auth = request.headers.get("Authorization", "").split(' ')
            user_password = base64.b64decode(auth[1])
            return b_to_s(user_password).split(":")

        return [None, None]

    def _is_authenticated(self, user_password_list):
        """[check if a user/password pair matches values set in app.config]"""
        return self.auth_user == user_password_list[
            0] and self.auth_password == user_password_list[1]

    def check_authentication(self, request):
        """[check if the headers contain user/password information and they match the settings in app.config]"""
        return self._is_authenticated(
            self._get_authentication_headers(request))
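
One more detail from the GET handler: TTLCache returns the stored dict by reference, so the handler's request_data["count"] update persists for later polls without re-inserting the entry (re-insertion would also refresh its TTL, which is not wanted here). A condensed sketch (names are illustrative):

from cachetools import TTLCache

cache = TTLCache(maxsize=100, ttl=60)
cache["req"] = {"complete": False, "count": 0}

entry = cache.get("req")
entry["count"] += 1
assert cache["req"]["count"] == 1  # in-place mutation is visible to later reads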
Example #8
async def cut_stop(user_id: str, uid: str, offset: float) -> Optional[str]:
    if res := check_sub(user_id, uid):
        return res

    key = task_key(user_id, uid)
    if key not in cut_tasks:
        return "不存在未结束的切片任务"

    room_id = get_sub_info_by_uid(uid)["room_id"]
    path = await check_task(room_id)
    if isinstance(path, str):
        return path

    if str(path.absolute()) != str(cut_tasks[key].file_path.absolute()):
        cut_tasks.pop(key)
        return "录播文件路径发生变动,切片失败..."

    time = await get_keyframe(room_id, offset)
    if isinstance(time, str):
        return time

    task = cut_tasks[key]
    if time <= task.start_time:
        return "The clip end time must be later than the start time!"
    if time - task.start_time <= 10:
        return "The clip duration must be longer than 10s!"

    task.stop_time = time
    task.start()
    cut_tasks.pop(key)
Example #9
class AdminView:
    """
        i can use adminView to control how to access the database easily
    """
    # Class-level defaults; __init__ replaces these with config-driven values
    cache_ttl: int = 43200
    client = ndb.Client(namespace="main", project=Config().PROJECT)
    mem_cache = TTLCache(maxsize=2048, ttl=cache_ttl)

    def __init__(self, config):
        super(AdminView, self).__init__()
        self.cache_ttl = config.CACHE_TTL
        self.mem_cache = TTLCache(maxsize=config.CACHE_SIZE,
                                  ttl=config.CACHE_TTL)

    def get_cache(self, key: str) -> typing.Any:
        """
        :param key: cache key to look up
        :return: the cached response, or None if missing or expired
        """
        try:
            # NOTE: pop() removes the entry, so an in-memory hit is served only once
            return self.mem_cache.pop(key)
        except KeyError:
            pass

        with self.client.context():
            cache_values: list = StoreCache().query(
                StoreCache.cache_key == key).fetch()
            if len(cache_values) > 0:
                cache = cache_values[0]
                if timestamp() - cache.last_accessed > self.cache_ttl:
                    return None
                return cache.response
            return None

    def set_cache(self, key: str, response: dict) -> bool:

        self.mem_cache[key] = response
        with self.client.context():
            cache_values: list = StoreCache().query(
                StoreCache.cache_key == key).fetch()
            if len(cache_values) > 0:
                cache = cache_values[0]
            else:
                cache = StoreCache()
            cache.cache_key = key
            cache.response = response
            cache.last_accessed = timestamp()
            cache.put()
            return True

    def is_shutdown(self) -> bool:
        with self.client.context():
            api_settings_list: list = SettingsAPI.query().fetch()
            if len(api_settings_list) > 0:
                api_settings = api_settings_list[0]
            else:
                api_settings = SettingsAPI()

            return api_settings.user_shutdown

    def update_property_types(self, property_selections: list) -> tuple:
        # property_selections maps each property type to a boolean selection
        selected_properties: list = []
        for value in property_selections:
            if property_selections[value]:
                selected_properties.append(value)

        with self.client.context():
            defaults_list: list = DefaultAPIQueries.query().fetch()
            if len(defaults_list) > 0:
                default_api = defaults_list[0]
            else:
                default_api = DefaultAPIQueries()
            default_api.set_property_types(property_types=selected_properties)
            default_api.put()
        return jsonify({
            'status': 'success',
            'message': "property types successfully updated"
        }), 200

    def update_dates_selected(self, dates_selected: list) -> tuple:
        # dates_selected maps each construction-date bucket to a boolean selection
        selected_properties: list = []
        for value in dates_selected:
            if dates_selected[value]:
                selected_properties.append(value)
        with self.client.context():
            defaults_list: list = DefaultAPIQueries.query().fetch()
            if len(defaults_list) > 0:
                default_api = defaults_list[0]
            else:
                default_api = DefaultAPIQueries()
            default_api.set_construction_dates(
                construction_dates=selected_properties)
            default_api.put()
        return jsonify({
            'status': 'success',
            'message': "construction dates successfully updated"
        }), 200

    def update_finish_quality(self, finish_quality: list) -> tuple:
        with self.client.context():
            defaults_list: list = DefaultAPIQueries.query().fetch()
            if len(defaults_list) > 0:
                default_api = defaults_list[0]
            else:
                default_api = DefaultAPIQueries()
            default_api.set_finish_quality(finish_quality=[
                finish_quality[key] for key in finish_quality
            ])
            default_api.put()
            return jsonify({
                'status': 'success',
                'message': "finish quality successfully updated"
            }), 200

    def fetch_all_admin_defaults(self) -> dict:

        with self.client.context():
            defaults_list: list = DefaultAPIQueries.query().fetch()
            if len(defaults_list) > 0:
                default_api = defaults_list[0]
            else:
                default_api = DefaultAPIQueries()
            return {
                'status': 'success',
                'payload': default_api.to_dict(),
                'message': 'successfully fetched default api values'
            }

    def fetch_property_types(self) -> dict:
        with self.client.context():
            defaults_list: list = DefaultAPIQueries.query().fetch()
            if len(defaults_list) > 0:
                default_api = defaults_list[0]
            else:
                default_api = DefaultAPIQueries()
            return jsonify({
                'status': 'success',
                'payload': default_api.get_property_types(),
                'message': 'successfully fetched property types'
            })

    def fetch_finishing_quality(self) -> Response:
        with self.client.context():
            defaults_list: list = DefaultAPIQueries.query().fetch()
            if len(defaults_list) > 0:
                default_api = defaults_list[0]
            else:
                default_api = DefaultAPIQueries()
            return jsonify({
                'status': 'success',
                'payload': default_api.get_finish_quality(),
                'message': 'successfully fetched finish quality'
            })

    def get_construction_dates(self) -> Response:
        with self.client.context():
            defaults_list: list = DefaultAPIQueries.query().fetch()
            if len(defaults_list) > 0:
                default_api = defaults_list[0]
            else:
                default_api = DefaultAPIQueries()
            return jsonify({
                'status': 'success',
                'payload': default_api.get_construction_dates(),
                'message': 'successfully fetched construction dates'
            })

    def set_shutdown_status(self, status: bool) -> Response:
        with self.client.context():
            settings_list: list = SettingsAPI.query().fetch()
            if len(settings_list) > 0:
                default_settings: SettingsAPI = settings_list[0]
            else:
                default_settings: SettingsAPI = SettingsAPI()

            default_settings.set_shutdown_status(status=status)
            default_settings.put()
            if status:
                message: str = 'API is shutting down...'
            else:
                message: str = 'API is restarting...'
            return jsonify({
                'status': 'success',
                'payload': default_settings.to_dict(),
                'message': message
            })

    def get_settings(self) -> Response:
        with self.client.context():
            settings_list: list = SettingsAPI.query().fetch()
            if len(settings_list) > 0:
                default_settings: SettingsAPI = settings_list[0]
            else:
                default_settings: SettingsAPI = SettingsAPI()
            return jsonify({
                'status': 'success',
                'payload': default_settings.to_dict(),
                'message': 'Successfully fetched api settings'
            })

    def add_successful_request(self) -> bool:
        with self.client.context():
            settings_list: list = SettingsAPI.query().fetch()
            if len(settings_list) > 0:
                default_settings: SettingsAPI = settings_list[0]
            else:
                default_settings: SettingsAPI = SettingsAPI()

            default_settings.add_successful_request()
            default_settings.put()
            return True

    def add_failed_request(self) -> bool:
        with self.client.context():
            settings_list: list = SettingsAPI.query().fetch()
            if len(settings_list) > 0:
                default_settings: SettingsAPI = settings_list[0]
            else:
                default_settings: SettingsAPI = SettingsAPI()

            default_settings.add_error_request()
            default_settings.put()
            return True

    def add_cached_request(self) -> bool:
        with self.client.context():
            settings_list: list = SettingsAPI.query().fetch()
            if len(settings_list) > 0:
                default_settings: SettingsAPI = settings_list[0]
            else:
                default_settings: SettingsAPI = SettingsAPI()

            default_settings.add_cached_request()
            default_settings.put()
            return True

    def save_notification_settings(self, search: str, postcode: str,
                                   email: str) -> tuple:
        with self.client.context():
            notifications_list: typing.List[
                NotificationsSettings] = NotificationsSettings.query(
                    NotificationsSettings.email == email).fetch()
            if len(notifications_list) == 0:
                try:
                    key = NotificationsSettings(email=email,
                                                search=search,
                                                postcode=postcode).put()
                except (TypeError, BadValueError):
                    return jsonify({
                        'status': 'failure',
                        'message': 'Database error, please inform the admin'
                    }), 500

                if key is not None:
                    message: str = ('Successfully updated notifications; you '
                                    'will periodically receive emails listing '
                                    'properties that match your search criteria')
                    return jsonify({
                        'status': 'success',
                        'message': message
                    }), 200
                else:
                    message: str = ('Error saving notification settings, '
                                    'please inform the admin through chat')
                    return jsonify({
                        'status': 'failure',
                        'message': message
                    }), 500
            else:
                message: str = 'Notification settings already saved'
                return jsonify({'status': 'failure', 'message': message}), 500

    def add_notifications_user(self, email: str, name: str, uid: str) -> tuple:
        with self.client.context():
            email_list_instance: typing.List[
                EmailListUsers] = EmailListUsers.query(
                    EmailListUsers.email == email).fetch()
            if len(email_list_instance) > 0:
                message: str = 'Notification user already exists'
                return jsonify({'status': 'failure', 'message': message}), 500
            else:
                try:
                    EmailListUsers(email=email, name=name, uid=uid).put()
                except (TypeError, BadValueError):
                    return jsonify({
                        'status': 'failure',
                        'message': 'Database error, please inform the admin'
                    }), 500
                return jsonify({
                    'status': 'success',
                    'message': 'Successfully created new mailing list user'
                }), 200
Example #11
# The module-level constants (ZOHO_* and DEFAULT_*) are defined elsewhere in
# the original module; the imports below are the ones this snippet needs.
import json

import requests
from cachetools import TTLCache
from requests.exceptions import HTTPError


class Client:
    def __init__(self, config):
        self.auth_token = config["authtoken"]
        self.zoho_org_id = config["zohoOrgId"]
        # optional config keys fall back to module-level defaults
        self.cache_enabled = config.get("cache_enabled", DEFAULT_CACHE_MODE)
        self.cache_ttl = config.get("cache_ttl", DEFAULT_CACHE_TTL)
        self.requests = requests.Session()
        self.cache = TTLCache(ttl=self.cache_ttl,
                              maxsize=DEFAULT_CACHE_MAXSIZE)

    def add_to_cache(self, key, value):
        # caching is a no-op when disabled in the config
        if self.cache_enabled:
            self.cache[key] = value

    def get_from_cache(self, key):
        if not self.cache_enabled:
            return None
        # TTLCache.get returns None for missing or expired entries
        return self.cache.get(key)

    def delete_from_cache(self, key):
        if not self.cache_enabled:
            return False
        try:
            self.cache.pop(key)
            return True
        except KeyError:
            return False

    def get_request_headers(self, headers):
        default_headers = {
            ZOHO_AUTH_HEADER: ZOHO_AUTH_TOKEN_HEADER_PREFIX + self.auth_token,
            ZOHO_ORG_ID_HEADER: self.zoho_org_id,
            'Content-Type': "application/json"
        }
        if headers:
            default_headers.update(headers)
        return default_headers

    def send_request(self, method, uri, data=None, headers=None):
        try:
            # reuse the session created in __init__ instead of the module-level
            # requests.request, so HTTP connections are pooled
            response = self.requests.request(
                method,
                ZOHO_SUBSCRIPTION_API_URL + uri,
                data=json.dumps(data),
                headers=self.get_request_headers(headers))
            response.raise_for_status()
        except HTTPError as http_err:
            return http_err
        except Exception:
            # network-level failures are reported as None
            return None
        if response.headers.get('Content-Type') == 'application/json;charset=UTF-8':
            return json.loads(response.text)
        return response.content
Example #12
# CacheMonitor is defined elsewhere in the original module; the imports
# below are the ones this snippet needs.
import threading
import time
from collections.abc import MutableMapping

from cachetools import TTLCache


class AutoTTLCache(MutableMapping):
    def __init__(self,
                 items=None,
                 *,
                 maxsize,
                 ttl,
                 timer=time.monotonic,
                 getsizeof=None):
        self._cache_lock = threading.Lock()
        self._cache = TTLCache(maxsize, ttl, timer=timer, getsizeof=getsizeof)
        if items is not None:
            self._cache.update(items)
        self._monitor = CacheMonitor(self)

    @property
    def ttl(self):
        with self._cache_lock:
            return self._cache.ttl

    @property
    def maxsize(self):
        with self._cache_lock:
            return self._cache.maxsize

    @property
    def timer(self):
        with self._cache_lock:
            return self._cache.timer

    def expire(self):
        with self._cache_lock:
            self._cache.expire()

    def __contains__(self, key):
        with self._cache_lock:
            return key in self._cache

    def __setitem__(self, k, v):
        with self._cache_lock:
            self._cache[k] = v

    def __delitem__(self, k):
        with self._cache_lock:
            del self._cache[k]

    def __getitem__(self, k):
        with self._cache_lock:
            return self._cache[k]

    def __len__(self) -> int:
        with self._cache_lock:
            return len(self._cache)

    def __iter__(self):
        with self._cache_lock:
            keys = list(self._cache)
        yield from keys

    # TODO: __reduce__ and __setstate__

    def __repr__(self):
        return f"{type(self).__name__}(max_size={self.maxsize}, ttl={self.ttl})"

    def clear(self):
        with self._cache_lock:
            self._cache.clear()

    def get(self, *args, **kwargs):
        with self._cache_lock:
            return self._cache.get(*args, **kwargs)

    def pop(self, *args, **kwargs):
        with self._cache_lock:
            return self._cache.pop(*args, **kwargs)

    def setdefault(self, *args, **kwargs):
        with self._cache_lock:
            return self._cache.setdefault(*args, **kwargs)

    def popitem(self):
        with self._cache_lock:
            return self._cache.popitem()