Code Example #1
File: UserWallet.py Project: LuoRyan/neo-python
    def AddContract(self, contract):
        """
        Add a contract to the database.

        Args:
            contract(neo.SmartContract.Contract): a Contract instance.
        """
        super(UserWallet, self).AddContract(contract)

        try:
            db_contract = Contract.get(ScriptHash=contract.ScriptHash.ToBytes())
            db_contract.delete_instance()
        except Exception as e:
            logger.info("contract does not exist yet")

        sh = bytes(contract.ScriptHash.ToArray())
        address, created = Address.get_or_create(ScriptHash=sh)
        address.IsWatchOnly = False
        address.save()
        db_contract = Contract.create(RawData=contract.ToArray(),
                                      ScriptHash=contract.ScriptHash.ToBytes(),
                                      PublicKeyHash=contract.PublicKeyHash.ToBytes(),
                                      Address=address,
                                      Account=self.__dbaccount)

        logger.debug("Creating db contract %s " % db_contract)

        db_contract.save()
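Note: Contract and Address here follow the peewee ORM API (.get(), .get_or_create(), .create(), .delete_instance()). Assuming that, the broad except around the lookup could be narrowed to the exception peewee actually raises; a sketch, not the project's code:

    try:
        Contract.get(ScriptHash=contract.ScriptHash.ToBytes()).delete_instance()
    except Contract.DoesNotExist:
        logger.info("contract does not exist yet")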
Code Example #2
File: metrics.py Project: fractos/cupcake
def metrics_cloudwatch(metric, metrics):
    logger.debug("metrics_cloudwatch: {} {}".format(metrics["id"], metric.data))
    cloudwatch = cloudwatch_client(metrics["provider"])
    cloudwatch.put_metric_data(
        MetricData=[
            {
                'MetricName': metric.name,
                'Dimensions': [
                    {
                        'Name': 'ENVIRONMENT-GROUP',
                        'Value': metric.endpoint.environment_group
                    },
                    {
                        'Name': 'ENVIRONMENT',
                        'Value': metric.endpoint.environment
                    },
                    {
                        'Name': 'ENDPOINT-GROUP',
                        'Value': metric.endpoint.endpoint_group
                    },
                    {
                        'Name': 'ENDPOINT',
                        'Value': metric.endpoint.endpoint
                    },
                ],
                'Unit': 'Milliseconds',
                'Value': metric.data
            },
        ],
        Namespace=metrics["provider"]["namespace"]
    )
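For reference, a sketch of the metrics definition this function reads. Only "id", "provider", and provider["namespace"] are used directly above; the "region" key is an assumption about what cloudwatch_client() needs:

    metrics = {
        "id": "cloudwatch-metrics",   # used in the debug log line
        "provider": {
            "namespace": "cupcake",   # CloudWatch namespace
            "region": "eu-west-1"     # assumed: boto3 clients want a region
        }
    }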
Code Example #3
File: UserWallet.py Project: LuoRyan/neo-python
    def LoadStoredData(self, key):
        logger.debug("Looking for key %s " % key)
        try:
            return Key.get(Name=key).Value
        except Exception as e:
            logger.error("Could not get key %s " % e)

        return None
Code Example #4
File: alerts.py Project: fractos/cupcake
def deliver_alert(incident, alert_id, alert_definitions):
    logger.debug("deliver_alert: delivering to id {}".format(alert_id))
    for alert in alert_definitions["alerts"]:
        if alert["id"] == alert_id:
            if alert["@type"] == "alert-slack-webhook":
                alert_slack(incident, alert)
            elif alert["@type"] == "alert-sns":
                alert_sns(incident, alert)
Code Example #5
File: alerts.py Project: fractos/cupcake
def alert_sns(incident, alert):
    logger.debug("alert_sns: {} {}".format(alert["id"], incident.message))

    sns_client = boto3.client("sns", alert["region"])

    _ = sns_client.publish(
        TopicArn=alert["arn"],
        Message=json.dumps(incident.as_dict(), indent=4, sort_keys=True)
    )
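alert_sns relies on two imports the excerpt does not show; presumably the module has:

    import json
    import boto3

boto3.client("sns", alert["region"]) passes the region as the second positional argument (region_name), and publish() sends the JSON-serialised incident to the topic named by alert["arn"].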
Code Example #6
File: NodeLeader.py Project: LuoRyan/neo-python
 def ResetBlockRequestsAndCache(self):
     """Reset the block request counter and its cache."""
     logger.debug("Resseting Block requests")
     self.MissionsGlobal = []
     BC.Default().BlockSearchTries = 0
     for p in self.Peers:
         p.myblockrequests = set()
     BC.Default().ResetBlockRequests()
     BC.Default()._block_cache = {}
Code Example #7
File: cupcake.py Project: fractos/cupcake
def metrics_record_response_time(endpoint, timestamp, response_time, metrics_groups):
    global metrics_definitions

    logger.debug("metrics_record_response_time({}, {}, {})".format(
        endpoint.url, str(timestamp), str(response_time)))

    metric = Metric(
        endpoint=endpoint,
        timestamp=timestamp,
        name='RESPONSE-TIME',
        data=response_time
    )

    deliver_metric_to_groups(metric, metrics_groups, metrics_definitions)
Code Example #8
File: sqlite_database.py Project: fractos/cupcake
 def save_active(self, incident):
     logger.debug("sqlite_database: save_active()")
     con = None
     try:
         con = sqlite3.connect(self.db_name)
         cur = con.cursor()
         cur.execute("INSERT INTO active VALUES (?,?,?,?,?,?,?)",
             (incident.endpoint.environment_group, incident.endpoint.environment, incident.endpoint.endpoint_group, incident.endpoint.endpoint, incident.timestamp, incident.message, incident.endpoint.url))
         con.commit()
     except sqlite3.Error as e:
         logger.error("sqlite_database: problem during save_active() - %s" % str(e))
     finally:
         if con:
             con.close()
Code Example #9
File: sqlite_database.py Project: fractos/cupcake
    def create_schema(self):
        logger.debug("sqlite_database: create_schema()")
        con = None

        try:
            con = sqlite3.connect(self.db_name)
            cur = con.cursor()
            cur.execute("CREATE TABLE active (environment_group TEXT, environment TEXT, endpoint_group TEXT, endpoint TEXT, timestamp INTEGER, message TEXT, url TEXT)")
            con.commit()
        except sqlite3.Error as e:
            logger.error("sqlite_database: problem during create_schema() - %s" % str(e))
        finally:
            if con:
                con.close()
Code Example #10
 def save_active(self, incident):
     logger.debug("postgresql_database: save_active()")
     con = None
     try:
         con = psycopg2.connect(self.connection_string)
         cur = con.cursor()
         cur.execute("INSERT INTO active VALUES (%s,%s,%s,%s,%s,%s,%s)",
             (incident.endpoint.environment_group, incident.endpoint.environment, incident.endpoint.endpoint_group, incident.endpoint.endpoint, incident.timestamp, incident.message, incident.endpoint.url))
         con.commit()
     except psycopg2.Error as e:
         logger.error("postgresql_database: problem during save_active() - %s" % str(e))
     finally:
         if con:
             con.close()
Code Example #11
File: sqlite_database.py Project: fractos/cupcake
 def remove_active(self, incident):
     logger.debug("sqlite_database: remove_active()")
     con = None
     try:
         con = sqlite3.connect(self.db_name)
         cur = con.cursor()
         cur.execute("DELETE FROM active WHERE environment_group = ? AND environment = ? AND endpoint_group = ? AND endpoint = ?",
             (incident.endpoint.environment_group, incident.endpoint.environment, incident.endpoint.endpoint_group, incident.endpoint.endpoint))
         con.commit()
     except sqlite3.Error as e:
         logger.error("sqlite_database: problem during remove_active() - %s" % str(e))
     finally:
         if con:
             con.close()
Code Example #12
 def remove_active(self, incident):
     logger.debug("postgresql_database: remove_active()")
     con = None
     try:
         con = psycopg2.connect(self.connection_string)
         cur = con.cursor()
         cur.execute("DELETE FROM active WHERE environment_group = %s AND environment = %s AND endpoint_group = %s AND endpoint = %s",
             (incident.endpoint.environment_group, incident.endpoint.environment, incident.endpoint.endpoint_group, incident.endpoint.endpoint))
         con.commit()
     except psycopg2.Error as e:
         logger.error("postgresql_database: problem during remove_active() - %s" % str(e))
     finally:
         if con:
             con.close()
Code Example #13
    def create_schema(self):
        logger.debug("postgresql_database: create_schema()")
        con = None

        try:
            con = psycopg2.connect(self.connection_string)
            cur = con.cursor()
            cur.execute("CREATE TABLE active (environment_group CHARACTER VARYING(500) NOT NULL, environment CHARACTER VARYING(500) NOT NULL, endpoint_group CHARACTER VARYING(500) NOT NULL, endpoint CHARACTER VARYING(500) NOT NULL, timestamp INTEGER NOT NULL, message CHARACTER VARYING(500) NOT NULL, url CHARACTER VARYING(500) NOT NULL)")
            con.commit()
        except psycopg2.Error as e:
            logger.error("postgresql_database: problem during create_schema() - %s" % str(e))
        finally:
            if con:
                con.close()
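The sqlite3 and psycopg2 methods in this family differ almost only in connection setup and in the DB-API paramstyle each driver expects: sqlite3 uses qmark placeholders (?), while psycopg2 uses format-style ones (%s). A minimal side-by-side illustration:

    cur.execute("SELECT * FROM active WHERE endpoint = ?", (endpoint,))   # sqlite3
    cur.execute("SELECT * FROM active WHERE endpoint = %s", (endpoint,))  # psycopg2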
Code Example #14
 def get_active(self, incident):
     logger.debug("postgresql_database: get_active()")
     con = None
     try:
         con = psycopg2.connect(self.connection_string)
         cur = con.cursor(cursor_factory=psycopg2.extras.DictCursor)
         cur.execute('''SELECT * FROM active WHERE environment_group = %s AND environment = %s AND endpoint_group = %s AND endpoint = %s''',
             (incident.endpoint.environment_group, incident.endpoint.environment, incident.endpoint.endpoint_group, incident.endpoint.endpoint))
         data = cur.fetchone()
         return data
     except psycopg2.Error as e:
         logger.error("postgresql_database: problem during get_active() - %s" % str(e))
     finally:
         if con:
             con.close()
Code Example #15
 def get_all_actives(self):
     logger.debug("postgresql_database: get_all_actives()")
     con = None
     try:
         con = psycopg2.connect(self.connection_string)
         cur = con.cursor(cursor_factory=psycopg2.extras.DictCursor)
         cur.execute("SELECT * FROM active")
         data = cur.fetchall()
         return data
     except psycopg2.Error as e:
         logger.error("postgresql_database: problem during get_all_actives() - %s" % str(e))
         return None
     finally:
         if con:
             con.close()
Code Example #16
 def active_exists(self, incident):
     logger.debug("postgresql_database: active_exists()")
     con = None
     try:
         con = psycopg2.connect(self.connection_string)
         cur = con.cursor()
         cur.execute('''SELECT COUNT(*) FROM active WHERE environment_group = %s AND environment = %s AND endpoint_group = %s AND endpoint = %s''',
             (incident.endpoint.environment_group, incident.endpoint.environment, incident.endpoint.endpoint_group, incident.endpoint.endpoint))
         data = cur.fetchone()
         return int(data[0]) > 0
     except psycopg2.Error as e:
         logger.error("postgresql_database: problem during active_exists() - %s" % str(e))
     finally:
         if con:
             con.close()
Code Example #17
File: sqlite_database.py Project: fractos/cupcake
 def active_exists(self, incident):
     logger.debug("sqlite_database: active_exists()")
     con = None
     try:
         con = sqlite3.connect(self.db_name)
         cur = con.cursor()
         cur.execute('''SELECT COUNT(*) FROM active WHERE environment_group = ? AND environment = ? AND endpoint_group = ? AND endpoint = ?''',
             (incident.endpoint.environment_group, incident.endpoint.environment, incident.endpoint.endpoint_group, incident.endpoint.endpoint))
         data = cur.fetchone()
         return int(data[0]) > 0
     except sqlite3.Error as e:
         logger.error("sqlite_database: problem during active_exists() - %s" % str(e))
     finally:
         if con:
             con.close()
Code Example #18
File: UserWallet.py Project: LuoRyan/neo-python
    def Rebuild(self):
        self._lock.acquire()
        try:
            super(UserWallet, self).Rebuild()

            logger.debug("wallet rebuild: deleting %s coins and %s transactions" %
                         (Coin.select().count(), Transaction.select().count()))

            for c in Coin.select():
                c.delete_instance()
            for tx in Transaction.select():
                tx.delete_instance()
        finally:
            self._lock.release()

        logger.debug("wallet rebuild complete")
Code Example #19
File: sqlite_database.py Project: fractos/cupcake
 def get_active(self, incident):
     logger.debug("sqlite_database: get_active()")
     con = None
     try:
         con = sqlite3.connect(self.db_name)
         con.row_factory = sqlite3.Row
         cur = con.cursor()
         cur.execute('''SELECT * FROM active WHERE environment_group = ? AND environment = ? AND endpoint_group = ? AND endpoint = ?''',
             (incident.endpoint.environment_group, incident.endpoint.environment, incident.endpoint.endpoint_group, incident.endpoint.endpoint))
         data = cur.fetchone()
         return data
     except sqlite3.Error as e:
         logger.error("sqlite_database: problem during get_active() - %s" % str(e))
     finally:
         if con:
             con.close()
Code Example #20
File: sqlite_database.py Project: fractos/cupcake
 def get_all_actives(self):
     logger.debug("sqlite_database: get_all_actives()")
     con = None
     try:
         con = sqlite3.connect(self.db_name)
         con.row_factory = sqlite3.Row
         cur = con.cursor()
         cur.execute("SELECT * FROM active")
         data = cur.fetchall()
         return data
     except sqlite3.Error as e:
         logger.error("sqlite_database: problem during get_all_actives() - %s" % str(e))
         return None
     finally:
         if con:
             con.close()
Code Example #21
File: VersionPayload.py Project: LuoRyan/neo-python
    def Deserialize(self, reader):
        """
        Deserialize full object.

        Args:
            reader (neo.IO.BinaryReader):
        """
        self.Version = reader.ReadUInt32()
        self.Services = reader.ReadUInt64()
        self.Timestamp = reader.ReadUInt32()
        self.Port = reader.ReadUInt16()
        self.Nonce = reader.ReadUInt32()
        self.UserAgent = reader.ReadVarString().decode('utf-8')
        self.StartHeight = reader.ReadUInt32()
        logger.debug("Version start height: T %s " % self.StartHeight)
        self.Relay = reader.ReadBool()
Code Example #22
File: UserWallet.py Project: LuoRyan/neo-python
    def OnCoinsChanged(self, added, changed, deleted):

        for coin in added:
            addr_hash = bytes(coin.Output.ScriptHash.Data)

            try:
                address = Address.get(ScriptHash=addr_hash)

                c = Coin(
                    TxId=bytes(coin.Reference.PrevHash.Data),
                    Index=coin.Reference.PrevIndex,
                    AssetId=bytes(coin.Output.AssetId.Data),
                    Value=coin.Output.Value.value,
                    ScriptHash=bytes(coin.Output.ScriptHash.Data),
                    State=coin.State,
                    Address=address
                )
                c.save()
                logger.debug("saved coin %s " % c)
            except Exception as e:
                logger.error("COULDN'T SAVE!!!! %s " % e)

        for coin in changed:
            for hold in self._holds:
                if hold.Reference == coin.Reference and coin.State & CoinState.Spent > 0:
                    hold.IsComplete = True
                    hold.save()
            try:
                c = Coin.get(TxId=bytes(coin.Reference.PrevHash.Data), Index=coin.Reference.PrevIndex)
                c.State = coin.State
                c.save()
            except Exception as e:
                logger.error("Coulndn't change coin %s %s (coin to change not found)" % (coin, e))

        for coin in deleted:
            for hold in self._holds:
                if hold.Reference == coin.Reference:
                    hold.IsComplete = True
                    hold.save()
            try:
                c = Coin.get(TxId=bytes(coin.Reference.PrevHash.Data), Index=coin.Reference.PrevIndex)
                c.delete_instance()

            except Exception as e:
                logger.error("could not delete coin %s %s " % (coin, e))
Code Example #23
File: Block.py Project: LuoRyan/neo-python
    def Verify(self, completely=False):
        """
        Verify the integrity of the block.

        Args:
            completely: (Not functional at this time).

        Returns:
            bool: True if valid. False otherwise.
        """
        res = super(Block, self).Verify()
        if not res:
            return False

        logger.debug("Verifying BLOCK!!")
        from neo.Blockchain import GetBlockchain, GetConsensusAddress

        # the first TX has to be a miner transaction; no TX after it can be a miner TX
        if self.Transactions[0].Type != TransactionType.MinerTransaction:
            return False
        for tx in self.Transactions[1:]:
            if tx.Type == TransactionType.MinerTransaction:
                return False

        if completely:
            bc = GetBlockchain()

            if self.NextConsensus != GetConsensusAddress(bc.GetValidators(self.Transactions).ToArray()):
                return False

            for tx in self.Transactions:
                if not tx.Verify():
                    pass
            logger.error("Blocks cannot be fully validated at this moment.  please pass completely=False")
            raise NotImplementedError()
            # do this below!
            # foreach(Transaction tx in Transactions)
            # if (!tx.Verify(Transactions.Where(p = > !p.Hash.Equals(tx.Hash)))) return false;
            # Transaction tx_gen = Transactions.FirstOrDefault(p= > p.Type == TransactionType.MinerTransaction);
            # if (tx_gen?.Outputs.Sum(p = > p.Value) != CalculateNetFee(Transactions)) return false;

        return True
Code Example #24
File: run_ssh_cmd.py Project: jctanner/ansible-tools
def validate_control_socket(SSHCMD):
    # $ ssh -O check -o ControlPath=... vagrant@el6host
    # Master running (pid=24779)
    for idx, x in enumerate(SSHCMD):
        if x.startswith('ControlPath'):
            cppath = x.split('=')[1]

            if not os.path.exists(cppath):
                logger.info('%s does not exist' % cppath)
            else:
                cpcmd = SSHCMD[:-1]

                checkcmd = cpcmd[:]
                checkcmd.insert(-1, '-O')
                checkcmd.insert(-1, 'check')
                print('# %s' % ' '.join(checkcmd))
                (rc, so, se) = run_ssh_cmd(
                    ' '.join(checkcmd),
                    use_selectors=False
                )
                logger.debug('rc: %s' % rc)
                logger.debug('so: %s' % so)
                logger.debug('se: %s' % se)

                if rc != 0 or so.strip():
                    logger.info('checkcmd rc != 0 or has stdout')
                    logger.info(so)
                    logger.info(se)
Code Example #25
def perform_algo_task():
    coinsAboveThreshold = {}
    coinsElgibleForIncrease = {}

    indexInfo = DatabaseManager.get_index_info_model()

    if indexInfo.Active == True:
        percentage_btc_amount = indexInfo.TotalBTCVal * (
            indexInfo.BalanceThreshold / 100)
        logger.debug("Percentage_to_btc_amount: " + str(percentage_btc_amount))

        if percentage_btc_amount <= CondexConfig.BITTREX_MIN_BTC_TRADE_AMOUNT:
            logger.debug("Current BTC Threshold Value To Low - " +
                         str(percentage_btc_amount))
        else:
            # Generate our winners/losers list
            for indexedCoin in DatabaseManager.get_all_index_coin_models():
                if indexedCoin.UnrealizedGain >= indexInfo.BalanceThreshold:
                    coinsAboveThreshold[
                        indexedCoin.Ticker] = indexedCoin.UnrealizedGain
                elif indexedCoin.UnrealizedGain <= indexInfo.BalanceThreshold:
                    coinsElgibleForIncrease[
                        indexedCoin.Ticker] = indexedCoin.UnrealizedGain

            # Sort our tables
            coinsAboveThreshold = Util.tuple_list_to_dict(
                sorted(coinsAboveThreshold.items(),
                       key=lambda pair: pair[1],
                       reverse=True))
            coinsElgibleForIncrease = Util.tuple_list_to_dict(
                sorted(coinsElgibleForIncrease.items(),
                       key=lambda pair: pair[1],
                       reverse=True))

            if len(coinsAboveThreshold) >= 1:
                logger.debug("Currently " + str(len(coinsAboveThreshold)) +
                             " avalible for rebalance")
                logger.debug(coinsAboveThreshold)

                if len(coinsElgibleForIncrease) >= 1:
                    logger.debug("Currently " +
                                 str(len(coinsElgibleForIncrease)) +
                                 " elgible for increase")
                    logger.debug(coinsElgibleForIncrease)
                    for akey in coinsAboveThreshold:

                        # Check to see if we still have coins to increase
                        if len(coinsElgibleForIncrease) >= 1:

                            # dict.keys() is not subscriptable in Python 3
                            elgibleCoinTicker = list(
                                coinsElgibleForIncrease.keys())[0]

                            rebalanceCoinLocked = False
                            elgibleCoinLocked = False

                            if DatabaseManager.get_coin_lock_model(akey):
                                rebalanceCoinLocked = True

                            if DatabaseManager.get_coin_lock_model(
                                    elgibleCoinTicker):
                                elgibleCoinLocked = True

                            if rebalanceCoinLocked == False and elgibleCoinLocked == False:

                                indexCoinInfo = DatabaseManager.get_index_coin_model(
                                    akey)
                                coinBalance = DatabaseManager.get_coin_balance_model(
                                    akey)

                                rebalanceSpecialTicker = akey + "/BTC"

                                if akey == "BTC":
                                    rebalanceSpecialTicker = "BTC/USDT"

                                rebalanceCoinTickerModel = DatabaseManager.get_ticker_model(
                                    rebalanceSpecialTicker)
                                elgibleCoinTickerModel = DatabaseManager.get_ticker_model(
                                    elgibleCoinTicker + "/BTC")

                                amountOfRebalanceToSell = 0.0

                                if akey == "BTC":
                                    amountOfRebalanceToSell = percentage_btc_amount
                                else:
                                    amountOfRebalanceToSell = percentage_btc_amount / rebalanceCoinTickerModel.BTCVal

                                amountOfEligbleToBuy = percentage_btc_amount / elgibleCoinTickerModel.BTCVal

                                if coinBalance.TotalCoins >= amountOfRebalanceToSell:
                                    DatabaseManager.create_coin_lock_model(
                                        akey)
                                    DatabaseManager.create_coin_lock_model(
                                        elgibleCoinTicker)

                                    logger.info("Performing Rebalance " +
                                                akey.upper() + " " +
                                                str(amountOfRebalanceToSell) +
                                                " - " +
                                                elgibleCoinTicker.upper() +
                                                " " +
                                                str(amountOfEligbleToBuy))
                                    #perform_rebalance_task.s(akey.upper(), amountOfRebalanceToSell, elgibleCoinTicker.upper(), amountOfEligbleToBuy)
                                    app.send_task(
                                        'Tasks.perform_rebalance_task',
                                        args=[
                                            akey.upper(),
                                            amountOfRebalanceToSell,
                                            elgibleCoinTicker.upper(),
                                            amountOfEligbleToBuy
                                        ])
                                    # Need to remove the eligible coin from the dictionary
                                    del coinsElgibleForIncrease[
                                        elgibleCoinTicker]
                                else:
                                    logger.error(
                                        "Failed to sell coins - we do not have enough of "
                                        + str(akey))

                            else:
                                logger.debug("One of the coins where locked")

                else:
                    logger.debug("No coins eligible for increase")
            else:
                logger.debug("No coins above threshold")
Code Example #26
def perform_rebalance_task(rebalanceTicker, rebalanceSellAmount, elgibleTicker,
                           elgibleBuyAmount):

    coinSellIncomplete = True
    coinBuyIncomplete = True
    coinSellRetryCount = 0
    coinBuyRetryCount = 0
    coinSellFailed = False

    sellOrderUUID = ""
    buyOrderUUID = ""

    indexInfo = DatabaseManager.get_index_info_model()

    retryLimit = indexInfo.OrderRetryAmount

    rebalanceTickerGainModel = DatabaseManager.get_realized_gain_model(
        rebalanceTicker)
    elgibleCoinTicker = DatabaseManager.get_ticker_model(elgibleTicker +
                                                         "/BTC")

    em = ExchangeManager()

    partial_fill_amount = 0
    partial_filled = False

    if rebalanceTicker != "BTC" and rebalanceTicker != "btc":

        while coinSellIncomplete:

            if coinSellRetryCount >= retryLimit:
                coinSellFailed = True
                coinSellIncomplete = False
                break
                # Cancel Order
            else:

                rebalanceCoinTicker = DatabaseManager.get_ticker_model(
                    rebalanceTicker + "/BTC")

                if CondexConfig.DEBUG == True:
                    logger.info("Placing Sell Order For " + rebalanceTicker +
                                "/BTC")
                else:
                    logger.info("Selling " + str(rebalanceSellAmount) +
                                " of " + rebalanceTicker + " at " +
                                str(rebalanceCoinTicker.BTCVal))
                    sellOrderUUID = em.create_sell_order(
                        rebalanceTicker, rebalanceSellAmount,
                        rebalanceCoinTicker.BTCVal)['id']
                    time.sleep(60 * indexInfo.OrderTimeout)

                # Check order succeded through
                if CondexConfig.DEBUG == True:
                    logger.debug("Fetching order")
                    coinSellIncomplete = False
                else:

                    order_result = em.fetch_order(sellOrderUUID)
                    order_filled_amount = order_result['filled']

                    if order_result['status'] == "closed":
                        logger.debug("Sold coin " + rebalanceTicker + " for " +
                                     str(order_result['price']))
                        coinSellIncomplete = False
                        DatabaseManager.update_realized_gain_model(
                            rebalanceTicker,
                            rebalanceTickerGainModel.RealizedGain +
                            ((order_filled_amount * rebalanceCoinTicker.BTCVal)
                             / indexInfo.TotalBTCVal) * 100)
                    elif (
                            order_filled_amount * rebalanceCoinTicker.BTCVal
                    ) > CondexConfig.BITTREX_MIN_BTC_TRADE_AMOUNT and order_result[
                            'status'] == "open":
                        em.cancel_order(sellOrderUUID)
                        logger.debug("Sold partial of coin " +
                                     rebalanceTicker + " for " +
                                     str(order_result['price']))
                        coinSellIncomplete = False
                        partial_filled = True
                        partial_fill_amount = order_filled_amount * rebalanceCoinTicker.BTCVal
                        DatabaseManager.update_realized_gain_model(
                            rebalanceTicker,
                            rebalanceTickerGainModel.RealizedGain +
                            ((order_filled_amount * rebalanceCoinTicker.BTCVal)
                             / indexInfo.TotalBTCVal) * 100)
                    else:
                        coinSellRetryCount = coinSellRetryCount + 1
                        if CondexConfig.DEBUG == True:
                            logger.debug("Canceling sell order")
                        else:
                            em.cancel_order(sellOrderUUID)
                            logger.debug("Sell Order Timeout Reached")
                        time.sleep(10)  #Magic Number

    if coinSellFailed:
        logger.info("Sell of coin " + rebalanceTicker + " failed after " +
                    str(coinSellRetryCount) + " attempts")

    else:
        while coinBuyIncomplete:

            if coinBuyRetryCount >= retryLimit:
                coinBuyIncomplete = False
                logger.info("Buying of coin " + rebalanceTicker +
                            " failed after " + str(coinBuyRetryCount) +
                            " attempts")
                break
                # Cancel Order
            else:

                if CondexConfig.DEBUG == True:
                    logger.debug("Putting in buy order")
                else:
                    logger.info("Buying " + str(elgibleBuyAmount) + " of " +
                                elgibleTicker + " at " +
                                str(elgibleCoinTicker.BTCVal))
                    if partial_filled == True:
                        buyOrderUUID = em.create_buy_order(
                            elgibleTicker,
                            partial_fill_amount / elgibleCoinTicker.BTCVal,
                            elgibleCoinTicker.BTCVal)['id']
                    else:
                        buyOrderUUID = em.create_buy_order(
                            elgibleTicker, elgibleBuyAmount,
                            elgibleCoinTicker.BTCVal)['id']
                    time.sleep(60 * indexInfo.OrderTimeout)

                # Check order succeded through
                if CondexConfig.DEBUG == True:
                    logger.debug("Fetching order")
                    coinBuyIncomplete = False
                else:

                    order_result = em.fetch_order(buyOrderUUID)
                    order_filled_amount = order_result['filled']

                    if order_result['status'] == "closed":
                        logger.info("Bought coin " + elgibleTicker + " for " +
                                    str(order_result['price']))
                        coinBuyIncomplete = False

                        if rebalanceTicker == "BTC" or rebalanceTicker == "btc":
                            DatabaseManager.update_realized_gain_model(
                                rebalanceTicker,
                                rebalanceTickerGainModel.RealizedGain +
                                ((order_filled_amount *
                                  elgibleCoinTicker.BTCVal) /
                                 indexInfo.TotalBTCVal) * 100)

                    elif (
                            order_filled_amount * elgibleCoinTicker.BTCVal
                    ) > CondexConfig.BITTREX_MIN_BTC_TRADE_AMOUNT and order_result[
                            'status'] == "open":
                        em.cancel_order(buyOrderUUID)
                        logger.debug("Bought partial of coin " +
                                     elgibleCoinTicker + " for " +
                                     str(order_result['price']))
                        coinBuyIncomplete = False

                        if rebalanceTicker == "BTC" or rebalanceTicker == "btc":
                            DatabaseManager.update_realized_gain_model(
                                rebalanceTicker,
                                rebalanceTickerGainModel.RealizedGain +
                                ((order_filled_amount *
                                  elgibleCoinTicker.BTCVal) /
                                 indexInfo.TotalBTCVal) * 100)

                    else:
                        coinBuyRetryCount = coinBuyRetryCount + 1
                        if CondexConfig.DEBUG == True:
                            logger.debug("Canceling buy order")
                        else:
                            try:
                                em.cancel_order(buyOrderUUID)
                            except:
                                coinBuyIncomplete = False
                                pass  # order failed to cancel; it was filled previously
                            logger.debug("Buy Order Timeout Reached")
                        time.sleep(10)  #Magic Number

    # Delete the locks
    if CondexConfig.DEBUG != True:
        DatabaseManager.delete_coin_lock_model(rebalanceTicker)
        DatabaseManager.delete_coin_lock_model(elgibleTicker)
Code Example #27
File: metrics.py Project: fractos/cupcake
def deliver_metric_to_group(metric, metrics_group_id, metrics_definitions):
    logger.debug("deliver_metric_to_group: delivering to group id {}".format(metrics_group_id))
    for metrics_id in get_metrics_in_group(metrics_group_id, metrics_definitions):
        deliver_metric(metric, metrics_id, metrics_definitions)
Code Example #28
    def get_sodar_info(self):
        """Method evaluates user input to extract or create iRODS path. Use cases:

        1. User provides Landing Zone UUID: fetch path and use it.
        2. User provides Project UUID:
           i. If there are LZs associated with the project, select the latest active one and use it.
          ii. If there are no LZs associated with the project, create a new one and use it.
        3. Data provided by the user is neither an iRODS path nor a valid UUID. Report the error and throw an exception.

        :return: Returns landing zone UUID and path to iRODS directory.
        """
        # Initialise variables
        lz_irods_path = None
        lz_uuid = None
        not_project_uuid = False
        create_lz_bool = self.args.yes
        in_destination = self.args.destination
        assay_uuid = self.args.assay

        # Project UUID provided by user
        if is_uuid(in_destination):

            if create_lz_bool:
                # Assume that provided UUID is associated with a Project and user wants a new LZ.
                # Behavior: search for available LZ; if none, create a new LZ.
                try:
                    lz_uuid, lz_irods_path = self.get_latest_landing_zone(
                        project_uuid=in_destination, assay_uuid=assay_uuid)
                    if not lz_irods_path:
                        logger.info(
                            "No active Landing Zone available for project %s, "
                            "a new one will be created..." % in_destination)
                        lz_uuid, lz_irods_path = self.create_landing_zone(
                            project_uuid=in_destination, assay_uuid=assay_uuid)
                except requests.exceptions.HTTPError as e:
                    exception_str = str(e)
                    logger.error(
                        "Unable to create Landing Zone using UUID %s. HTTP error %s "
                        % (in_destination, exception_str))
                    raise

            else:
                # Assume that provided UUID is associated with a Project.
                # Behaviour: get iRODS path from latest active Landing Zone.
                try:
                    lz_uuid, lz_irods_path = self.get_latest_landing_zone(
                        project_uuid=in_destination, assay_uuid=assay_uuid)
                except requests.exceptions.HTTPError as e:
                    not_project_uuid = True
                    exception_str = str(e)
                    logger.debug(
                        "Provided UUID may not be associated with a Project. HTTP error %s"
                        % exception_str)

                # Assume that provided UUID is associated with a LZ
                # Behaviour: get iRODS path from it.
                if not_project_uuid:
                    try:
                        lz_uuid = in_destination
                        lz_irods_path = self.get_landing_zone_by_uuid(
                            lz_uuid=lz_uuid)
                    except requests.exceptions.HTTPError as e:
                        exception_str = str(e)
                        logger.debug(
                            "Provided UUID may not be associated with a Landing Zone. HTTP error %s"
                            % exception_str)

                # Request input from user.
                # Behaviour: depends on user reply to questions.
                if not not_project_uuid:
                    # Active lz available
                    # Ask the user whether to use the latest available LZ or create a new one.
                    if lz_irods_path:
                        logger.info("Found active Landing Zone: %s" %
                                    lz_irods_path)
                        if (not input("Can the process use this path? [yN] ").
                                lower().startswith("y")):
                            logger.info(
                                "...an alternative is to create another Landing Zone using the UUID %s"
                                % in_destination)
                            if (input(
                                    "Can the process create a new landing zone? [yN] "
                            ).lower().startswith("y")):
                                lz_uuid, lz_irods_path = self.create_landing_zone(
                                    project_uuid=in_destination,
                                    assay_uuid=assay_uuid)
                            else:
                                msg = "Not possible to continue the process without a landing zone path. Breaking..."
                                logger.info(msg)
                                raise UserCanceledException(msg)

                    # No active lz available
                    # Ask the user whether to create a new one.
                    else:
                        logger.info(
                            "No active Landing Zone available for UUID %s" %
                            in_destination)
                        if (input(
                                "Can the process create a new landing zone? [yN] "
                        ).lower().startswith("y")):
                            lz_uuid, lz_irods_path = self.create_landing_zone(
                                project_uuid=in_destination,
                                assay_uuid=assay_uuid)
                        else:
                            msg = "Not possible to continue the process without a landing zone path. Breaking..."
                            logger.info(msg)
                            raise UserCanceledException(msg)

        # Not able to process - raise exception.
        # UUID provided is not associated with project nor lz.
        if lz_irods_path is None:
            msg = "Data provided by user is not a valid UUID. Please review input: {0}".format(
                in_destination)
            logger.error(msg)
            raise ParameterException(msg)

        # Log
        logger.info("Target iRODS path: %s" % lz_irods_path)

        # Return
        return lz_uuid, lz_irods_path
Code Example #29
def store_action(scope, provider):

    # implement InfluxDB line protocol
    # using the requests lib
    # scope is a tag

    logger.debug("store_action")
    logger.debug("Scope: {}".format(scope))
    logger.debug("Influx host: {}".format(influx_host))
    logger.debug("Influx port: {}".format(influx_port))
    logger.debug("Influx endpoint: {}".format(influx_http_endpoint))
    logger.debug("Influx database: {}".format(influx_database))

    payload = ""

    # TODO: the measurement should contain the experiment name label
    if provider['type'] == 'python':
        payload = encode_payload_in_line_protocol(
            "chaos_toolkit.actions",
            tags={
                "experiment": "exp1",
                "scope": scope
            },
            fields=provider
        )

    r = requests.post("http://{}:{}{}".format(
        influx_host, influx_port, influx_http_endpoint),
        params={"db": influx_database},
        data=payload)

    if (r.status_code != 204):
        logger.error("Error sending data to InfluxDB: {}".format(r.json()))

    logger.debug("store_action ended")
    return 1
Code Example #30
File: core.py Project: snakedragondevs/leviathan
 async def tick_processor(self):
     while True:  # while server is running
         self.tick()
         logger.debug('ticking {}'.format(self.tick_counter))
         await asyncio.sleep(1)
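A loop coroutine like this is typically scheduled once on the running event loop; a sketch, with the owning class name made up:

    server = Core()  # hypothetical: whatever class defines tick_processor
    asyncio.get_event_loop().create_task(server.tick_processor())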
Code Example #31
     raise exceptions.AuthenticationError(err.args[0])
 if candidates:
     target = candidates.pop()
 else:
     raise exceptions.ProviderError(
         provider="AnsibleTower",
         message=f"{subject.capitalize()} not found by name: {name}",
     )
 payload = {"extra_vars": str(kwargs)}
 if self.inventory:
     payload["inventory"] = self.inventory
 else:
     logger.info(
         "No inventory specified, Ansible Tower will use a default.")
 logger.debug(
     f"Launching {subject}: {url_parser.urljoin(self.url, str(target.url))}\n"
     f"{payload=}")
 job = target.launch(payload=payload)
 job_number = job.url.rstrip("/").split("/")[-1]
 job_api_url = url_parser.urljoin(self.url, str(job.url))
 if self._is_aap:
     job_ui_url = url_parser.urljoin(
         self.url, f"/#/jobs/{subject}/{job_number}/output")
 else:
     job_ui_url = url_parser.urljoin(self.url,
                                     f"/#/{subject}s/{job_number}")
 helpers.emit(api_url=job_api_url, ui_url=job_ui_url)
 logger.info("Waiting for job: \n"
             f"API: {job_api_url}\n"
             f"UI: {job_ui_url}")
 job.wait_until_completed(
Code Example #32
def check_submission(bot, bot_checker: "BotChecker", to_check: Bot):
    # TODO: make this method async
    if bot_checker is None:
        return

    botlistbot_user = User.botlist_user_instance()

    log.debug("Checking bot {}...".format(to_check.username))

    def reject(reason):
        to_check.delete_instance()
        msg = notify_submittant_rejected(
            bot,
            botlistbot_user,
            notify_submittant=True,
            reason=reason,
            to_reject=to_check,
        )
        bot.formatter.send_message(settings.BOTLIST_NOTIFICATIONS_ID, msg)

    try:
        peer = bot_checker.resolve_bot(to_check)
    except UsernameNotOccupied:
        to_check.delete_instance()
        reject(
            "The entity you submitted either does not exist or is not a Telegram bot."
        )
        return

    bot_checker.update_bot_details(to_check, peer)

    if to_check.userbot:
        reject(
            "You submitted the name of a Telegram user, not one of a bot. If you're trying to "
            "submit a userbot, please contact the BLSF directly ("
            "@BotListChat)."
        )
        return

    # Check online state
    response = loop.run_until_complete(
        bot_checker.get_ping_response(
            to_check, timeout=18, try_inline=to_check.inlinequeries
        )
    )

    is_offline = not bool(response)

    if is_offline:
        reject(
            "The bot you sent seems to be offline, unfortunately. Feel free to submit it again "
            "when it's back up 😙"
        )
        return

    now = datetime.datetime.now()
    to_check.last_ping = now
    to_check.last_response = now

    loop.run_until_complete(add_keywords(bot, response, to_check))

    # Download profile picture
    if settings.DOWNLOAD_PROFILE_PICTURES:
        # TODO: does this work asynchronously?
        loop.run_until_complete(download_profile_picture(bot, bot_checker, to_check))

    to_check.save()
    log.info(f"{to_check} was evaluated and looks good for approval.")
Code Example #33
    def VM_FAULT_and_report(self, id, *args):
        self._VMState |= VMState.FAULT

        if id == VMFault.INVALID_JUMP:
            error_msg = "Attemping to JMP/JMPIF/JMPIFNOT to an invalid location."

        elif id == VMFault.INVALID_CONTRACT:
            script_hash = args[0]
            error_msg = "Trying to call an unknown contract with script_hash {}\nMake sure the contract exists on the blockchain".format(script_hash)

        elif id == VMFault.CHECKMULTISIG_INVALID_PUBLICKEY_COUNT:
            error_msg = "CHECKMULTISIG - provided public key count is less than 1."

        elif id == VMFault.CHECKMULTISIG_SIGNATURE_ERROR:
            if args[0] < 1:
                error_msg = "CHECKMULTISIG - Minimum required signature count cannot be less than 1."
            else:  # m > n
                m = args[0]
                n = args[1]
                error_msg = "CHECKMULTISIG - Insufficient signatures provided ({}). Minimum required is {}".format(m, n)

        elif id == VMFault.UNPACK_INVALID_TYPE:
            item = args[0]
            error_msg = "Failed to UNPACK item. Item is not an array but of type: {}".format(type(item))

        elif id == VMFault.PICKITEM_INVALID_TYPE:
            index = args[0]
            item = args[1]
            error_msg = "Cannot access item at index {}. Item is not an array or dict but of type: {}".format(index, type(item))

        elif id == VMFault.PICKITEM_NEGATIVE_INDEX:
            error_msg = "Attempting to access an array using a negative index"

        elif id == VMFault.PICKITEM_INVALID_INDEX:
            index = args[0]
            length = args[1]
            error_msg = "Array index is less than zero or {} exceeds list length {}".format(index, length)

        elif id == VMFault.APPEND_INVALID_TYPE:
            item = args[0]
            error_msg = "Cannot append to item. Item is not an array but of type: {}".format(type(item))

        elif id == VMFault.REVERSE_INVALID_TYPE:
            item = args[0]
            error_msg = "Cannot REVERSE item. Item is not an array but of type: {}".format(type(item))

        elif id == VMFault.REMOVE_INVALID_TYPE:
            item = args[0]
            index = args[1]
            error_msg = "Cannot REMOVE item at index {}. Item is not an array but of type: {}".format(index, type(item))

        elif id == VMFault.REMOVE_INVALID_INDEX:
            index = args[0]
            length = args[1]

            if index < 0:
                error_msg = "Cannot REMOVE item at index {}. Index < 0".format(index)

            else:  # index >= len(items):
                error_msg = "Cannot REMOVE item at index {}. Index exceeds array length {}".format(index, length)

        elif id == VMFault.POP_ITEM_NOT_ARRAY:
            error_msg = "Items(s) not array: %s" % [item for item in args]

        elif id == VMFault.UNKNOWN_OPCODE:
            opcode = args[0]
            error_msg = "Unknown opcode found: {}".format(opcode)

        else:
            error_msg = id

        # these get used a lot now, so we don't want them printed to the console
        if id in [VMFault.THROW, VMFault.THROWIFNOT]:
            logger.debug("({}) {}".format(self.ops_processed, id))
        else:
            logger.error("({}) {}".format(self.ops_processed, error_msg))

        return
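The varargs convention the method expects can be read off the branches above; a sketch of one call site (hypothetical, not quoted from neo-python):

    self.VM_FAULT_and_report(VMFault.PICKITEM_INVALID_INDEX, index, len(items))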
Code Example #34
 async def schedule_conversation_deletion(self, peer, delay=5):
     await asyncio.sleep(delay)
     self.send(DeleteHistory(await self.resolve_peer(peer), max_id=999999999, just_clear=True))
     log.debug("Deleted conversation with {}".format(peer))
Code Example #35
def _watch_n_reload(watch_dir: str = "", debug: bool = True):  # pylint: disable=too-many-branches, too-many-statements
    # fmt: on
    """ watch and reload plugins in a directory.

    Default to the current directory
    """

    if debug:
        logzero.loglevel(10)
    else:
        logzero.loglevel(20)

    _watch_n_reload.flag = False  # break flag

    if not watch_dir:
        watch_dir = CURDIR
    logger.debug("\n\tstarting watchgod in %s\n", watch_dir)

    if not Path(watch_dir).is_dir():
        logger.error(" %s is not a directory or does not exist, exiting...")
        return None

    _ = Path(watch_dir).absolute()

    # cache watch_dir absolute path
    _watch_n_reload.watch_dir = _.__str__()

    p_dir = _.parent.__str__()  # parent dir of watch_dir
    # m_dir for module_path needed in the following
    m_dir = _.stem  # module dir name

    # make sure it's a package (__init__.py present)
    if not (_ / "__init__.py").exists():
        logger.error(" __init__.py not present in %s", _)
        logger.info(
            "You need to place an __init__.py in the directory *%s*. Exiting... not watching the directory...",
            _)
        return None

    # append p_dir to sys.path if not already in sys.path
    if p_dir not in sys.path:
        sys.path.append(p_dir)

    # pylint: disable=protected-access
    logger.debug("\n\t>plugin.PluginManager._plugins: %s",
                 plugin.PluginManager._plugins)

    # nonebot.load_plugins(mdir)  # this does not work

    logger.debug("\n\t>plugin.PluginManager._plugins: %s",
                 plugin.PluginManager._plugins)

    # async for changes in awatch(watch_dir):
    for changes in watch(watch_dir):
        # indicator
        _watch_n_reload.watching = True

        if _watch_n_reload.flag:
            logger.debug("breaking from the changes loop")
            break

        # print("type: ", type(changes), "dir: ", dir(changes))
        # print("changes %s" % changes)
        logger.debug("changes %s", changes)

        list_ = []
        for _ in changes:
            # for _i in _:

            flag, _i = _  # flag: Change.modified, Change.deleted, Change.added
            flag = str(flag)

            logger.debug(" flag: %s, file: %s", flag, _i)

            if flag.endswith("deleted"):
                try:
                    # module_path = f"{Path(CURDIR).stem}.{Path(_i).stem}"
                    # module_path = f"{Path(watch_dir)}.{Path(_i).stem}"
                    module_path = f"{m_dir}.{Path(_i).stem}"

                    if module_path in plugin.PluginManager._plugins:
                        res = plugin.PluginManager.remove_plugin(module_path)
                        logger.info("\n\t %s removed: %s", module_path, res)

                    _ = """
                    try:
                        del plugin.PluginManager._plugins[module_path]
                    except Exception as exc:
                        logger.error(f" del plugin.PluginManager._plugin[{module_path}]: %s", exc)
                    # """

                except Exception as exc:
                    logger.error("PluginManager.remove exc: %s", exc)
                continue

            _ = """
            if flag.endswith("added"):
                try:
                    module_path = f"{Path(CURDIR).stem}.{Path(_i).stem}"
                    res = plugin.load_plugin(module_path)
                    logger.info(" %s added, %s", module_path, res)
                except Exception as exc:
                    logger.error("plugin.load_plugin exc: %s", exc)
                continue
            # """

            try:
                file_ = Path(_i)
                if file_.exists():
                    list_.append(file_.stem)
            except Exception as exc:
                logger.debug("file_ = Path(_i) exc: %s (expected)", exc)

        logger.info("changed file: *%s*", list_)

        for _ in list_:
            try:
                # _ = str(Path(CURDIR) / _)

                logger.debug("\n\t>plugin.PluginManager._plugins: %s",
                             plugin.PluginManager._plugins)
                logger.debug(" >>> re/loading %s", _)

                # _ = plugin.reload_plugin(f"plugins1.{_}")

                # module_path = f"{Path(CURDIR).stem}.{_}"
                # module_path = f"{Path(watch_dir)}.{_}"
                module_path = f"{m_dir}.{_}"
                logger.debug(" module_path: %s", module_path)

                res = plugin.reload_plugin(module_path)
                # try load_plugin for newly added files
                if not res:
                    res = plugin.load_plugin(module_path)

                logger.info(" #># %s reloaded: [%s]", module_path, res)

                logger.debug("\n\t>plugin.PluginManager._plugins: %s",
                             plugin.PluginManager._plugins)

            except Exception as exc:
                logger.error(" #plugin.reaload exc: %s", exc)

        # await asyncio.sleep(2)
        sleep(2)

    logger.debug(
        "end of watchgod -- this will only materialize when _watch_n_reload.flag is set to True."
    )

    _watch_n_reload.watching = False

    return None
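The function attribute set near the top doubles as a stop switch: code elsewhere can end the watch loop by flipping it (a sketch):

    _watch_n_reload.flag = True  # checked at the top of each change batch; breaks the for loop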
Code Example #36
    def _best_match(self, videos):
        """ Select the best matching video from a list of videos. """
        if not videos:
            log.error("No videos found on YouTube for a given search")
            return None

        if const.args.manual:
            log.info(self.raw_song)
            sys.stdout.flush()
            log.info("0. Skip downloading this song.\n")
            sys.stdout.flush()
            # fetch all video links on first page on YouTube
            for i, v in enumerate(videos):
                log.info(
                    u"{0}. {1} {2} {3}".format(
                        i + 1,
                        v["title"],
                        v["videotime"],
                        "http://youtube.com/watch?v=" + v["link"],
                    )
                )
            # let user select the song to download
            result = internals.input_link(videos)
            if result is None:
                return None
        else:
            if not self.meta_tags:
                # if the metadata could not be acquired, take the first result
                # from Youtube because the proper song length is unknown
                result = videos[0]
                log.debug(
                    "Since no metadata found on Spotify, going with the first result"
                )
                sys.stdout.flush()
            else:
                # filter out videos that do not have a similar length to the Spotify song
                duration_tolerance = 10
                max_duration_tolerance = 20
                possible_videos_by_duration = []

                # start with a reasonable duration_tolerance, and increment duration_tolerance
                # until one of the Youtube results falls within the correct duration or
                # the duration_tolerance has reached the max_duration_tolerance
                while len(possible_videos_by_duration) == 0:
                    possible_videos_by_duration = list(
                        filter(
                            lambda x: abs(x["seconds"] - self.meta_tags["duration"])
                            <= duration_tolerance,
                            videos,
                        )
                    )
                    duration_tolerance += 1
                    if duration_tolerance > max_duration_tolerance:
                        log.error(
                            "{0} by {1} was not found.".format(
                                self.meta_tags["name"],
                                self.meta_tags["artists"][0]["name"],
                            )
                        )
                        sys.stdout.flush()
                        return None

                result = possible_videos_by_duration[0]

        if result:
            url = "http://youtube.com/watch?v={0}".format(result["link"])
        else:
            url = None

        return url
Code Example #37
def create_k8s_api_client(secrets: Secrets = None) -> client.ApiClient:
    """
    Create a Kubernetes client from:

    1. From a local configuration file if it exists (`~/.kube/config`). You
       can specify which context you want to use as well through the
       `KUBERNETES_CONTEXT` key in the environment or in the `secrets` object.
    2. From the cluster configuration if executed from a Kubernetes pod and
       the CHAOSTOOLKIT_IN_POD is set to `"true"`.
    3. From a mix of the following environment keys:

        * KUBERNETES_HOST: Kubernetes API address

        You can authenticate with a token via:
        * KUBERNETES_API_KEY: the API key to authenticate with
        * KUBERNETES_API_KEY_PREFIX: the key kind, if not set, defaults to
          "Bearer"

        Or via a username/password:
        * KUBERNETES_USERNAME
        * KUBERNETES_PASSWORD

        Or via SSL:
        * KUBERNETES_CERT_FILE
        * KUBERNETES_KEY_FILE

        Finally, you may disable SSL verification against HTTPS endpoints:
        * KUBERNETES_VERIFY_SSL: should we verify the SSL (unset means no)
        * KUBERNETES_CA_CERT_FILE: path to the CA certificate when verification
          is expected

        You may pass a secrets dictionary, in which case values will be looked
        up there before the environment.
    """
    env = os.environ
    secrets = secrets or {}

    def lookup(k: str, d: str = None) -> str:
        return secrets.get(k, env.get(k, d))

    if has_local_config_file():
        context = lookup("KUBERNETES_CONTEXT")
        logger.debug("Using Kubernetes context: {}".format(context
                                                           or "default"))
        return config.new_client_from_config(context=context)

    elif env.get("CHAOSTOOLKIT_IN_POD") == "true":
        config.load_incluster_config()
        return client.ApiClient()

    else:
        configuration = client.Configuration()
        configuration.debug = True
        configuration.host = lookup("KUBERNETES_HOST", "http://localhost")
        configuration.verify_ssl = lookup("KUBERNETES_VERIFY_SSL",
                                          False) is not False
        configuration.cert_file = lookup("KUBERNETES_CA_CERT_FILE")

        if "KUBERNETES_API_KEY" in env or "KUBERNETES_API_KEY" in secrets:
            configuration.api_key['authorization'] = lookup(
                "KUBERNETES_API_KEY")
            configuration.api_key_prefix['authorization'] = lookup(
                "KUBERNETES_API_KEY_PREFIX", "Bearer")
        elif "KUBERNETES_CERT_FILE" in env or \
                "KUBERNETES_CERT_FILE" in secrets:
            configuration.cert_file = lookup("KUBERNETES_CERT_FILE")
            configuration.key_file = lookup("KUBERNETES_KEY_FILE")
        elif "KUBERNETES_USERNAME" in env or "KUBERNETES_USERNAME" in secrets:
            configuration.username = lookup("KUBERNETES_USERNAME")
            configuration.password = lookup("KUBERNETES_PASSWORD", "")

    return client.ApiClient(configuration)
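
A hedged usage sketch for the factory above; the context name is hypothetical, and `CoreV1Api` comes from the same kubernetes `client` package this module already imports:

# local kubeconfig with an explicit context passed through the secrets object
api = create_k8s_api_client(secrets={"KUBERNETES_CONTEXT": "my-cluster"})
v1 = client.CoreV1Api(api)
pods = v1.list_namespaced_pod(namespace="default")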
コード例 #38
0
ファイル: demo2.py プロジェクト: stungkit/logzero
# Import the `logzero.logger` instance
import logzero
from logzero import logger

logzero.loglevel(logzero.WARNING)

# Start logging
logger.debug("hello")
logger.info("info")
logger.warning("warn")
logger.error("error")
コード例 #39
0
ファイル: Block.py プロジェクト: LuoRyan/neo-python
 def RebuildMerkleRoot(self):
     """Rebuild the merkle root of the block"""
     logger.debug("Rebuilding merkle root!")
     if self.Transactions is not None and len(self.Transactions) > 0:
         self.MerkleRoot = MerkleTree.ComputeRoot([tx.Hash for tx in self.Transactions])
コード例 #40
0
    def _dryrun_method(self):
        if not self.dryrun:
            return

        if not self.dryrun_config_folder or not os.path.isdir(
                self.dryrun_config_folder):
            log.warning("config folder {} not found - exit script".format(
                self.dryrun_config_folder))
            sys.exit("config folder {} not found".format(
                self.dryrun_config_folder))

        log.debug("** config folder = {}".format(self.dryrun_config_folder))

        # pe_routers = [ p.split('//')[1] for p in self.pelist ]
        # log.debug("PE router list = %s" % pe_routers)

        def validate_config_filename(d, f):
            """
            Some validation if we want to use the config file
            """
            validated = True
            if not os.path.isfile(os.path.join(d, f)):
                validated = False
            if f.startswith("."):
                validated = False
            m = re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\.txt", f)
            if m:
                validated = False
            return validated

        configs = [
            f for f in os.listdir(self.dryrun_config_folder)
            if validate_config_filename(self.dryrun_config_folder, f)
        ]
        log.debug("configs found in {}: {}".format(self.dryrun_config_folder,
                                                   configs))

        supported_routertypes = RouterFactory.SupportedRouterTypes()

        # start the preparser to find the function of each config
        self.pre_parser(configs)

        for c in configs:
            ## skip files starting with "."
            # if c.startswith("."):
            #    log.info("skipping hostname starting with . : %s" % c)
            #    continue
            ## remove any extensions from the hostnames
            routername = c.split(".")[0]
            ## check if we allow IP addresses as hostname
            if not self.allow_ip_as_hostname:
                if re.match(r"[0-9]+\.[0-9]+\.[0-9]+\.[0-9]", routername):
                    log.info(
                        "skipping hostname '{}' because ip addresses are not allowed"
                        .format(routername))
                    continue
            # r = RouterFactory()

            # if routername.lower() in pe_routers:
            #    type = "PE"
            # else:
            #    type = "CPE"

            config_function = self.find_config_function(c)

            if config_function not in supported_routertypes:
                log.warn("unsupported router type '{}', skip config file: {}".
                         format(config_function, c))
                continue

            # rtr = r.factory(routertype=type, saveconfig=None)
            rtr = RouterFactory.NewRouter(routertype=config_function,
                                          configfile=c)

            log.debug("Opening config file: {}".format("{}/{}".format(
                self.dryrun_config_folder, c)))
            with open("{}/{}".format(self.dryrun_config_folder, c),
                      encoding="latin-1") as configfile:
                config = configfile.read()
            log.debug("** DRYRUN - parsing: {} ({})".format(
                c, config_function))
            self._parser_running_config(rtr, config)

            # for CPE routers in dryrun method, check the config file age
            if int(self.dryrun_config_age) > 0 and config_function == "CPE":
                log.debug("check if config is less than {} days old".format(
                    self.dryrun_config_age))
                oldest_time = int(
                    time()) - (int(self.dryrun_config_age) * 3600 * 24)

                if int(rtr.getLastSeen()) < int(oldest_time):
                    log.warning(
                        "config is older than the expected {} days ({}) -- skipping: {}"
                        .format(self.dryrun_config_age, oldest_time, c))
                    continue
                else:
                    log.debug("config age = {}".format(rtr.getLastSeen()))

            ## store the router object
            self._store_router_object(rtr)
コード例 #41
0
ファイル: settings.py プロジェクト: iburgoa13/lsql
"""
Django settings for lsql project. Loads settings_shared and settings_dev or settings_deploy
depending on the value of DJANGO_DEVELOPMENT

Generated by 'django-admin startproject' using Django 3.0.7.

For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""

import os
from logzero import logger

# Load common settings
from .settings_shared import *

# Load development or deployment settings
if os.environ.get('DJANGO_DEVELOPMENT'):
    logger.debug('Loading DEVELOPMENT settings')
    from .settings_dev import *
else:
    logger.debug('Loading DEPLOY settings')
    from .settings_deploy import *
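
Development mode is selected purely by the presence of the environment variable. A hedged sketch of toggling it before Django initializes; the module path `lsql.settings` is taken from the project name above:

import os

os.environ["DJANGO_DEVELOPMENT"] = "1"  # any non-empty value loads settings_dev
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lsql.settings")

import django
django.setup()  # settings_dev is loaded because DJANGO_DEVELOPMENT is set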
コード例 #42
0
 def ParseDevices(self, type):
     if self.dryrun:
         log.debug("DRYRUN MODE - no telnet/ssh connections made")
         self._dryrun_method()
         return
コード例 #43
0
 def __init__(self, **kwargs):
     # Get the specified AT instance
     logger.debug(f"AnsibleTower instantiated with {kwargs=}")
     instance_name = kwargs.pop("AnsibleTower", None)
     # Validate the AnsibleTower-specific settings
     self._validate_settings(instance_name)
     # get our instance settings
     self.url = settings.ANSIBLETOWER.base_url
     self.uname = settings.ANSIBLETOWER.get("username")
     self.pword = settings.ANSIBLETOWER.get("password")
     self.token = settings.ANSIBLETOWER.get("token")
     self._inventory = (kwargs.get("tower_inventory")
                        or settings.ANSIBLETOWER.inventory)
     # Init the class itself
     self._construct_params = []
     config = kwargs.get("config", awxkit.config)
     config.base_url = self.url
     root = kwargs.get("root")
     if root is None:
         root = awxkit.api.Api()  # support mock stub for unit tests
     # Prefer token if it's set, otherwise use username/password
     # auth paths for the API taken from:
     # https://github.com/ansible/awx/blob/ddb6c5d0cce60779be279b702a15a2fddfcd0724/awxkit/awxkit/cli/client.py#L85-L94
     # unit test mock structure means the root API instance can't be loaded on the same line
     if self.token:
         helpers.emit(auth_type="token")
         logger.info("Using token authentication")
         config.token = self.token
         try:
             root.connection.login(username=None,
                                   password=None,
                                   token=self.token,
                                   auth_type="Bearer")
         except awxkit.exceptions.Unauthorized as err:
             raise exceptions.AuthenticationError(err.args[0])
         versions = root.get().available_versions
         try:
             # lookup the user that authenticated with the token
             # If a username was specified in config, use that instead
             my_username = (self.uname or
                            versions.v2.get().me.get().results[0].username)
         except (IndexError, AttributeError):
             # lookup failed for whatever reason
             raise exceptions.ProviderError(
                 provider="AnsibleTower",
                 message="Failed to look up a username for the given token, "
                         "please check credentials",
             )
     else:  # dynaconf validators should have checked that either token or password was provided
         helpers.emit(auth_type="password")
         logger.info("Using username and password authentication")
         config.credentials = {
             "default": {
                 "username": self.uname,
                 "password": self.pword
             }
         }
         config.use_sessions = True
         root.load_session().get()
         versions = root.available_versions
         my_username = self.uname
     self.v2 = versions.v2.get()
     # Check whether we're running AAP (ver 4.0+)
     self._is_aap = self.v2.ping.get().version[0] != "3"
     self.username = my_username
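
The credential precedence implemented above (token wins over username/password) as an isolated, hedged sketch with hypothetical names:

def pick_auth(token=None, username=None, password=None):
    """Prefer a token when present; otherwise fall back to username/password."""
    if token:
        return {"auth_type": "Bearer", "token": token}
    if username:
        return {"auth_type": "session",
                "username": username, "password": password or ""}
    raise ValueError("either a token or a username/password pair is required")

pick_auth(token="abc123")  # -> {'auth_type': 'Bearer', 'token': 'abc123'}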
コード例 #44
0
ファイル: Mysql2Redis.py プロジェクト: logonmy/XX
    def column_to_redis(self,
                        table_name,
                        column,
                        key_name="",
                        key_type="list",
                        exists_set_name=None,
                        from_0=False,
                        del_key=False,
                        del_exists_key=False,
                        total=0,
                        ts=0,
                        key_filter=None,
                        key_name_filter=None,
                        *arg,
                        **kw):
        from_id_key = "k_add_" + self.db + "_" + table_name + "_" + key_name + "_from_id"
        redis_from_id = self.ConnRedis.get(from_id_key) if self.ConnRedis.get(
            from_id_key) else 0
        from_id = 0 if from_0 else redis_from_id
        k_ADDREDIS_FROM_ID = from_id if from_id else 0
        print(" ==== FROM_ID is " + str(from_id))

        sql = "Select id," + column + " FROM " + table_name + " WHERE  ID > %s LIMIT 1000"
        keys = 1
        if del_key:
            self.ConnRedis.delete(key_name)
        if del_exists_key:
            self.ConnRedis.delete(exists_set_name)
        num = 0
        key_type = key_type if key_type else get_key_type(key_name)
        while keys:
            val = k_ADDREDIS_FROM_ID,
            keys = self.ConnMysql.get_lists(sql, val)
            for one_key in keys:
                key = list(one_key)
                if key_filter:
                    key[1] = key_filter(key[1], **kw)
                k_ADDREDIS_FROM_ID = key[0]
                if key[1]:
                    if key_name_filter:
                        KeyName = key_name_filter(KeyName=KeyName, key=key[1])
                    if ts and ts < 0.01:
                        if k_ADDREDIS_FROM_ID % int(1 / ts) == 0:
                            time.sleep(1)
                    else:
                        time.sleep(ts)
                    res = 0
                    if exists_set_name:
                        if self.ConnRedis.sismember(exists_set_name, key[1]):
                            print("EXISTS >>> " + str(key[1]) +
                                  "  from _id >>  " + str(key[0]))
                            continue
                    if key_type == "list":
                        if kw.get("limit"):
                            if int(self.ConnRedis.llen(KeyName)) > kw["limit"]:
                                print("Too Much")
                                return
                        # push the selected column onto the list
                        if len(key) == 2:
                            res = self.ConnRedis.rpush(KeyName, key[1])
                        # push the first non-empty of two columns onto the list
                        elif len(key) == 3:
                            value = key[1] if key[1] else key[2]
                            res = self.ConnRedis.lpush(KeyName, value)
                        # push the first non-empty of three columns onto the list
                        elif len(key) == 4:
                            value = key[1] if key[
                                1] else key[2] if key[2] else key[3]
                            res = self.ConnRedis.lpush(KeyName, value)
                    elif key_type == "set":
                        if kw.get("limit"):
                            if int(self.ConnRedis.scard(KeyName)) > kw["limit"]:
                                print("Too Much")
                                return
                        res = self.ConnRedis.sadd(KeyName, key[1])
                    elif key_type == "kv":
                        res = self.ConnRedis.set(key[1], key[2])
                    else:
                        logger.debug("unknown key_type: %s", key_type)
                    if res and exists_set_name:
                        self.ConnRedis.sadd(exists_set_name, key[1])
                    self.ConnRedis.set(from_id_key, k_ADDREDIS_FROM_ID)
                    print(
                        kw.get("fn", "") + " add res :>> " + str(res) +
                        "  now id :>> " + str(key[0]))
                    num += 1
                    if total and num >= total:
                        print(" Done --- ")
                        return
                else:
                    print("Error key " + str(key[1]))
コード例 #45
0
    def execute(self) -> typing.Optional[int]:
        """Execute the transfer."""
        # Validate arguments
        res = self.check_args(self.args)
        if res:  # pragma: nocover
            return res

        # Logger
        logger.info("Starting cubi-tk snappy %s", self.command_name)
        logger.info("  args: %s", self.args)

        # Fix for ngs_mapping & variant_calling vs step
        if self.step_name is None:
            self.step_name = self.args.step

        # Find biomedsheet file
        biomedsheet_tsv = get_biomedsheet_path(start_path=self.args.base_path,
                                               uuid=self.args.destination)

        # Extract library names from sample sheet
        sheet = load_sheet_tsv(biomedsheet_tsv, self.args.tsv_shortcut)
        library_names = list(
            self.yield_ngs_library_names(sheet=sheet,
                                         min_batch=self.args.first_batch,
                                         max_batch=self.args.last_batch))
        logger.info("Libraries in sheet:\n%s",
                    "\n".join(sorted(library_names)))

        lz_uuid, transfer_jobs = self.build_jobs(library_names)
        logger.debug("Transfer jobs:\n%s",
                     "\n".join(map(lambda x: x.to_oneline(), transfer_jobs)))

        if self.fix_md5_files:
            transfer_jobs = self._execute_md5_files_fix(transfer_jobs)

        total_bytes = sum([job.bytes for job in transfer_jobs])
        logger.info(
            "Transferring %d files with a total size of %s",
            len(transfer_jobs),
            sizeof_fmt(total_bytes),
        )
        counter = Value(c_ulonglong, 0)
        with tqdm.tqdm(total=total_bytes, unit="B", unit_scale=True) as t:
            if self.args.num_parallel_transfers == 0:  # pragma: nocover
                for job in transfer_jobs:
                    irsync_transfer(job, counter, t)
            else:
                pool = ThreadPool(processes=self.args.num_parallel_transfers)
                for job in transfer_jobs:
                    pool.apply_async(irsync_transfer, args=(job, counter, t))
                pool.close()
                pool.join()

        # Validate and move transferred files
        # Behaviour: If flag is True and lz uuid is not None*,
        # it will ask SODAR to validate and move transferred files.
        # (*) It can be None if user provided path
        if lz_uuid and self.args.validate_and_move:
            self.move_landing_zone(lz_uuid=lz_uuid)
        else:
            logger.info(
                "Transferred files will \033[1mnot\033[0m be automatically moved in SODAR."
            )

        logger.info("All done")
        return None
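
The progress-accounting pattern above (shared counter, thread pool, one tqdm bar) in isolation; a sketch under hypothetical job sizes, not the cubi-tk implementation:

from ctypes import c_ulonglong
from multiprocessing import Value
from multiprocessing.pool import ThreadPool

import tqdm

def do_transfer(job_bytes, counter, bar):
    # ... do the copy, then account for it under the shared lock
    with counter.get_lock():
        counter.value += job_bytes
        bar.update(job_bytes)

jobs = [1024, 2048, 4096]  # hypothetical job sizes in bytes
counter = Value(c_ulonglong, 0)
with tqdm.tqdm(total=sum(jobs), unit="B", unit_scale=True) as bar:
    pool = ThreadPool(processes=4)
    for j in jobs:
        pool.apply_async(do_transfer, args=(j, counter, bar))
    pool.close()
    pool.join()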
コード例 #46
0
    def _parse_method_ast(self, method_ast, class_name=None):
        """Recurse through a method's AST to find anything useful"""
        name = method_ast.name
        self.methods[name] = {"calls": [], "covers": []}
        known_vars = {}
        logger.debug(f"Parsing method ast {name}")
        for line_node in method_ast.body:
            unparsed_line = self._ast_to_str(line_node)
            logger.debug(f"Investigating line: {unparsed_line}")
            known_entities = self._find_entity(unparsed_line)
            # parse the line and find the interests
            line_parser = NodeParser(known_entities=known_entities)
            line_parser.visit(line_node)
            logger.debug(f"Found interests {line_parser.interests}")
            # check to see if an entity was instanced and/or assigned to a variable
            for entity in line_parser.interests["entity_instance"]:
                if self.create_on_instance:
                    self.methods[name]["covers"].append(f"{entity} create")
                for key, val in line_parser.interests["assignment"].items():
                    if val == "create":
                        known_vars[key] = entity
            # catch the rest just in case we miss something
            for entity in [*known_entities, *known_vars]:
                if (entity in line_parser.interests["method_calls"]
                        and self.create_on_instance):
                    self.methods[name]["covers"].append(f"{entity} create")
            # check for method calls
            for meth_call in line_parser.interests["method_calls"]:
                if (not line_parser.interests["module_accessed"]
                        and not line_parser.interests["attributes_accessed"]):
                    # a non-external method is being called
                    self.methods[name]["calls"].append(meth_call)
                else:
                    found = False
                    # test for the method being a member of a module. module.method()
                    for module in line_parser.interests["module_accessed"]:
                        # We need to resolve uses of self and cls
                        if module in ["self", "cls"
                                      ] and self.classes.get(class_name):
                            #  logger.warning(f"class name: {class_name}, meth call: {meth_call}")
                            if class_name and meth_call in self.classes[
                                    class_name]["methods"]:
                                module = class_name
                            elif class_name and self.classes[class_name][
                                    "bases"]:
                                for base in self.classes[class_name]["bases"]:
                                    self._to_investigate.append(
                                        f"{base}.{meth_call}")
                            else:
                                logger.debug(
                                    f"Can't resolve {module} for {meth_call}")
                        if f"{module}.{meth_call}" in unparsed_line:
                            # the method belongs to this module
                            if module in known_entities:
                                self.methods[name]["covers"].append(
                                    f"{module} {meth_call}")
                            else:
                                self.methods[name]["calls"].append(
                                    f"{module} {meth_call}")
                            found = True
                            break
                        # if it isn't a direct member, see if it is related
                        # entity.something.method()
                        for attr in line_parser.interests[
                                "attributes_accessed"]:
                            # We need to resolve uses of self and cls
                            if attr in ["self", "cls"]:
                                if class_name and meth_call in self.classes[
                                        class_name]["methods"]:
                                    attr = class_name
                                elif class_name and self.classes[class_name][
                                        "bases"]:
                                    for base in self.classes[class_name][
                                            "bases"]:
                                        self._to_investigate.append(
                                            f"{base}.{attr}.{meth_call}")
                                else:
                                    logger.debug(
                                        f"Can't resolve {attr} for {module}'s {meth_call}"
                                    )
                            # now we need to determine
                            if f"{module}.{attr}.{meth_call}" in unparsed_line:
                                # the method belongs to this module
                                if module in known_entities:
                                    self.methods[name]["covers"].append(
                                        f"{module} {attr} {meth_call}")
                                elif (attr in known_entities
                                      or f"{module}.{attr}" in known_vars):
                                    self.methods[name]["covers"].append(
                                        f"{attr} {meth_call}")
                                else:
                                    self.methods[name]["calls"].append(
                                        f"{module} {attr} {meth_call}")
                                found = True
                                break

                    if not found:
                        self.methods[name]["calls"].append(
                            f"{meth_call}".strip())
                        self._to_investigate.append(meth_call)
            logger.debug(
                f"known vars: {known_vars}, known entities: {known_entities} \nMethod Report: {self.methods[name]}"
            )
コード例 #47
0
    def yield_ngs_library_names(self,
                                sheet,
                                min_batch=None,
                                max_batch=None,
                                batch_key="batchNo",
                                family_key="familyId"):
        """Yield all NGS library names from sheet.

        When ``min_batch`` is given, only the donors for which ``extra_infos[batch_key]``
        is at least ``min_batch`` will be used.

        This function can be overloaded, for example to only consider the indexes.

        :param sheet: Sample sheet.
        :type sheet: biomedsheets.models.Sheet

        :param min_batch: Minimum batch number to be extracted from the sheet. All samples in batches below this value
        will be skipped.
        :type min_batch: int

        :param max_batch: Maximum batch number to be extracted from the sheet. All samples in batches above this value
        will be skipped.
        :type max_batch: int

        :param batch_key: Batch number key in sheet. Default: 'batchNo'.
        :type batch_key: str

        :param family_key: Family identifier key. Default: 'familyId'.
        :type family_key: str
        """
        family_max_batch = self._build_family_max_batch(
            sheet, batch_key, family_key)

        # Process all libraries and filter by family batch ID.
        for donor in sheet.bio_entities.values():
            # Ignore below min batch number if applicable
            if min_batch is not None:
                batch = self._batch_of(donor, family_max_batch, batch_key,
                                       family_key)
                if batch < min_batch:
                    logger.debug(
                        "Skipping donor %s because %s = %d < min_batch = %d",
                        donor.name,
                        batch_key,
                        batch,
                        min_batch,
                    )
                    continue
            # Ignore above max batch number if applicable
            if max_batch is not None:
                batch = self._batch_of(donor, family_max_batch, batch_key,
                                       family_key)
                if batch > max_batch:
                    logger.debug(
                        "Skipping donor %s because %s = %d > max_batch = %d",
                        donor.name,
                        batch_key,
                        batch,
                        max_batch,
                    )
                    # It would be tempting to add a `break`, but there is no guarantee that
                    # the sample sheet is sorted.
                    continue
            for bio_sample in donor.bio_samples.values():
                for test_sample in bio_sample.test_samples.values():
                    for library in test_sample.ngs_libraries.values():
                        yield library.name
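
A hedged usage sketch; `transfer` is assumed to be an instance of the class defining the method above, and the shortcut value is hypothetical (`load_sheet_tsv` appears in example #45):

sheet = load_sheet_tsv("sheet.tsv", "germline")  # hypothetical arguments
for name in transfer.yield_ngs_library_names(sheet=sheet,
                                             min_batch=3, max_batch=7):
    print(name)  # only libraries of donors with 3 <= batchNo <= 7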
コード例 #48
0
 def close(self):
     #if not self.conn.closed:
     logger.debug(
         f'====== Close {self.host}.{self.database} using account {self.user} ======'
     )
     self.conn.close()
コード例 #49
0
def perform_buy_task(elgibleTicker, elgibleBuyAmount):

    coinBuyIncomplete = True
    coinBuyRetryCount = 0

    buyOrderUUID = ""

    indexInfo = DatabaseManager.get_index_info_model()

    retryLimit = indexInfo.OrderRetryAmount

    elgibleCoinTicker = DatabaseManager.get_ticker_model(elgibleTicker +
                                                         "/BTC")

    em = ExchangeManager()

    partial_fill_amount = 0
    partial_filled = False

    DatabaseManager.create_coin_lock_model(elgibleTicker)

    while coinBuyIncomplete:

        if coinBuyRetryCount >= retryLimit:
            coinBuyIncomplete = False
            logger.info("Buying of coin " + rebalanceTicker +
                        " failed after " + str(coinBuyRetryCount) +
                        " attempts")
            break
            # Cancel Order
        else:

            if CondexConfig.DEBUG == True:
                logger.debug("Putting in buy order")
            else:
                logger.info("Buying " + str(elgibleBuyAmount) + " of " +
                            elgibleTicker + " at " +
                            str(elgibleCoinTicker.BTCVal))
                buyOrderUUID = em.create_buy_order(
                    elgibleTicker, elgibleBuyAmount,
                    elgibleCoinTicker.BTCVal)['id']
                time.sleep(60 * indexInfo.OrderTimeout)

            # Check order succeded through
            if CondexConfig.DEBUG == True:
                logger.debug("Fetching order")
                coinBuyIncomplete = False
            else:

                order_result = em.fetch_order(buyOrderUUID)
                order_filled_amount = order_result['filled']

                if order_result['status'] == "closed":
                    logger.info("Bought coin " + elgibleTicker + " for " +
                                str(order_result['price']))
                    coinBuyIncomplete = False

                elif (
                        order_filled_amount * elgibleCoinTicker.BTCVal
                ) > CondexConfig.BITTREX_MIN_BTC_TRADE_AMOUNT and order_result[
                        'status'] == "open":
                    em.cancel_order(buyOrderUUID)
                    logger.debug("Bought partial of coin " +
                                 elgibleCoinTicker + " for " +
                                 str(order_result['price']))
                    coinBuyIncomplete = False

                else:
                    coinBuyRetryCount = coinBuyRetryCount + 1
                    if CondexConfig.DEBUG == True:
                        logger.debug("Canceling buy order")
                    else:
                        try:
                            em.cancel_order(buyOrderUUID)
                        except Exception:
                            # order failed to cancel; it was filled previously
                            coinBuyIncomplete = False
                        logger.debug("Buy Order Timeout Reached")
                    time.sleep(10)  #Magic Number

    # Delete the locks
    if CondexConfig.DEBUG != True:
        DatabaseManager.delete_coin_lock_model(elgibleTicker)
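
The buy loop above is a bounded retry with a per-attempt timeout. Its skeleton, as a hedged sketch with hypothetical callables:

import time

def retry_order(place, fetch, cancel, retry_limit, wait_seconds):
    """Place an order, wait, and poll it; cancel and retry up to retry_limit."""
    for _ in range(retry_limit):
        order_id = place()
        time.sleep(wait_seconds)
        if fetch(order_id)["status"] == "closed":
            return order_id
        cancel(order_id)
    return None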
コード例 #50
0
def client(service_name: str,
           version: str = 'v1',
           secrets: Secrets = None) -> Resource:
    """
    Create a client for the given service. 

    To authenticate, you need to create a service account manually and either
    pass the filename or the content of the file into the `secrets` object.

    So, in the experiment, use one of the following:

    ```json
    {
        "gce": {
            "service_account_file": "/path/to/file.json"
        }
    }
    ```

    ```json
    {
        "gce": {
            "service_account_info": {
                "type": "service_account",
                "project_id": "...",
                "private_key_id": "...",
                "private_key": "...",
                "client_email": "...",
                "client_id": "...",
                "auth_uri": "https://accounts.google.com/o/oauth2/auth",
                "token_uri": "https://accounts.google.com/o/oauth2/token",
                "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
                "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/...."
            }
        }
    }
    ```

    You would likely want to read value from the environment or Vault if you
    use the second approach, and avoid storing sensitive data into the
    experiment itself.

    Make sure your service account has enough permissions for the activities
    you wish to conduct (though do not give it too wide permissions either).

    See: https://developers.google.com/api-client-library/python/auth/service-accounts
    Also: http://google-auth.readthedocs.io/en/latest/reference/google.oauth2.service_account.html
    """  # noqa: E501
    secrets = secrets or {}
    service_account_file = secrets.get("service_account_file")
    service_account_info = secrets.get("service_account_info")

    credentials = None
    if service_account_file:
        service_account_file = os.path.expanduser(service_account_file)
        if not os.path.exists(service_account_file):
            raise FailedActivity("GCE account settings not found at {}".format(
                service_account_file))

        logger.debug(
            "Using GCE credentials from file: {}".format(service_account_file))
        credentials = Credentials.from_service_account_file(
            service_account_file)
    elif service_account_info and isinstance(service_account_info, dict):
        logger.debug("Using GCE credentials embedded into secrets")
        credentials = Credentials.from_service_account_info(
            service_account_info)
    else:
        raise FailedActivity(
            "missing GCE credentials settings in secrets of this activity")

    if credentials is not None and credentials.expired:
        logger.debug("GCE credentials need to be refreshed as they expired")
        credentials.refresh(httplib2.Http())

    if not credentials:
        raise FailedActivity(
            "missing a service account to authenticate with the "
            "Google Cloud Services")

    return build(service_name, version=version, credentials=credentials)
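
A hedged usage sketch; since the function reads the flat keys directly, pass the inner mapping of the `gce` block shown above (the key path and project values are hypothetical):

compute = client("compute", version="v1",
                 secrets={"service_account_file": "~/keys/chaos-sa.json"})
result = compute.instances().list(project="my-project",
                                  zone="europe-west1-b").execute()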
コード例 #51
0
ファイル: metrics.py プロジェクト: fractos/cupcake
def deliver_metric_to_groups(metric, metrics_groups, metrics_definitions):
    logger.debug("deliver_metric_to_groups ({})".format(",".join(metrics_groups)))
    for metrics_group_id in metrics_groups:
        deliver_metric_to_group(metric, metrics_group_id, metrics_definitions)
コード例 #52
0
ファイル: client.py プロジェクト: freshman118/automatic_test
    def templ_multi_match(self,
                          templ,
                          match_rate=0.75,
                          search_area=None,
                          debug=False):
        """ 模板多次匹配

        Args:
            templ:
            match_rate:
            search_area:
            debug

        Returns:
            (len(ce_pts), ce_pts)
        """
        # load the template image
        templ = self.transfer2cv(templ)  # template image
        image = self._full_screen  # source image
        h, w = templ.shape[:2]
        logger.debug('width: {}, height: {}'.format(w, h))
        # restrict matching to the requested region, if any
        if search_area is not None:
            ((lx_search_area, ly_search_area), (rx_search_area,
                                                ry_search_area)) = search_area
            image = self._full_screen[ly_search_area:ry_search_area,
                                      lx_search_area:
                                      rx_search_area]  # crop the search region

        # normalized cross-correlation template matching
        matrix = cv2.matchTemplate(image, templ, cv2.TM_CCOEFF_NORMED)

        # keep every location whose match score reaches match_rate
        loc = numpy.where(matrix >= match_rate)
        # numpy.where returns (rows, cols); reverse and zip to get (x, y) points
        lf_pts = zip(*loc[::-1])  # top-left corners of the matches
        ce_pts = []  # list of template center points
        for lf_pt in lf_pts:
            # no search area: coordinates are already full-screen
            if search_area is None:
                ce_pt = (lf_pt[0] + w / 2, lf_pt[1] + h / 2)  # template center
                ce_pts.append(ce_pt)
                if debug:
                    rb_pt = (lf_pt[0] + w, lf_pt[1] + h)  # bottom-right corner of the match
                    cv2.rectangle(self._full_screen, lf_pt, rb_pt, (0, 0, 255),
                                  2)
                    cv2.imshow('template', templ)
                    cv2.imshow('image', image)
                    cv2.waitKey(0)
                print('Full screen:', len(ce_pts), ce_pts)
            # a search area was given: offset back to full-screen coordinates
            else:
                ce_pt = (lf_pt[0] + w / 2 + search_area[0][0],
                         lf_pt[1] + h / 2 + search_area[0][1])  # template center
                ce_pts.append(ce_pt)
                if debug:
                    rb_pt = (lf_pt[0] + w + search_area[0][0],
                             lf_pt[1] + h + search_area[0][1])  # bottom-right corner
                    cv2.rectangle(self._full_screen, lf_pt, rb_pt, (0, 0, 255),
                                  2)
                    cv2.imshow('template', templ)
                    cv2.imshow('image', image)
                    cv2.waitKey(0)
                # print('Search area:', len(ce_pts), ce_pts)
        # return after scanning every match location, not inside the loop
        return (len(ce_pts), ce_pts)
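
A hedged usage sketch for the matcher above; the instance name and template path are hypothetical:

count, centers = screen.templ_multi_match(
    "button.png",                      # template image
    match_rate=0.8,                    # stricter than the 0.75 default
    search_area=((0, 0), (800, 600)),  # ((left, top), (right, bottom))
    debug=False,
)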
コード例 #53
0
ファイル: metrics.py プロジェクト: fractos/cupcake
def deliver_metric(metric, metrics_id, metrics_definitions):
    logger.debug("deliver_metric: delivering to id {}".format(metrics_id))
    for metrics in metrics_definitions["metrics"]:
        if metrics["id"] == metrics_id:
            if metrics["provider"]["@type"] == "cloudwatch":
                metrics_cloudwatch(metric, metrics)
コード例 #54
0
def savexlsx_command(self, event=None) -> None:
    """ savexlsx_command.
    self is aligner
    self.Table.model.df: DataFrame of interest

    self.filename1, self.filename2

    """

    # check QUEUE_PA QUEUE_SA
    try:
        self.paligned = QUEUE_PA.get_nowait()
        queue1_put(QUEUE_PA, self.paligned)
    except Empty:
        self.paligned = False
    try:
        self.saligned = QUEUE_SA.get_nowait()
        queue1_put(QUEUE_SA, self.saligned)
    except Empty:
        self.saligned = False

    if QUEUE_P1.qsize():
        self.paras1 = QUEUE_P1.get_nowait()
        queue1_put(QUEUE_P1, self.paras1)
    if QUEUE_P2.qsize():
        self.paras2 = QUEUE_P2.get_nowait()
        queue1_put(QUEUE_P2, self.paras2)
    if QUEUE_PM.qsize():
        self.paras_merit = QUEUE_PM.get_nowait()
        queue1_put(QUEUE_PM, self.paras_merit)
    if QUEUE_S1.qsize():
        self.sents1 = QUEUE_S1.get_nowait()
        queue1_put(QUEUE_S1, self.sents1)
    if QUEUE_S2.qsize():
        self.sents2 = QUEUE_S2.get_nowait()
        queue1_put(QUEUE_S2, self.sents2)
    if QUEUE_SM.qsize():
        self.sents_merit = QUEUE_SM.get_nowait()
        queue1_put(QUEUE_SM, self.sents_merit)

    filename1 = self.filename1
    filename2 = self.filename2

    if not (filename1 and filename2):
        logger.info(" filename1: %s, filename2: %s not loaded", filename1, filename2)
        # message = f"filename1: *{filename1}*, filename2: *{filename2}* not loaded"
        messagebox.showwarning(title="Not ready", message=f"filename1: *{filename1}*, filename2: *{filename2}* not loaded")
        return None

    logger.debug("file1: %s", filename1)
    logger.debug("file2: %s", filename2)

    prefix = common_prefix(
        [
            Path(filename1).stem,
            Path(filename2).stem,
        ]
    )
    xlsxfile_p = gen_filename(f"{prefix}aligned-p.xlsx")
    xlsxfile_s = gen_filename(f"{prefix}aligned-s.xlsx")
    path_ = Path(filename1).parent
    xlsxfile_p = f"{path_ / xlsxfile_p}"
    xlsxfile_s = f"{path_ / xlsxfile_s}"

    msg = "\n"
    logger.debug(" self.saligned %s, self.paligned %s", self.saligned, self.paligned)

    logger.debug(" self.paras1: %s, self.paras2: %s", self.paras1, self.paras2)
    if self.paligned:
        df_ = DataFrame({
            "text1": self.paras1,
            "text2": self.paras2,
            "merit": self.paras_merit
        })
        df_.to_excel(xlsxfile_p, index=False, header=False)
        logger.info(" Aligned paras saved to %s", xlsxfile_p)
        msg += " Aligned paras saved to %s\n" % xlsxfile_p

    # logger.debug(" self.sents1[:5]: %s, self.sents2[:5]: %s", self.sents1[:5], self.sents2[:5])

    if self.saligned:
        _ = """
        df_ = DataFrame({
            "text1": self.sents1,
            "text2": self.sents2,
            "merit": self.sents_merit
        })
        # """
        df_ = self.Table.model.df
        # remove all empty rows
        # df0.replace("", np.nan).dropna(axis=0)
        # df_ = df_.replace("", np.nan).dropna(axis=0)
        df_.replace("", np.nan, inplace=True)
        df_.dropna(axis=0, inplace=True)

        try:
            df_.to_excel(xlsxfile_s, index=False, header=False)
            logger.info(" Aligned sents saved to %s", xlsxfile_s)
            msg += " Aligned sents saved to %s" % xlsxfile_s
        except Exception as exc:
            logger.error(" df_.to_excel exc: %s", exc)
            msg += " Saving xlsx exc: %s " % exc
    msg = msg.strip()
    if msg:
        messagebox.showinfo(title="File(s) saved", message=msg)
        logger.debug("xlsfile: %s, %s", xlsxfile_p, xlsxfile_s)
    else:
        message = "Do some work first..."
        messagebox.showwarning(title="Nothing to save, yet", message=message)
    logger.info("savexlsx_command")
コード例 #55
0
ファイル: metrics.py プロジェクト: fractos/cupcake
def get_metrics_in_group(metrics_group_id, metrics_definitions):
    logger.debug("get_metrics_in_group: {}".format(metrics_group_id))
    for metrics_group in metrics_definitions["groups"]:
        if metrics_group["id"] == metrics_group_id:
            for metrics_id in metrics_group["metrics"]:
                yield metrics_id
コード例 #56
0
import os

import logzero
from logzero import logger

from bee_aligner.common_prefix import common_prefix

from queue import Empty
from queue1_put import queue1_put
from queues import (QUEUE_PA, QUEUE_SA,
                    QUEUE_P1, QUEUE_P2, QUEUE_PM,
                    QUEUE_S1, QUEUE_S2, QUEUE_SM)

# logger = logzero.setup_logger(name=__file__, level=10)

_ = os.environ.get("ALIGNER_DEBUG")
if _ is not None and _.lower() in ["1", "true"]:
    logzero.loglevel(10)  # 10: DEBUG, default 20: INFO:
else:
    logzero.loglevel(20)
logger.debug('os.environ.get("ALIGNER_DEBUG"): %s', _)


def savexlsx_command(self, event=None) -> None:
    """ savexlsx_command.
    self is aligner
    self.Table.model.df: DataFrame of interest

    self.filename1, self.filename2

    """

    # check QUEUE_PA QUEUE_SA
    try:
        self.paligned = QUEUE_PA.get_nowait()
        queue1_put(QUEUE_PA, self.paligned)
コード例 #57
0
def desired_equals_healthy_tags(tags: List[Dict[str, str]],
                                configuration: Configuration = None,
                                secrets: Secrets = None) -> AWSResponse:
    """
    Check whether the desired capacity matches the number of healthy instances
    for each of the auto-scaling groups matching the tags provided.

    `tags` are expected as:
    [{
        'Key': 'KeyName',
        'Value': 'KeyValue'
    },
    ...
    ]

    Returns: bool
    """

    if not tags:
        raise FailedActivity("Non-empty tags is required")

    client = aws_client('autoscaling', configuration, secrets)

    # The following is needed because AWS API does not support filters
    # on auto-scaling groups

    # fetch all ASGs using paginator
    page_iterator = client.get_paginator(
        'describe_auto_scaling_groups').paginate(
            PaginationConfig={'PageSize': 100})
    asg_descrs = {'AutoScalingGroups': []}

    for page in page_iterator:
        asg_descrs['AutoScalingGroups'].extend(page['AutoScalingGroups'])

    filter_set = set(map(lambda x: "=".join([x['Key'], x['Value']]), tags))

    group_sets = list(
        map(
            lambda g: {
                'Name':
                g['AutoScalingGroupName'],
                'Tags':
                set(map(lambda t: "=".join([t['Key'], t['Value']]), g['Tags']))
            }, asg_descrs['AutoScalingGroups']))

    filtered_groups = [
        g['Name'] for g in group_sets if filter_set.issubset(g['Tags'])
    ]

    logger.debug("filtered groups: {}".format(filtered_groups))

    if filtered_groups:
        groups_descr = client.describe_auto_scaling_groups(
            AutoScalingGroupNames=filtered_groups)
    else:
        raise FailedActivity(
            "No auto-scaling groups matched the tags provided")

    return is_desired_equals_healthy(groups_descr)
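
A hedged usage sketch with hypothetical tag values (AWS configuration and secrets fall back to their defaults):

ok = desired_equals_healthy_tags(
    tags=[{'Key': 'Environment', 'Value': 'staging'},
          {'Key': 'Team', 'Value': 'platform'}])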
コード例 #58
0
async def check_bot(
        bot,
        bot_checker: BotChecker,
        to_check: BotModel,
        result_queue: asyncio.Queue
):
    log.debug("Checking bot {}...".format(to_check.username))

    try:
        peer = bot_checker.resolve_bot(to_check)
    except UsernameNotOccupied:
        markup = InlineKeyboardMarkup([[
            InlineKeyboardButton(captions.EDIT_BOT, callback_data=util.callback_for_action(
                CallbackActions.EDIT_BOT,
                dict(id=to_check.id)
            ))
        ]])
        text = "{} does not exist (anymore). Please resolve this " \
               "issue manually!".format(to_check.username)
        try:
            bot.send_message(settings.BLSF_ID, text, reply_markup=markup)
        except BadRequest:
            bot.send_notification(text)
        return await result_queue.put('not found')

    if not peer:
        return await result_queue.put('skipped')

    bot_checker.update_bot_details(to_check, peer=peer)

    # Check online state
    try:
        response = await bot_checker.get_ping_response(
            to_check,
            timeout=30,
            try_inline=to_check.inlinequeries)
    except UnknownError as e:
        await result_queue.put(e.MESSAGE)
        return
    except Exception as e:
        log.exception(e)
        await result_queue.put(str(e))
        return

    for _ in range(2):
        await result_queue.put('messages sent')

    was_offline = to_check.offline
    is_offline = response.empty if isinstance(response, Response) else not bool(response)

    now = datetime.now()
    to_check.last_ping = now
    if not is_offline:
        to_check.last_response = now

    if was_offline != is_offline:
        bot.send_message(settings.BOTLIST_NOTIFICATIONS_ID, '{} went {}.'.format(
            to_check.str_no_md,
            'offline' if is_offline else 'online'
        ), timeout=40)

    await add_keywords(bot, response, to_check)

    # Download profile picture
    if settings.DOWNLOAD_PROFILE_PICTURES:
        await download_profile_picture(bot, bot_checker, to_check)

    to_check.save()

    if settings.DELETE_CONVERSATION_AFTER_PING:
        await bot_checker.schedule_conversation_deletion(to_check.chat_id, 10)

    await disable_decider(bot, to_check)

    await result_queue.put('offline' if to_check.offline else 'online')
コード例 #59
0
ファイル: NodeLeader.py プロジェクト: LuoRyan/neo-python
 def clientConnectionFailed(self, connector, reason):
     address = "%s:%s" % (connector.host, connector.port)
     logger.debug("Dropped connection from %s " % address)
     for peer in NodeLeader.Instance().Peers:
         if peer.Address == address:
             peer.connectionLost()
コード例 #60
0
 def Log(self, msg):
     logger.debug("[%s][mode %s] %s - %s" %
                  (self.identifier, self.sync_mode, self.endpoint, msg))