Example #1
    def Account_SetVotes(self, engine):

        try:
            account = engine.EvaluationStack.Pop().GetInterface()

            vote_list = engine.EvaluationStack.Pop().GetArray()
        except Exception as e:
            logger.error("could not get account or votes: %s " % e)
            return False

        if account is None or len(vote_list) > 1024:
            return False

        if account.IsFrozen:
            return False

        balance = account.BalanceFor(Blockchain.SystemShare().Hash)

        if balance == Fixed8.Zero() and len(vote_list) > 0:
            return False

        acct = self._accounts.GetAndChange(account.AddressBytes)
        voteset = []
        for v in vote_list:
            vote = v.GetByteArray()
            # compare the serialized vote bytes, not the StackItem wrapper,
            # otherwise the duplicate check never matches
            if vote not in voteset:
                voteset.append(vote)
        acct.Votes = voteset

        # print("*****************************************************")
        # print("SET ACCOUNT VOTES %s " % json.dumps(acct.ToJson(), indent=4))
        # print("*****************************************************")
        return True
Example #2
    def get_by_contract(self, contract_hash):
        """
        Look up a set of notifications by the contract they are associated with
        Args:
            contract_hash (UInt160 or str): hash of contract for notifications to be retrieved

        Returns:
            list: a list of notifications
        """
        hash = contract_hash
        if isinstance(contract_hash, str) and len(contract_hash) == 40:
            hash = UInt160.ParseString(contract_hash)

        if not isinstance(hash, UInt160):
            raise Exception("Incorrect address format")

        contractlist_snapshot = self.db.prefixed_db(NotificationPrefix.PREFIX_CONTRACT).snapshot()
        results = []

        for val in contractlist_snapshot.iterator(prefix=bytes(hash.Data), include_key=False):
            if len(val) > 4:
                try:
                    event = SmartContractEvent.FromByteArray(val)
                    results.append(event)
                except Exception as e:
                    logger.error("could not parse event: %s %s" % (e, val))
        return results
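
A minimal usage sketch for the lookup above, assuming a NotificationDB-style instance named `ndb` exposing this method (the instance name and contract hash below are illustrative, not from the source):

    # hypothetical 40-character hex contract hash
    contract_hash = "d7678dd97c000be3f33e9362e673101bac4ca654"
    events = ndb.get_by_contract(contract_hash)
    logger.info("found %s notifications", len(events))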
Example #3
    def StepInto(self):
        if self._InvocationStack.Count == 0:
            self._VMState |= VMState.HALT

        if self._VMState & VMState.HALT > 0 or self._VMState & VMState.FAULT > 0:
            logger.info("stopping because vm state is %s " % self._VMState)
            return

        op = None

        if self.CurrentContext.InstructionPointer >= len(self.CurrentContext.Script):
            op = RET
        else:
            op = self.CurrentContext.OpReader.ReadByte(do_ord=False)

        self.ops_processed += 1

        try:
            if self._is_write_log:
                self.write_log("{} {}".format(self.ops_processed, ToName(op)))
            self.ExecuteOp(op, self.CurrentContext)
        except Exception as e:
            error_msg = "COULD NOT EXECUTE OP (%s): %s %s %s" % (self.ops_processed, e, op, ToName(op))
            self.write_log(error_msg)

            if self._exit_on_error:
                self._VMState |= VMState.FAULT
            else:
                logger.error(error_msg)
                logger.exception(e)
Example #4
 def __init__(self, path):
     try:
         self._db = SqliteDatabase(path, check_same_thread=False)
         PWDatabase.DBProxy().initialize(self._db)
         self.startup()
     except Exception as e:
         logger.error("database file does not exist, or incorrect permissions: %s" % e)
Example #5
    def Asset_Renew(self, engine):

        current_asset = engine.EvaluationStack.Pop().GetInterface()

        if current_asset is None:
            return False

        years = engine.EvaluationStack.Pop().GetBigInteger()

        asset = self._assets.GetAndChange(current_asset.AssetId.ToBytes())

        if asset.Expiration < Blockchain.Default().Height + 1:
            asset.Expiration = Blockchain.Default().Height + 1

        try:

            asset.Expiration = asset.Expiration + years * 2000000

        except Exception as e:
            logger.error("could not set expiration date %s " % e)

            asset.Expiration = sys.maxsize

        # tx = engine.ScriptContainer
        # print("*****************************************************")
        # print("Renewed ASSET %s " % tx.Hash.ToBytes())
        # print("*****************************************************")
        engine.EvaluationStack.PushT(StackItem.FromInterface(asset))

        engine.EvaluationStack.PushT(asset.Expiration)

        return True
Example #6
    def Deserialize(self, reader):
        """
        Deserialize full object.

        Args:
            reader (neo.IO.BinaryReader):
        """
        usage = reader.ReadByte()
        self.Usage = usage

        if usage == TransactionAttributeUsage.ContractHash or usage == TransactionAttributeUsage.Vote or \
                TransactionAttributeUsage.Hash1 <= usage <= TransactionAttributeUsage.Hash15:
            self.Data = reader.ReadBytes(32)

        elif usage == TransactionAttributeUsage.ECDH02 or usage == TransactionAttributeUsage.ECDH03:
            # prepend the usage byte itself: bytearray([usage]) is a one-byte
            # array, whereas bytearray(usage) would create `usage` zero bytes
            self.Data = bytearray([usage]) + bytearray(reader.ReadBytes(32))

        elif usage == TransactionAttributeUsage.Script:
            self.Data = reader.ReadBytes(20)

        elif usage == TransactionAttributeUsage.DescriptionUrl:

            self.Data = reader.ReadBytes(reader.ReadByte())

        elif usage == TransactionAttributeUsage.Description or usage >= TransactionAttributeUsage.Remark:
            self.Data = reader.ReadVarBytes(max=self.MAX_ATTR_DATA_SIZE)
        else:
            logger.error("format error!!!")
Example #7
    def DeserializeStackItem(reader):
        stype = reader.ReadUInt8()
        if stype == StackItemType.ByteArray:
            return ByteArray(reader.ReadVarBytes())
        elif stype == StackItemType.Boolean:
            return Boolean(reader.ReadByte())
        elif stype == StackItemType.Integer:
            return Integer(BigInteger.FromBytes(reader.ReadVarBytes(), signed=True))
        elif stype == StackItemType.Array:
            stack_item = Array()
            count = reader.ReadVarInt()
            while count > 0:
                count -= 1
                stack_item.Add(StackItem.DeserializeStackItem(reader))
            return stack_item
        elif stype == StackItemType.Struct:
            stack_item = Struct(value=None)
            count = reader.ReadVarInt()
            while count > 0:
                count -= 1
                stack_item.Add(StackItem.DeserializeStackItem(reader))
            return stack_item
        elif stype == StackItemType.Map:
            stack_item = Map()
            count = reader.ReadVarInt()
            while count > 0:
                count -= 1
                key = StackItem.DeserializeStackItem(reader)
                val = StackItem.DeserializeStackItem(reader)
                stack_item.SetItem(key, val)
            return stack_item

        else:
            logger.error("Could not deserialize stack item with type: %s " % stype)
        return None
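
The chain above dispatches purely on the type byte, so the same shape can be expressed as a table of parsers. A self-contained toy sketch of that design (generic Python with made-up type tags, not the neo-python API):

    import io

    # toy type tags and payload parsers, for illustration only
    PARSERS = {
        0x00: lambda buf: buf.read(1)[0],  # one-byte item
        0x01: lambda buf: buf.read(),      # raw blob item
    }

    def parse_item(tag, buf):
        parser = PARSERS.get(tag)
        if parser is None:
            raise ValueError("unknown type tag: %#x" % tag)
        return parser(buf)

    assert parse_item(0x00, io.BytesIO(b"\x2a")) == 42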
Example #8
    def get_by_addr(self, address):
        """
        Look up a set of notifications by address
        Args:
            address (UInt160 or str): hash of address for notifications

        Returns:
            list: a list of notifications
        """
        addr = address
        if isinstance(address, str) and len(address) == 34:
            addr = Helper.AddrStrToScriptHash(address)

        if not isinstance(addr, UInt160):
            raise Exception("Incorrect address format")

        addrlist_snapshot = self.db.prefixed_db(NotificationPrefix.PREFIX_ADDR).snapshot()
        results = []

        for val in addrlist_snapshot.iterator(prefix=bytes(addr.Data), include_key=False):
            if len(val) > 4:
                try:
                    event = SmartContractEvent.FromByteArray(val)
                    results.append(event)
                except Exception as e:
                    logger.error("could not parse event: %s %s" % (e, val))
        return results
Example #9
    def AddTransaction(self, tx):
        """
        Add a transaction to the memory pool.

        Args:
            tx (neo.Core.TX.Transaction): instance.

        Returns:
            bool: True if successfully added. False otherwise.
        """
        if BC.Default() is None:
            return False

        if tx.Hash.ToBytes() in self.MemPool.keys():
            return False

        if BC.Default().ContainsTransaction(tx.Hash):
            return False

        if not tx.Verify(self.MemPool.values()):
            logger.error("Veryfiying tx result... failed")
            return False

        self.MemPool[tx.Hash.ToBytes()] = tx

        return True
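
A hedged usage sketch, assuming `node` is an object exposing the method above and `tx` is a transaction instance (both names are hypothetical):

    if node.AddTransaction(tx):
        logger.info("tx accepted into the mempool")
    else:
        logger.info("tx rejected: duplicate, already on chain, or failed verification")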
Example #10
    def FromJson(jsn, isMultiSig=True):
        try:
            parsed = json.loads(jsn)
            if parsed['type'] == 'Neo.Core.ContractTransaction':
                verifiable = ContractTransaction()
                ms = MemoryStream(binascii.unhexlify(parsed['hex']))
                r = BinaryReader(ms)
                verifiable.DeserializeUnsigned(r)
                context = ContractParametersContext(verifiable, isMultiSig=isMultiSig)
                for key, value in parsed['items'].items():
                    if "0x" in key:
                        key = key[2:]
                    key = key.encode()
                    parameterbytes = []
                    for pt in value['parameters']:
                        if pt['type'] == 'Signature':
                            parameterbytes.append(0)
                    contract = Contract.Create(value['script'], parameterbytes, key)
                    context.ContextItems[key] = ContextItem(contract)
                    if 'signatures' in value:
                        context.ContextItems[key].Signatures = value['signatures']

                return context
            else:
                raise ("Unsupported transaction type in JSON")

        except Exception as e:
            logger.error("Failed to import ContractParametersContext from JSON: {}".format(e))
Example #11
    def AsSerializableWithType(buffer, class_name):
        """

        Args:
            buffer (BytesIO/bytes): stream to deserialize `class_name` to.
            class_name (str): a full path to the class to be deserialized into. e.g. 'neo.Core.Block.Block'

        Returns:
            object: if deserialization is successful.
            None: if deserialization failed.
        """
        module = '.'.join(class_name.split('.')[:-1])
        klassname = class_name.split('.')[-1]
        klass = getattr(importlib.import_module(module), klassname)
        mstream = StreamManager.GetStream(buffer)
        reader = BinaryReader(mstream)

        try:
            serializable = klass()
            serializable.Deserialize(reader)
            return serializable
        except Exception as e:
            logger.error("Could not deserialize: %s %s" % (e, class_name))
        finally:
            StreamManager.ReleaseStream(mstream)

        return None
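
Following the snippet's own docstring, a call might look like this, assuming the enclosing class is the library's Helper and `raw_block` holds serialized block bytes obtained elsewhere (both assumptions):

    block = Helper.AsSerializableWithType(raw_block, 'neo.Core.Block.Block')
    if block is None:
        logger.error("block deserialization failed")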
Example #12
 def BuildDatabase(self):
     self._db = PWDatabase(self._path).DB
     try:
         self._db.create_tables([Account, Address, Coin, Contract, Key, NEP5Token, VINHold,
                                 Transaction, TransactionInfo, NamedAddress], safe=True)
     except Exception as e:
         logger.error("Could not build database %s " % e)
Example #13
    def DeleteAddress(self, script_hash):
        success, coins_toremove = super(UserWallet, self).DeleteAddress(script_hash)

        for coin in coins_toremove:
            try:
                c = Coin.get(TxId=bytes(coin.Reference.PrevHash.Data), Index=coin.Reference.PrevIndex)
                c.delete_instance()
            except Exception as e:
                logger.error("Could not delete coin %s %s " % (coin, e))

        todelete = bytes(script_hash.ToArray())

        for c in Contract.select():

            address = c.Address
            if address.ScriptHash == todelete:
                c.delete_instance()
                address.delete_instance()

        try:
            address = Address.get(ScriptHash=todelete)
            address.delete_instance()
        except Exception:
            # address row may already be gone; nothing to delete
            pass

        return True, coins_toremove
Example #14
    def GetBalance(self, wallet, address, as_string=False):
        """
        Get the token balance.

        Args:
            wallet (neo.Wallets.Wallet): a wallet instance.
            address (str): public address of the account to get the token balance of.
            as_string (bool): whether the return value should be a string. Default is False, returning an integer.

        Returns:
            int/str: token balance value as int (default), token balance as string if `as_string` is set to True. 0 if balance retrieval failed.
        """
        addr = parse_param(address, wallet)
        if isinstance(addr, UInt160):
            addr = addr.Data
        sb = ScriptBuilder()
        sb.EmitAppCallWithOperationAndArgs(self.ScriptHash, 'balanceOf', [addr])

        tx, fee, results, num_ops = test_invoke(sb.ToArray(), wallet, [])

        try:
            val = results[0].GetBigInteger()
            precision_divisor = pow(10, self.decimals)
            balance = Decimal(val) / Decimal(precision_divisor)
            if as_string:
                formatter_str = '.%sf' % self.decimals
                balance_str = format(balance, formatter_str)
                return balance_str
            return balance
        except Exception as e:
            logger.error("could not get balance: %s " % e)
            traceback.print_stack()

        return 0
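
The precision handling above is plain fixed-point arithmetic: the contract returns an integer in the token's smallest unit, and dividing by 10**decimals yields the human-readable amount. A worked example with assumed values:

    from decimal import Decimal

    raw_val = 1500000000   # integer as returned by balanceOf (assumed)
    decimals = 8           # token precision (assumed)
    balance = Decimal(raw_val) / Decimal(10 ** decimals)
    assert format(balance, '.8f') == '15.00000000'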
Example #15
    def LoadStoredData(self, key):
        logger.debug("Looking for key %s " % key)
        try:
            return Key.get(Name=key).Value
        except Exception as e:
            logger.error("Could not get key %s " % e)

        return None
Example #16
def attr_obj_to_tx_attr(obj):
    try:
        datum = obj['data']
        if type(datum) is str:
            datum = datum.encode('utf-8')
        usage = obj['usage']
        return TransactionAttribute(usage=usage, data=datum)
    except Exception as e:
        logger.error("could not convert object %s into TransactionAttribute: %s " % (obj, e))
    return None
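
A quick usage sketch with a made-up attribute dict (the usage byte is illustrative, not a value taken from the source):

    obj = {'usage': 0xf0, 'data': 'hello world'}  # hypothetical remark-style attribute
    attr = attr_obj_to_tx_attr(obj)
    if attr is None:
        logger.error("conversion failed")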
Example #17
    def ScriptHash(self):

        if self._scriptHash is None:
            try:
                self._scriptHash = Crypto.ToScriptHash(self.Script)
            except binascii.Error:
                self._scriptHash = Crypto.ToScriptHash(self.Script, unhex=False)
            except Exception as e:
                logger.error("Could not create script hash: %s " % e)

        return self._scriptHash
Example #18
    def ExecutionCompleted(self, engine, success, error=None):

        height = Blockchain.Default().Height + 1
        tx_hash = None

        if engine.ScriptContainer:
            tx_hash = engine.ScriptContainer.Hash

        if not tx_hash:
            tx_hash = UInt256(data=bytearray(32))

        entry_script = None
        try:
            # get the first script that was executed
            # this is usually the script that sets up the script to be executed
            entry_script = UInt160(data=engine.ExecutedScriptHashes[0])

            # ExecutedScriptHashes[1] will usually be the first contract executed
            if len(engine.ExecutedScriptHashes) > 1:
                entry_script = UInt160(data=engine.ExecutedScriptHashes[1])
        except Exception as e:
            logger.error("Could not get entry script: %s " % e)

        payload = []
        for item in engine.EvaluationStack.Items:
            payload_item = stack_item_to_py(item)
            payload.append(payload_item)

        if success:

            # dispatch all notify events, along with the success of the contract execution
            for notify_event_args in self.notifications:
                self.events_to_dispatch.append(NotifyEvent(SmartContractEvent.RUNTIME_NOTIFY, notify_event_args.State,
                                                           notify_event_args.ScriptHash, height, tx_hash,
                                                           success, engine.testMode))

            if engine.Trigger == Application:
                self.events_to_dispatch.append(SmartContractEvent(SmartContractEvent.EXECUTION_SUCCESS, payload, entry_script,
                                                                  height, tx_hash, success, engine.testMode))
            else:
                self.events_to_dispatch.append(SmartContractEvent(SmartContractEvent.VERIFICATION_SUCCESS, payload, entry_script,
                                                                  height, tx_hash, success, engine.testMode))

        else:
            if engine.Trigger == Application:
                self.events_to_dispatch.append(
                    SmartContractEvent(SmartContractEvent.EXECUTION_FAIL, [payload, error, engine._VMState],
                                       entry_script, height, tx_hash, success, engine.testMode))
            else:
                self.events_to_dispatch.append(
                    SmartContractEvent(SmartContractEvent.VERIFICATION_FAIL, [payload, error, engine._VMState],
                                       entry_script, height, tx_hash, success, engine.testMode))

        self.notifications = []
Example #19
    def Runtime_Deserialize(self, engine):

        data = engine.EvaluationStack.Pop().GetByteArray()

        ms = StreamManager.GetStream(data=data)
        reader = BinaryReader(ms)
        try:
            stack_item = StackItem.DeserializeStackItem(reader)
            engine.EvaluationStack.PushT(stack_item)
        except Exception as e:
            logger.error("Colud not Deserialize stack item: %s " % e)
            return False
        return True
Example #20
    def LoadCoins(self):
        coins = {}

        try:
            for coin in Coin.select():
                reference = CoinReference(prev_hash=UInt256(coin.TxId), prev_index=coin.Index)
                output = TransactionOutput(UInt256(coin.AssetId), Fixed8(coin.Value), UInt160(coin.ScriptHash))
                walletcoin = WalletCoin.CoinFromRef(reference, output, coin.State)
                coins[reference] = walletcoin
        except Exception as e:
            logger.error("could not load coins %s " % e)

        return coins
Example #21
    def create_schema(self):
        logger.debug("postgresql_database: create_schema()")
        con = None

        try:
            con = psycopg2.connect(self.connection_string)
            cur = con.cursor()
            cur.execute("CREATE TABLE active (environment_group CHARACTER VARYING(500) NOT NULL, environment CHARACTER VARYING(500) NOT NULL, endpoint_group CHARACTER VARYING(500) NOT NULL, endpoint CHARACTER VARYING(500) NOT NULL, timestamp INTEGER NOT NULL, message CHARACTER VARYING(500) NOT NULL, url CHARACTER VARYING(500) NOT NULL)")
            con.commit()
        except psycopg2.Error as e:
            logger.error("postgresql_database: problem during create_schema() - %s" % str(e))
        finally:
            if con:
                con.close()
Example #22
    def create_schema(self):
        logger.debug("sqlite_database: create_schema()")
        con = None

        try:
            con = sqlite3.connect(self.db_name)
            cur = con.cursor()
            cur.execute("CREATE TABLE active (environment_group TEXT, environment TEXT, endpoint_group TEXT, endpoint TEXT, timestamp INTEGER, message TEXT, url TEXT)")
            con.commit()
        except sqlite3.Error as e:
            logger.error("sqlite_database: problem during create_schema() - %s" % str(e))
        finally:
            if con:
                con.close()
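
Since sqlite3 connections are context managers that commit on success and roll back on error, the same schema creation can be sketched more compactly; note the `with` block manages only the transaction, so the connection is still closed by hand:

    import logging
    import sqlite3

    logger = logging.getLogger(__name__)

    def create_schema(db_name):
        con = sqlite3.connect(db_name)
        try:
            with con:  # commits on success, rolls back on error
                con.execute("CREATE TABLE active (environment_group TEXT, environment TEXT, "
                            "endpoint_group TEXT, endpoint TEXT, timestamp INTEGER, "
                            "message TEXT, url TEXT)")
        except sqlite3.Error as e:
            logger.error("sqlite_database: problem during create_schema() - %s" % str(e))
        finally:
            con.close()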
Example #23
 def save_active(self, incident):
     logger.debug("postgresql_database: save_active()")
     con = None
     try:
         con = psycopg2.connect(self.connection_string)
         cur = con.cursor()
         cur.execute("INSERT INTO active VALUES (%s,%s,%s,%s,%s,%s,%s)",
             (incident.endpoint.environment_group, incident.endpoint.environment, incident.endpoint.endpoint_group, incident.endpoint.endpoint, incident.timestamp, incident.message, incident.endpoint.url))
         con.commit()
     except psycopg2.Error as e:
         logger.error("postgresql_database: problem during save_active() - %s" % str(e))
     finally:
         if con:
             con.close()
Example #24
 def save_active(self, incident):
     logger.debug("sqlite_database: save_active()")
     con = None
     try:
         con = sqlite3.connect(self.db_name)
         cur = con.cursor()
         cur.execute("INSERT INTO active VALUES (?,?,?,?,?,?,?)",
             (incident.endpoint.environment_group, incident.endpoint.environment, incident.endpoint.endpoint_group, incident.endpoint.endpoint, incident.timestamp, incident.message, incident.endpoint.url))
         con.commit()
     except sqlite3.Error as e:
         logger.error("sqlite_database: problem during save_active() - %s" % str(e))
     finally:
         if con:
             con.close()
Example #25
 def remove_active(self, incident):
     logger.debug("postgresql_database: remove_active()")
     con = None
     try:
         con = psycopg2.connect(self.connection_string)
         cur = con.cursor()
         cur.execute("DELETE FROM active WHERE environment_group = %s AND environment = %s AND endpoint_group = %s AND endpoint = %s",
             (incident.endpoint.environment_group, incident.endpoint.environment, incident.endpoint.endpoint_group, incident.endpoint.endpoint))
         con.commit()
     except psycopg2.Error as e:
         logger.error("postgresql_database: problem during remove_active() - %s" % str(e))
     finally:
         if con:
             con.close()
Example #26
    def AddNEP5Token(self, token):
        """
        Add a NEP-5 compliant token to the wallet.

        Args:
            token (NEP5Token): an instance of type neo.Wallets.NEP5Token.

        Note:
            Prints a warning to the console if the token already exists in the wallet.
        """
        if token.ScriptHash.ToBytes() in self._tokens.keys():
            logger.error("Token already in wallet")
            return
        self._tokens[token.ScriptHash.ToBytes()] = token
Example #27
 def remove_active(self, incident):
     logger.debug("sqlite_database: remove_active()")
     con = None
     try:
         con = sqlite3.connect(self.db_name)
         cur = con.cursor()
         cur.execute("DELETE FROM active WHERE environment_group = ? AND environment = ? AND endpoint_group = ? AND endpoint = ?",
             (incident.endpoint.environment_group, incident.endpoint.environment, incident.endpoint.endpoint_group, incident.endpoint.endpoint))
         con.commit()
     except sqlite3.Error as e:
         logger.error("sqlite_database: problem during remove_active() - %s" % str(e))
     finally:
         if con:
             con.close()
Example #28
 def get_all_actives(self):
     logger.debug("postgresql_database: get_all_actives()")
     con = None
     try:
         con = psycopg2.connect(self.connection_string)
         cur = con.cursor(cursor_factory=psycopg2.extras.DictCursor)
         cur.execute("SELECT * FROM active")
         data = cur.fetchall()
         return data
     except psycopg2.Error as e:
         logger.error("postgresql_database: problem during get_all_actives() - %s" % str(e))
         return None
     finally:
         if con:
             con.close()
Example #29
 def active_exists(self, incident):
     logger.debug("postgresql_database: active_exists()")
     con = None
     try:
         con = psycopg2.connect(self.connection_string)
         cur = con.cursor()
         cur.execute('''SELECT COUNT(*) FROM active WHERE environment_group = %s AND environment = %s AND endpoint_group = %s AND endpoint = %s''',
             (incident.endpoint.environment_group, incident.endpoint.environment, incident.endpoint.endpoint_group, incident.endpoint.endpoint))
         data = cur.fetchone()
         return int(data[0]) > 0
     except psycopg2.Error as e:
         logger.error("postgresql_database: problem during active_exists() - %s" % str(e))
     finally:
         if con:
             con.close()
Example #30
def install_conda_env(ctx):
    """Installs virtual environment."""
    try:
        with ctx.prefix(SOURCE_CONDA):
            log.info("install conda environment")
            ctx.run(
                f"conda create -n {CONDA_ENV_NAME} 'python >=3.6' --file requirements.txt --yes"
            )
    except UnexpectedExit as err:
        result = err.args[0]
        if "prefix already exists" in result.stderr:
            log.info("Conda env already exists; moving to next step.")
        else:
            log.error(err)
            raise err
Example #31
###############################################################################
# Restore Main Exe
###############################################################################
# OAM Connection
try:
    ssh.connect(OAMIP,username=SSHUSER,password=SSHPASS,port=SSHPORT,timeout=10)
    logger.info('OAM Connection Success.')

    # Check System Status
    try:
        _CHKSTAT = """%s/bin/mcsadmin getSystemStatus  | tail -n +9  | sed '/^$/d' | grep 'System' | awk '{ printf $2 }'""" % INSTALLPATH    
        stdin, stdout, stderr = ssh.exec_command(_CHKSTAT)
        _sshdata = stdout.readlines()
        MCSSTATUS = _sshdata[0]
    except Exception as ERR:
        logger.error("Can't find ColumnStore Path : " + INSTALLPATH + " for OAM. (%s)" % ERR)
        exit()
    
    # System Shutdown Check
    if MCSSTATUS == "MAN_OFFLINE":
        logger.info('MCS Status : ' + str(MCSSTATUS))
        ###############################################################################
        # MODULE INFO
        ###############################################################################
        _CHKIP = """%s/bin/mcsadmin getSystemnetworkconfig  | tail -n +7 | sed '/^$/d' | awk '{ printf $1":"$7 " "; }'""" % INSTALLPATH
        stdin, stdout, stderr = ssh.exec_command(_CHKIP)
        _sshdata = stdout.readlines()
        _sshdata = _sshdata[0]
        _sshdata_con = _sshdata.split(" ") 
        MCSSVR=[]
        for _svr in _sshdata_con:
Example #32
def make_plots(mut_lines_dir: Path,
               organ_vols: pd.DataFrame,
               label_meta_file: Path,
               stats_root_dir: Path,
               skip_no_analysis=False,
               organ_subset: List = [],
               extra_dir: Path = Path(''),
               voxel_size: float = 27.0):
    """

    Parameters
    ----------
    mut_lines_dir
        Lama registration root. eg: mutants/output with each subdir containing a line
    organ_vols
        All organ volume.
            index=spec_id,
            cols = label_nums, + staging and line
    wt_staging
        Aggregated staging info for each baseline
    label_meta_file
        CSV of atlas metadata
    stats_root_dir
        Contains a folder for each line
    skip_no_analysis
        If there is a no_analysis=True flag in the meta data csv, skip this organ
    organ_subset
        plot only the labels with these label numbers. Or can be used to order the output of the plots
    extra_dir
        Bit of a bodge, but if doing the non-permutation-based stats, the organ vol csv is in a directory below.
        Give the name here (currently 'organ_volumes')
    voxel_size
        For calculating correct organ volumes
    """
    if label_meta_file:
        label_meta = pd.read_csv(label_meta_file, index_col=0)
    else:
        label_meta = None

    organ_vols.rename(columns={'staging': WEV_LABEL}, inplace=True)

    for mut_line_dir in mut_lines_dir.iterdir():

        if not mut_line_dir.is_dir():
            continue

        print(mut_line_dir.name)

        stats_line_dir = stats_root_dir / mut_line_dir.name / extra_dir  # extra_dir does nothing if == ''

        line = mut_line_dir.name

        # TODO: Get file by startswith line name and endswith extension (could be date of analysis in middle)
        # rather than just getting any CSVs in there

        stats_result_file = getfile_startswith_endswith(
            stats_line_dir, line, '.csv')

        # Get mutant staging and organ volumes
        # line_vols = []
        # line_stage = []

        # for spec_dir in mut_line_dir.iterdir():
        #     if str(spec_dir).endswith('_'):
        #         continue

        # staging = pd.read_csv(spec_dir / 'output' / 'staging_info_volume.csv', index_col=0)
        # organ_vols = pd.read_csv(spec_dir / 'output' / 'organ_volumes.csv', index_col=0)
        #
        # line_vols.append(organ_vols)
        # line_stage.append(staging)

        # df_stage_mut = pd.concat(line_stage, axis=0)
        # df_stage_mut['genotype'] = 'mutant'
        # df_stage_mut.rename(columns={'value': 'staging'},  inplace=True) # Get rid of this
        # df_vol_mut = pd.concat(line_vols, axis=0)
        df_hits = pd.read_csv(stats_result_file, index_col=0)

        # staging_df = pd.concat([wt_staging, df_stage_mut])
        # staging_df.rename(columns={'staging': wev}, inplace=True)

        # vol_df = pd.concat([organ_vols, df_vol_mut])
        # Check that concat worked
        # if vol_df.shape[1] != organ_vols.shape[1] and vol_df.shape[1] != df_vol_mut.shape[1]:
        #     raise ValueError('Error merging WT and mutant organ volume dataframes')

        if 'significant_cal_p' in df_hits:  # permutation stats
            hits: pd.DataFrame = df_hits[df_hits['significant_cal_p'] == True]
        elif 'significant_bh_q_5' in df_hits:
            hits: pd.DataFrame = df_hits[df_hits['significant_bh_q_5'] == True]
        else:
            logging.error(
                "Plots not made: Stats output file must have 'significant_cal_p' or 'significant_bh_q_5' column"
            )
            continue  # without this, `hits` would be undefined below

        if label_meta is not None and 'organ_system_name' in label_meta.columns and 'organ_system_name' not in hits:
            # Sort by organ system if present in atlas metadata
            hits = hits.merge(label_meta[['organ_system_name']],
                              how='left',
                              left_index=True,
                              right_index=True)
            hits.sort_values(by='organ_system_name', inplace=True)

        if skip_no_analysis:
            # Skip organs that are flagged with no_analysis in the atlas metadata file
            if 'no_analysis' in hits:
                hits = hits[hits['no_analysis'] != True]

        if len(hits) < 1:
            logging.info(
                f'No hits, so skipping organ vol plots for: {mut_line_dir.name}'
            )
            continue

        # st = wt_staging['staging']
        # normed_wt = organ_vols.div(st, axis=0)

        # normed_mut = df_vol_mut.div(df_stage_mut['staging'], axis=0)

        numcol = 6 if len(hits) > 5 else len(hits)
        numrows = math.ceil(len(hits) / numcol)

        figsize_y = 5 * numrows
        figsize_x = 5 * numcol

        fig = Figure(figsize=(figsize_x, figsize_y))
        FigureCanvas(fig)

        fig_scat = Figure(figsize=(figsize_x, figsize_y))
        FigureCanvas(fig_scat)

        if organ_subset:
            labels_to_plot = organ_subset

            if len(set(labels_to_plot).intersection(
                    hits.index)) != len(labels_to_plot):
                print(
                    'Some label numbers in organ_subset are not in the hits DataFrame'
                )
                return
        else:
            labels_to_plot = hits.index

        # organ vols to mm3
        um3_conv_factor = voxel_size**3  # To convert voxels to um3
        um3_to_mm3_conv_factor = 1e9

        for col in organ_vols.columns:
            if col.isdigit() or col == WEV_LABEL:
                organ_vols[col] = (organ_vols[col] *
                                   um3_conv_factor) / um3_to_mm3_conv_factor
        # organ_vols[organ_vol] = (scattter_df[organ_vol] * um3_conv_factor) / um3_to_mm3_conv_factor
        # scattter_df[wev] = (scattter_df[wev] * um3_conv_factor) / um3_to_mm3_conv_factor

        # for i, (label, row) in enumerate(hits.iterrows()):
        for i, label in enumerate(labels_to_plot):
            if 'label_name' in hits:
                label_name: str = hits.loc[label, 'label_name']
            else:
                label_name: str = label
            axes = fig.add_subplot(numrows, numcol, i + 1)
            axes.tick_params(labelsize=18)
            axes.set_yticklabels([])

            label = str(label)

            if label_meta is not None and 'short_name' in label_meta:
                label_name = label_meta.at[int(label), 'short_name']
            else:
                label_name = str(label_name)
            title = label_name.replace('_', ' ')

            # Scatterplot
            s_axes = fig_scat.add_subplot(numrows, numcol, i + 1)
            s_axes.tick_params(labelsize=18)

            scatter_df = organ_vols.loc[(organ_vols.line == 'baseline') |
                                        (organ_vols.line == line)]
            scatter_df = scatter_df[[label, WEV_LABEL, 'line']]
            scatter_df.rename(columns={
                label: label_name,
                'line': 'genotype'
            },
                              inplace=True)
            sax = sns.scatterplot(y=label_name,
                                  x=WEV_LABEL,
                                  ax=s_axes,
                                  hue='genotype',
                                  data=scatter_df)

            sax.set(xlabel='Whole embryo volume (mm^3)')
            sax.set(ylabel='Organ volume (mm^3)')

            sax.set_title(title, fontsize=16)
            sax.ticklabel_format(style='sci', scilimits=(0, 0))

            # x 10^7 instead of 1e7
            sax.xaxis.major.formatter._useMathText = True
            sax.yaxis.major.formatter._useMathText = True

            formatting.label_offset(sax)

        fig.subplots_adjust(top=0.8)  # TODO fix this for larger plot
        fig.suptitle(line, fontsize=30, y=0.98)
        # fig.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)

        if skip_no_analysis:
            box_name = f'{line}_boxplots_no_analysis.png'
        else:
            box_name = f'{line}_boxplots.png'

        # TODO: Fix the boxplot or swarm plot output
        # fig.savefig(stats_line_dir / box_name)

        fig_scat.subplots_adjust(top=0.8, wspace=0.35,
                                 hspace=0.4)  # TODO fix this for larger plot
        fig_scat.suptitle(line, fontsize=30, y=0.98)
        fig_scat.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)

        if skip_no_analysis:
            scatter_name = f'{line}_scatter_plots_no_analysis_normalised.png'
        else:
            scatter_name = f'{line}_scatter_plots.png'
        fig_scat.savefig(stats_line_dir / scatter_name)
Example #33
    def VM_FAULT_and_report(self, id, *args):
        self._VMState |= VMState.FAULT

        if id == VMFault.INVALID_JUMP:
            error_msg = "Attempting to JMP/JMPIF/JMPIFNOT to an invalid location."

        elif id == VMFault.INVALID_CONTRACT:
            script_hash = args[0]
            error_msg = "Trying to call an unknown contract with script_hash {}\nMake sure the contract exists on the blockchain".format(
                script_hash)

        elif id == VMFault.CHECKMULTISIG_INVALID_PUBLICKEY_COUNT:
            error_msg = "CHECKMULTISIG - provided public key count is less than 1."

        elif id == VMFault.CHECKMULTISIG_SIGNATURE_ERROR:
            if args[0] < 1:
                error_msg = "CHECKMULTISIG - Minimum required signature count cannot be less than 1."
            else:  # m > n
                m = args[0]
                n = args[1]
                error_msg = "CHECKMULTISIG - Insufficient signatures provided ({}). Minimum required is {}".format(
                    m, n)

        elif id == VMFault.UNPACK_INVALID_TYPE:
            item = args[0]
            error_msg = "Failed to UNPACK item. Item is not an array but of type: {}".format(
                type(item))

        elif id == VMFault.PICKITEM_INVALID_TYPE:
            index = args[0]
            item = args[1]
            error_msg = "Cannot access item at index {}. Item is not an array but of type: {}".format(
                index, type(item))

        elif id == VMFault.PICKITEM_NEGATIVE_INDEX:
            error_msg = "Attempting to access an array using a negative index"

        elif id == VMFault.PICKITEM_INVALID_INDEX:
            index = args[0]
            length = args[1]
            error_msg = "Array index {} exceeds list length {}".format(
                index, length)

        elif id == VMFault.APPEND_INVALID_TYPE:
            item = args[0]
            error_msg = "Cannot append to item. Item is not an array but of type: {}".format(
                type(item))

        elif id == VMFault.REVERSE_INVALID_TYPE:
            item = args[0]
            error_msg = "Cannot REVERSE item. Item is not an array but of type: {}".format(
                type(item))

        elif id == VMFault.REMOVE_INVALID_TYPE:
            item = args[0]
            index = args[1]
            error_msg = "Cannot REMOVE item at index {}. Item is not an array but of type: {}".format(
                index, type(item))

        elif id == VMFault.REMOVE_INVALID_INDEX:
            index = args[0]
            length = args[1]

            if index < 0:
                error_msg = "Cannot REMOVE item at index {}. Index < 0".format(
                    index)

            else:  # index >= len(items):
                error_msg = "Cannot REMOVE item at index {}. Index exceeds array length {}".format(
                    index, length)

        elif id == VMFault.UNKNOWN_OPCODE:
            opcode = args[0]
            error_msg = "Unknown opcode found: {}".format(opcode)

        else:
            error_msg = id

        logger.error("({}) {}".format(self.ops_processed, error_msg))
        return
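
Most branches above only format a message from `args`, so the chain could be collapsed into a lookup table of formatters. A minimal sketch of that shape, keyed on strings here since only some VMFault members appear in the snippet:

    # illustrative table; real code would key on the VMFault members themselves
    FAULT_MESSAGES = {
        "INVALID_JUMP": lambda args: "Attempting to JMP/JMPIF/JMPIFNOT to an invalid location.",
        "UNKNOWN_OPCODE": lambda args: "Unknown opcode found: {}".format(args[0]),
        "PICKITEM_INVALID_INDEX": lambda args: "Array index {} exceeds list length {}".format(*args),
    }

    def fault_message(fault_id, *args):
        formatter = FAULT_MESSAGES.get(fault_id)
        return formatter(args) if formatter else fault_id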
Example #34
camera = PiCamera()
camera.resolution = (1920, 1080)  # resolution is a property, not a method
camera.start_preview()
photo_counter = 1
picture_interval_mins = 5
logger.info('{} - Connected to PiCam NoIR'.format(datetime.datetime.utcnow()))

csv_header = [
    'Date/Time', 'Temp', 'Humidity', 'Pressure', 'Magnetic_X', 'Magnetic_Y',
    'Magnetic_Z'
]

try:
    create_data_file()
except Exception as e:
    logger.error('{}: {}'.format(e.__class__.__name__, e))
else:
    logger.info('{} - Data file created successfully'.format(
        datetime.datetime.utcnow()))

logger.info('{} - Init complete, main programme starting'.format(
    datetime.datetime.utcnow()))
now_time = datetime.datetime.utcnow()

while (now_time < start_time + datetime.timedelta(minutes=175)):
    try:
        # Main program - take environment data every iteration. Take a photo
        # every five minutes
        pass  # **** Main body of Code to go here with 30s interval ****
    except Exception as e:
        logger.error('{}: {}'.format(e.__class__.__name__, e))
Example #35
    logger.info("%s, %s,", text, res)

    if len(sys.argv) > 1:
        text = " ".join(sys.argv[1:])
    else:
        text = "test this and that"

    res = await deepl_tr(text)
    logger.info("%s, %s,", text, res)


if __name__ == "__main__":
    try:
        loop = asyncio.get_event_loop()
    except Exception as exc:
        logger.error(exc)
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    try:
        loop.run_until_complete(main())
    except Exception as exc:
        logger.error(exc)
    finally:
        loop.close()

    _ = """
    import sys

    text = "test this and that and more"
    res = LOOP.run_until_complete(deepl_tr(text))
Example #36
 def getConf(self, conf):
     try:
         with open(conf, 'r') as f:
             return yaml.safe_load(f)
     except Exception:
         logger.error("unable to load specified config file %s " % conf)
Example #37
    def execute(self):
        """Execute checks."""
        res = self.check_args(self.args)
        if res:  # pragma: nocover
            return res

        logger.info("Starting sea-snap check-irods %s", self.command_name)
        logger.info("  args: %s", self.args)

        # --- get lists
        # files on SODAR
        files = self.get_files()
        files_rel = [
            f.replace(self.args.irods_path + "/", "") for f in files["files"]
        ]
        logger.info("Files on SODAR (first 20): %s", ", ".join(files_rel[:19]))

        # samples in project
        with open(os.path.join(self.args.results_folder, "sample_info.yaml"),
                  "r") as stream:
            samples = list(yaml.safe_load(stream)["sample_info"])
        logger.info("Samples in sample_info.yaml: %s", ", ".join(samples))

        # destinations from blueprint file
        blueprint = Path(
            self.args.results_folder) / self.args.transfer_blueprint
        dests = [
            self.remote_path(dest) for dest in set(
                re.findall(r"i:__SODAR__/(\S+)", blueprint.read_text()))
        ]  # noqa: W605

        # --- run tests
        # all there?
        not_there = [
            d for d in dests if d not in files_rel and d[-4:] != ".md5"
        ]
        if not_there:
            e_msg = "Some files have not been uploaded to SODAR: " + ", ".join(
                not_there)
            logger.error(e_msg)
            res = 1
            if not self.args.yes and not input(
                    "Continue? [yN] ").lower().startswith("y"):
                logger.error("OK, breaking at your request")
                return None
            # raise FileNotFoundError(e_msg)

        # samples covered?
        non_covered_samples = [
            s for s in samples if not any(s in f for f in files_rel)
        ]
        if non_covered_samples:
            logger.warning(
                "These samples are in the sample sheet, but have no corresponding files on SODAR: %s",
                ", ".join(non_covered_samples),
            )
            if not self.args.yes and not input(
                    "Continue? [yN] ").lower().startswith("y"):
                logger.error("OK, breaking at your request")
                return None

        # generic tests (md5 sums, metadata, #replicas)
        self.run_tests(files)

        logger.info("All done")
        return res
Example #38
File: Tasks.py, Project: sethll/condex
def perform_algo_task():
    coinsAboveThreshold = {}
    coinsElgibleForIncrease = {}

    indexInfo = DatabaseManager.get_index_info_model()

    if indexInfo.Active == True:
        percentage_btc_amount = indexInfo.TotalBTCVal * (
            indexInfo.BalanceThreshold / 100)
        logger.debug("Percentage_to_btc_amount: " + str(percentage_btc_amount))

        if percentage_btc_amount <= CondexConfig.BITTREX_MIN_BTC_TRADE_AMOUNT:
            logger.debug("Current BTC Threshold Value To Low - " +
                         str(percentage_btc_amount))
        else:
            # Generate our winners/losers list
            for indexedCoin in DatabaseManager.get_all_index_coin_models():
                if indexedCoin.UnrealizedGain >= indexInfo.BalanceThreshold:
                    coinsAboveThreshold[
                        indexedCoin.Ticker] = indexedCoin.UnrealizedGain
                elif indexedCoin.UnrealizedGain <= indexInfo.BalanceThreshold:
                    coinsElgibleForIncrease[
                        indexedCoin.Ticker] = indexedCoin.UnrealizedGain

            # Sort our tables
            # .iteritems() and tuple-unpacking lambdas are Python 2 only;
            # ported here to the Python 3 equivalents
            coinsAboveThreshold = Util.tuple_list_to_dict(
                sorted(coinsAboveThreshold.items(),
                       key=lambda kv: (kv[1], kv[0]),
                       reverse=True))
            coinsElgibleForIncrease = Util.tuple_list_to_dict(
                sorted(coinsElgibleForIncrease.items(),
                       key=lambda kv: (kv[1], kv[0]),
                       reverse=True))

            if len(coinsAboveThreshold) >= 1:
                logger.debug("Currently " + str(len(coinsAboveThreshold)) +
                             " avalible for rebalance")
                logger.debug(coinsAboveThreshold)

                if len(coinsElgibleForIncrease) >= 1:
                    logger.debug("Currently " +
                                 str(len(coinsElgibleForIncrease)) +
                                 " elgible for increase")
                    logger.debug(coinsElgibleForIncrease)
                    for akey in coinsAboveThreshold:

                        # Check to see if we still have coins to increase
                        if len(coinsElgibleForIncrease) >= 1:

                            elgibleCoinTicker = list(coinsElgibleForIncrease)[0]

                            rebalanceCoinLocked = False
                            elgibleCoinLocked = False

                            if DatabaseManager.get_coin_lock_model(akey):
                                rebalanceCoinLocked = True

                            if DatabaseManager.get_coin_lock_model(
                                    elgibleCoinTicker):
                                elgibleCoinLocked = True

                            if rebalanceCoinLocked == False and elgibleCoinLocked == False:

                                indexCoinInfo = DatabaseManager.get_index_coin_model(
                                    akey)
                                coinBalance = DatabaseManager.get_coin_balance_model(
                                    akey)

                                rebalanceSpecialTicker = akey + "/BTC"

                                if akey == "BTC":
                                    rebalanceSpecialTicker = "BTC/USDT"

                                rebalanceCoinTickerModel = DatabaseManager.get_ticker_model(
                                    rebalanceSpecialTicker)
                                elgibleCoinTickerModel = DatabaseManager.get_ticker_model(
                                    elgibleCoinTicker + "/BTC")

                                amountOfRebalanceToSell = 0.0

                                if akey == "BTC":
                                    amountOfRebalanceToSell = percentage_btc_amount
                                else:
                                    amountOfRebalanceToSell = percentage_btc_amount / rebalanceCoinTickerModel.BTCVal

                                amountOfEligbleToBuy = percentage_btc_amount / elgibleCoinTickerModel.BTCVal

                                if coinBalance.TotalCoins >= amountOfRebalanceToSell:
                                    DatabaseManager.create_coin_lock_model(
                                        akey)
                                    DatabaseManager.create_coin_lock_model(
                                        elgibleCoinTicker)

                                    logger.info("Performing Rebalance " +
                                                akey.upper() + " " +
                                                str(amountOfRebalanceToSell) +
                                                " - " +
                                                elgibleCoinTicker.upper() +
                                                " " +
                                                str(amountOfEligbleToBuy))
                                    #perform_rebalance_task.s(akey.upper(), amountOfRebalanceToSell, elgibleCoinTicker.upper(), amountOfEligbleToBuy)
                                    app.send_task(
                                        'Tasks.perform_rebalance_task',
                                        args=[
                                            akey.upper(),
                                            amountOfRebalanceToSell,
                                            elgibleCoinTicker.upper(),
                                            amountOfEligbleToBuy
                                        ])
                                    # Need to remove the eligible coin from the dictionary
                                    del coinsElgibleForIncrease[
                                        elgibleCoinTicker]
                                else:
                                    logger.error(
                                        "Failed to sell coins - we do not have enough of "
                                        + str(akey))

                            else:
                                logger.debug("One of the coins where locked")

                else:
                    logger.debug("No coins eligible for increase")
            else:
                logger.debug("No coins above threshold")
Example #39
File: Tasks.py, Project: sethll/condex
def wallet_update_task():

    em = ExchangeManager()
    walletData = em.get_balance()
    btcUsdValue = em.get_btc_usd_value()

    totalBtcValue = 0.0

    logger.info("Starting Wallet Update Task")

    for key in DatabaseManager.get_all_supported_coin_models():

        btcbalance = 0.0
        usdBalance = 0.0
        totalCoins = None
        fullTicker = key.Ticker + "/BTC"

        if key.Ticker == 'BTC':
            fullTicker = 'BTC/USDT'

        tickerModel = DatabaseManager.get_ticker_model(fullTicker)

        try:
            btcbalance = walletData[key.Ticker]['total'] * tickerModel.BTCVal
            totalCoins = walletData[key.Ticker]['total']
            usdBalance = btcUsdValue * btcbalance
        except Exception:
            btcbalance = 0.0
            totalCoins = 0.0

        if key.Ticker == 'BTC':
            btcbalance = walletData[key.Ticker]['total']
            usdBalance = btcUsdValue * btcbalance

        indexedCoin = DatabaseManager.get_index_coin_model(key.Ticker)

        if indexedCoin is not None:
            totalBtcValue = totalBtcValue + btcbalance

        if DatabaseManager.create_coin_balance_model(key.Ticker, btcbalance,
                                                     usdBalance, totalCoins,
                                                     datetime.datetime.now()):
            #logger.debug("Created Coin Balance Model - " + key.Ticker)
            pass
        else:
            if DatabaseManager.update_coin_balance_model(
                    key.Ticker, btcbalance, btcUsdValue * btcbalance,
                    totalCoins, datetime.datetime.now()):
                #logger.debug("Updated Coin Balance Model - " + key.Ticker)
                pass
            else:
                logger.error("Failed Update Coin Balance Model - " +
                             key.Ticker)

    totalUnrealizeGain = 0.0
    totalRealizedGain = 0.0

    for key in DatabaseManager.get_all_supported_coin_models():

        coinBalance = DatabaseManager.get_coin_balance_model(key.Ticker)
        indexedCoin = DatabaseManager.get_index_coin_model(key.Ticker)
        realizedGainModel = DatabaseManager.get_realized_gain_model(key.Ticker)

        if indexedCoin is not None:

            if DatabaseManager.update_index_coin_model(
                    indexedCoin.Ticker, indexedCoin.DesiredPercentage,
                (coinBalance.BTCBalance / totalBtcValue) * 100,
                ((coinBalance.BTCBalance / totalBtcValue) * 100) -
                    indexedCoin.DesiredPercentage, indexedCoin.Locked):
                totalUnrealizeGain = totalUnrealizeGain + (
                    ((coinBalance.BTCBalance / totalBtcValue) * 100) -
                    indexedCoin.DesiredPercentage)
                totalRealizedGain = totalRealizedGain + realizedGainModel.RealizedGain

                logger.debug("Total unrealized gain - " +
                             str(totalUnrealizeGain))
                logger.debug("Updated Indexed Coin Model - " +
                             indexedCoin.Ticker)
            else:
                logger.error("Failed To Update Indexed Coin Model - " +
                             indexedCoin.Ticker)

    indexInfo = DatabaseManager.get_index_info_model()

    if DatabaseManager.update_index_info_model(
            indexInfo.Active, totalBtcValue, btcUsdValue * totalBtcValue,
            totalRealizedGain, totalUnrealizeGain, indexInfo.BalanceThreshold,
            indexInfo.OrderTimeout, indexInfo.OrderRetryAmount,
            indexInfo.RebalanceTickSetting):
        logger.debug("Updated Index Info Model")
    else:
        logger.error("Failed To Update Index Info Model")

    logger.info("Wallet Update Task Completed")
Example #40
                f"--proxy-server={proxy}",
                "--disable-popup-blocking",  #
            ],
            executablePath=executable_path,  # use chrome
            # autoClose=False,
            headless=headless,
            # devtools=devtools,  # replace headless
            dumpio=True,
            # userDataDir=".",
            userDataDir=tempdir,
        )
    except Exception as exc:
        logger.error("get_ppbrowser exc: %s", exc)
        raise
    # page = await browser.newPage()
    # await page.goto(url)
    # logger.debug("page.goto deepl time: %.2f s", default_timer() - then)
    return browser


try:
    BROWSER = LOOP.run_until_complete(get_ppbrowser(not HEADFUL))
except Exception as exc:
    logger.error(" Unable to pyppeteer.launch exc: %s", exc)
    logger.info(
        "\n\t%s",
        r"Possible cause: abnormal exit from a previous session. Try `taskkill /f /im chrome.exe`",
    )
    logger.warning(" %s", "Note that this will also kill your chrome browser.")
    raise SystemExit(1)
Example #41
def check_args(args) -> int:
    """Argument checks that can be checked at program startup but that cannot be sensibly checked with ``argparse``."""
    any_error = False

    # if set for pulling ISA files
    if args.project_uuid:
        # Check presence of SODAR URL and auth token.
        if not args.sodar_auth_token:  # pragma: nocover
            logger.error(
                "SODAR authentication token is empty.  Either specify --sodar-auth-token, or set "
                "SODAR_AUTH_TOKEN environment variable")
            any_error = True
        if not args.sodar_url:  # pragma: nocover
            logger.error(
                "SODAR URL is empty. Either specify --sodar-url, or set SODAR_URL."
            )
            any_error = True

        # Check output file presence vs. overwrite allowed.
        if (hasattr(args.output_folder, "name")
                and Path(args.output_folder).exists()):  # pragma: nocover
            if not args.overwrite_isa:
                logger.error(
                    "The output folder %s already exists but --allow-overwrite not given.",
                    args.output_folder,
                )
                any_error = True
            else:
                logger.warning(
                    "Output folder %s exists but --allow-overwrite given.",
                    args.output_folder)

        # Check UUID syntax.
        try:
            val: typing.Optional[str] = str(UUID(args.project_uuid))
        except ValueError:  # pragma: nocover
            val = None
        finally:
            if args.project_uuid != val:  # pragma: nocover
                logger.error("Project UUID %s is not a valid UUID",
                             args.project_uuid)
                any_error = True

    # Check options --isa-assay vs. --from_file
    if args.from_file:
        logger.info(
            "Option --from_file is set, in-path-pattern will be ignored.")
        if args.isa_assay:
            logger.error(
                "Both --isa-assay and --from_file are set, choose one.")
            any_error = True

    # Check output file presence vs. overwrite allowed.
    if (hasattr(args.output_file, "name")
            and args.output_file.name != "<stdout>" and Path(
                args.output_file.name).stat().st_size != 0):  # pragma: nocover
        if not args.allow_overwrite:
            logger.error(
                "The output path %s already exists but --allow-overwrite not given.",
                args.output_file.name,
            )
            any_error = True
        else:
            logger.warn("Output path %s exists but --allow-overwrite given.",
                        args.output_file)

    return int(any_error)
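
The UUID check above works by round-tripping: `str(UUID(x))` canonicalizes a valid UUID to lower-case hyphenated form, so comparing the result with the raw input also flags syntactically valid but non-canonical spellings. For instance:

    from uuid import UUID

    s = "123E4567-E89B-12D3-A456-426614174000"
    assert str(UUID(s)) == s.lower()   # canonical form differs from the upper-case input
    assert str(UUID(s)) != s           # so the check above would report an error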
Example #42
import inspect
import json
import sys
from urllib import parse as url_parser
from broker.settings import settings
from broker.helpers import results_filter
from logzero import logger
from datetime import datetime

try:
    import awxkit
except ImportError:
    logger.error("Unable to import awxkit. Is it installed?")
    raise Exception("Unable to import awxkit. Is it installed?")

from broker.providers import Provider
from broker import helpers

AT_URL = settings.ANSIBLETOWER.base_url
UNAME = settings.ANSIBLETOWER.get("username")
PWORD = settings.ANSIBLETOWER.get("password")
TOKEN = settings.ANSIBLETOWER.get("token")
RELEASE_WORKFLOW = settings.ANSIBLETOWER.release_workflow
EXTEND_WORKFLOW = settings.ANSIBLETOWER.extend_workflow
AT_TIMEOUT = settings.ANSIBLETOWER.workflow_timeout


class AnsibleTower(Provider):
    def __init__(self, **kwargs):
        self._construct_params = []
        config = kwargs.get("config", awxkit.config)
示例#43
0
def load_secrets_from_vault(
        secrets_info: Dict[str, Dict[str, str]],  # noqa: C901
        configuration: Configuration = None,
        extra_vars: Dict[str, Any] = None) -> Secrets:
    """
    Load secrets from Vault KV secrets store

    In your experiment:

    ```
    {
        "k8s": {
            "mykey": {
                "type": "vault",
                "path": "foo/bar"
            }
        }
    }
    ```

    This will read the Vault secret at path `secret/foo/bar`
    (or `secret/data/foo/bar` if you use Vault KV version 2) and store its
    entire payload into the Chaos Toolkit `mykey`. This means that all keys
    under that path will be available as-is. For instance, this could be:

    ```
    {
        "mypassword": "...",
        "mylogin": "..."
    }
    ```

    You can also pick a single key from that payload with the `key` property:

    ```
    {
        "k8s": {
            "mykey": {
                "type": "vault",
                "path": "foo/bar",
                "key": "mypassword"
            }
        }
    }
    ```

    In that case, `mykey` will be set to the value at `secret/foo/bar` under
    the Vault secret key `mypassword`.
    """
    secrets = {}

    client = create_vault_client(configuration)

    for (target, keys) in secrets_info.items():
        secrets[target] = {}

        for (key, value) in keys.items():
            if isinstance(value, dict) and value.get("type") == "vault":
                if not HAS_HVAC:
                    logger.error(
                        "Install the `hvac` package to fetch secrets "
                        "from Vault: `pip install chaostoolkit-lib[vault]`.")
                    return {}

                path = value.get("path")
                if path is None:
                    logger.warning(
                        "Missing Vault secret path for '{}'".format(key))
                    continue

                # see https://github.com/chaostoolkit/chaostoolkit/issues/98
                kv = client.secrets.kv
                is_kv1 = kv.default_kv_version == "1"
                if is_kv1:
                    vault_payload = kv.v1.read_secret(
                        path=path,
                        mount_point=configuration.get(
                            "vault_secrets_mount_point", "secret"))
                else:
                    vault_payload = kv.v2.read_secret_version(
                        path=path,
                        mount_point=configuration.get(
                            "vault_secrets_mount_point", "secret"))

                if not vault_payload:
                    logger.warning(
                        "No Vault secret found at path: {}".format(path))
                    continue

                if is_kv1:
                    data = vault_payload.get("data")
                else:
                    data = vault_payload.get("data", {}).get("data")

                if "key" in value:
                    vault_key = value["key"]
                    if vault_key not in data:
                        logger.warning(
                            "No Vault key '{}' at secret path '{}'".format(
                                vault_key, path))
                        continue

                    secrets[target][key] = data.get(vault_key)

                else:
                    secrets[target][key] = data

        if not secrets[target]:
            secrets.pop(target)

    return secrets
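
# A hedged invocation sketch; "vault_secrets_mount_point" is the key the
# function itself reads, while the address/token keys and all values are
# assumed placeholders for whatever create_vault_client() expects:
configuration = {
    "vault_addr": "https://vault.example.com:8200",   # assumed key, placeholder value
    "vault_token": "...",                             # placeholder
    "vault_secrets_mount_point": "secret",
}
secrets_info = {
    "k8s": {
        "mykey": {"type": "vault", "path": "foo/bar", "key": "mypassword"},
    }
}
secrets = load_secrets_from_vault(secrets_info, configuration)
# -> {"k8s": {"mykey": <value of "mypassword" at secret/foo/bar>}}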
示例#44
0
def load_config_yaml(path: pathlib.Path) -> typing.Any:
    with path.open("r") as f:
        try:
            return yaml.safe_load(f)
        except yaml.YAMLError as e:
            logger.error("error: %s", e)
def check_thread_update(self, thr):
    """ check thread thr and update pbar via QUEUE.

    called in myprogressbar_ui called by paligner/saligner
    self is pbar
    in palign_command:
        pbar = myprogressbar_ui.Mypbar(window)
        [window = tk.Toplevel(top)] / top = self.top / self(aligner)
    """

    pbar = self

    if thr.is_alive():
        # logger.debug("+++ check_threa_update %s ++alive++ ", thr.name)

        # pbar.TProgressbar1.step()
        # pbar.TProgressbar1['value'] = thr.value  # this does not update for some weird reason

        # c_value = pbar.TProgressbar1['value']
        # c_value = pbar.TProgressbar1['value']

        # set pbar:
        # pbar.TProgressbar1['value'] = number

        def update_status():
            """ update running label.
            need to reuse this when the thread is terminated
            """
            old_value = pbar.TProgressbar1["value"]
            if QUEUE.qsize():  # pbar queue
                c_value = QUEUE.get_nowait()
                logger.debug("**== check_thread_update QUEUE. c_value: %.2f",
                             c_value)

                step = c_value - old_value
                pbar.TProgressbar1.step(step)

            # counter text queue
            if QUEUE_C.qsize():
                text = QUEUE_C.get_nowait()
                logger.debug("**== check_thread_update QUEUE_C text: %s", text)

                # myprogressbar_ui_support.pbarvar.set(text)
                myprogressbar_ui_support.statustext.set(text)

                # pbar.TLabel1.delete('0.0', 'end')
                # pbar.TLabel1.insert('0.0', text)

        update_status()

        # recursive: reschedule this check on the Tk event loop
        # pbar.TProgressbar1.after(500, lambda: check_thread_update(thr, pbar))
        pbar.TProgressbar1.after(500, lambda: check_thread_update(pbar, thr))

    else:  # not thr.is_alive(), finished aligning
        # update_status()

        logger.debug("*** %s not alive", thr.name)
        logger.debug(
            # "value(pbar): %s:thr.value %s", pbar.TProgressbar1["value"], thr.value
            # thr.value removed, TProgressbar1 fetches value from QUEUE/QUEUE_C
            "value(pbar): %s",
            pbar.TProgressbar1["value"])

        # extra update after thread exited
        # c_value = pbar.TProgressbar1["value"]
        # step = thr.value - c_value
        # pbar.TProgressbar1.step(step - 0.1)
        # pbar.TProgressbar1.update()

        old_value = pbar.TProgressbar1["value"]
        if QUEUE.qsize():  # pbar queue
            c_value = QUEUE.get_nowait()
            logger.debug("**== check_thread_update QUEUE. c_value: %.2f",
                         c_value)

            step = c_value - old_value
            pbar.TProgressbar1.step(step - 0.1)

        # update running label/statustext
        if QUEUE_C.qsize():
            text = QUEUE_C.get_nowait()
            logger.debug("**== check_thread_update QUEUE_C text: %s", text)

            # myprogressbar_ui_support.pbarvar.set(text)
            myprogressbar_ui_support.statustext.set(text)

        logger.debug(" Restore buttons 1 2 3 states")
        # pbar(self) TButton1: Start, 2: Cancel,3: Back
        pbar.TButton1.config(state=tk.NORMAL)
        pbar.TButton2.config(state=tk.DISABLED)
        pbar.TButton3.config(state=tk.NORMAL)

        # stop the indeterminate progressbar
        pbar.TProgressbar1.stop()
        pbar.TProgressbar1['value'] = 1
        # pbar.TProgressbar1.destroy()  # does not work

        try:
            flag = QUEUE_PS.get_nowait()
            queue1_put(QUEUE_PS, flag)
        except Empty:
            flag = ""

        logger.debug(" flag = QUEUE_PS.get_nowait(): %s ", flag)

        # ######### SENTS ###########
        # set QUEUE_SA self.saligned

        if flag in ["s"]:  # activated from salign_command

            # update Table and Pad from bee_aligner.plist_to_slist thread
            # in myprogressbar_ui/start_command

            try:
                s_list = thr.flist  # thr is c_th in plist_to_slist
            except Exception as exc:
                logger.error(
                    "s_list = thr.flist exc: %s, maybe thread has not run well or has been attached a different result",
                    exc)

                return None

            logger.debug("s_list[:5]: %s", s_list[:5])

            logger.debug(
                """ SIG_TABLE.send("check_thread_update", df=s_list) """)
            SIG_TABLE.send("check_thread_update", df=s_list)

            # set QUEUE_SA self.saligned
            queue1_put(QUEUE_SA, True)

            # set QUEUE_S1, QUEUE_S2 QUEUE_SM
            df = DataFrame(s_list)
            queue1_put(QUEUE_S1, df[0])
            queue1_put(QUEUE_S2, df[1])
            queue1_put(QUEUE_SM, df[2])
            queue1_put(QUEUE_PS, 's')

            logger.info("Sent aligning completed.")

            logger.debug(
                """messagebox.showinfo(" Sent aligning completed!", "You can edit the upper table..."""
            )
            messagebox.showinfo(
                " Sent aligning completed!",
                "You can edit the upper table manually and press Start to realign or press Back"
            )

            signal = {
                "PAlign": False,
                "SAlign": True,
                "pbtoplevel": False,  # pbar grab_release
            }
            logger.debug(
                """pbar-s send blinker.signal to aligner slot:
                    %s """, signal)
            SIG_ALIGNER.send("check_thread_update1", **signal)

            logger.debug(" <check_thread_update> exit")

            return None

        # ######### PARAS ###########
        # set QUEUE_PA self.paligned

        # update Table and Pad from bee_aligner.bee_aligner thread
        # in myprogressbar_ui/start_command

        logger.debug(
            """messagebox.showinfo(" Para aligning completed!", "You can adjust..."""
        )
        messagebox.showinfo(
            " Paras aligning completed!",
            "You can adjust the threshold and press Start realign or press Back and edit the upper table manually."
        )

        para_list = thr.para_list  # thr is c_th in bee_aligner
        SIG_TABLE.send("check_thread_update", df=para_list)

        queue1_put(QUEUE_PA, True)

        # set QUEUE_P1, QUEUE_P2 QUEUE_PM
        df = DataFrame(para_list)
        # logger.debug(" DataFrame(p_list).head(3): %s", df.head(3))

        self.paras1 = df[0]
        self.paras2 = df[1]
        self.paras_merit = df[2]
        # self.text1 = "\n".join(self.paras1)
        # self.text2 = "\n".join(self.paras2)

        queue1_put(QUEUE_P1, self.paras1)
        queue1_put(QUEUE_P2, self.paras2)
        queue1_put(QUEUE_PM, self.paras_merit)

        # QUEUE_T1/2 only needed for Ctrl-P for the first time
        # queue1_put(QUEUE_T1, self.text1)
        # queue1_put(QUEUE_T2, self.text2)

        self.paligned = True
        queue1_put(QUEUE_PA, True)

        # QUEUE_PS also for another purpose:
        # controls branching in check_thread_update
        # self.Table.model.df currently is paras1/2/_merit
        queue1_put(QUEUE_PS, 'p')  # update QUEUE_PS

        # cp_p
        logger.info(
            "update: self.paras1/2/_merit, Q_P1/2/M self.text1/2, Q_T1/2 self.paligned/Q_PA "
        )
        logger.info("paras aligning completed.")
        logger.debug(" check_thread_update-p exit")

        # enable SAlign in pbar.back_command to avoid confusion

        signal = {"pbtoplevel": True}
        logger.debug("send blinker.signal to aligner slot, signal: %s", signal)
        SIG_ALIGNER.send("check_thread_update", **signal)
示例#46
0
            _DBROOTPATHDATA = stdout.readlines()
            MCSDBROOT.append(_DBROOTPATHDATA[0]) 
        logger.info("DBROOT PATH : " + str(MCSDBROOT))

        ###############################################################################
        # suspendDBWrites(OAM)
        ###############################################################################        
        _SUSPENDWRITE = """%s/bin/mcsadmin suspendDatabaseWrites y""" % INSTALLDIR
        stdin, stdout, stderr = ssh.exec_command(_SUSPENDWRITE)
        _sshdata = stdout.readlines()
        _SUSPENDRESULT = _sshdata[4].split(" ")
        
        if len(_SUSPENDRESULT) > 7 and _SUSPENDRESULT[7][:-1] == "completed":
            logger.info("Suspend DB Write Complete.")
        else:
            logger.error("Suspend DB Write Error.")         

        # OAM Session Close
        ssh.close() 
        ###############################################################################       
        # Backup Execution
        ###############################################################################
        for idx,DIVSVR in enumerate(MCSSVR):
            globals()[DIVSVR['module']] = threading.Thread(target=backupEXC,args=tuple(DIVSVR.values()))          
            globals()[DIVSVR['module']].start()           
        
        for idx,DIVSVR in enumerate(MCSSVR):
             globals()[DIVSVR['module']].join()

        ###############################################################################
        # OAM Reconnection
示例#47
0
async def login_euserv(
    email: Optional[str] = "",
    password: Optional[str] = "",
    browser=BROWSER,
) -> pyppeteer.page.Page:
    # fmt: on
    """Login to https://support.euserv.com/.

    return a pyppeteer.page.Page for subsequent processing.
    """
    try:
        _ = await browser.newPage()
    except Exception as exc:
        logger.error(exc)
        logger.info("Getting a new ppbrowser...")
        browser = await get_ppbrowser()

    try:
        page = await browser.newPage()
    except Exception as exc:
        logger.error(exc)
        raise

    ubound = 3
    idx = 0
    err_flag = False
    while idx < ubound:
        idx += 1  # retry ubound times
        logger.debug("Going to %s", URL)
        done, pending = await asyncio.wait([
            page.goto(URL),
            page.waitForNavigation(),
        ])
        err_flag = False
        for task in done:
            try:
                await task
            except Exception as exc:
                logger.error(exc)
                err_flag = True
        if err_flag:
            logger.info("Retry #%s", idx)
            sleep(randint(1, 10))
        else:
            break
    if err_flag:
        raise SystemError(
            "err_flag: %s, check previous error messages in the log" %
            err_flag)  # return

    # We give it another try
    try:
        _ = await page.waitForSelector(".td-title", {"timeout": 20000})

        # already logged in
        if "Logout" in (await page.content()):
            logger.info("Already logged in.")
            raise SystemExit(" Change this to return page ")
            # return page
    except Exception as exc:
        logger.error("Not logged in yet, exc: %s, proceed", exc)

    # proceed
    # wait for form/submit
    logger.debug("Trying logging in...")
    try:
        await page.waitForSelector(".td-title", {"timeout": 20000})
    except TimeoutError as exc:
        logger.error(exc)
        raise
    except Exception as exc:
        logger.error(
            "Unable to fetch the page, network problem or euserv has changed page layout, %s, exiting",
            exc)
        raise SystemExit(1) from exc

    if not email:
        email = os.environ.get("EUSERV_EMAIL")
    if not password:
        password = os.environ.get("EUSERV_PASSWORD")

    if not email:
        logger.error(
            'Supply email address login_euserv(email="...") or set it in .env or as ENVIRONMENT (set/export EUSERV_EMAIL="...")'
        )
        raise SystemExit(1)

    if not password:
        logger.error(
            'Supply password, e.g., login_euserv(password="...") or set it in .env or as ENVIRONMENT (set/export EUSERV_PASSWORD="...")'
        )
        raise SystemExit(1)

    logger.info("\nemail: %s \npassword: %s", "*" * 6 + email[6:],
                "*" * (len(password) + 3))

    logger.debug("Logging in with email and password")
    try:
        await page.type('input[name="email"]', email, {"delay": 20})
        await page.type('input[name="password"]', password + "\n",
                        {"delay": 20})
        # await handle.type('input[name="email"]', email, {"delay": 20})
        # await handle.type('input[name="password"]', password, {"delay": 20})

        # bhandle = await page.xpath('//*[@id="clogs"]/button')
        # await bhandle[0].click()
    except Exception as exc:
        logger.error("Oh no, exc: %s, exiting", exc)
        raise SystemExit(1)

    # wait for page to load
    # kc2_order_customer_orders_tab_1 vServer
    logger.info("Waiting for 'Cover Page' to load...")
    try:
        # _ = await page.waitForSelector('#kc2_order_customer_orders_tab_1', {"timeout": 45000})
        # _ = await page.waitForSelector('#kc2_order_customer_orders_tab_1', {"timeout": 45000})
        _ = await page.waitForXPath('//*[contains(text(),"Cover Page")]',
                                    {"timeout": 45000})
    except Exception as exc:
        logger.error("No go, exc: %s, exiting", exc)
        if "Login failed" in (await page.content()):
            logger.error("""
                Login failed.
                Please check email address/customer ID and password.""")
        # raise Exception(str(exc))
        logger.warning("Bad news: we are _not_ in, closing the page")
        await page.close()

        return page  # use page.isClosed() to check

    # if "vServer" in (await page.content()):
    if "Cover Page" in (await page.content()):
        logger.info("Good news: we are in.")
    else:
        logger.warning(
            "Something is not right, maybe euserv's page layout is changed?")

    return page
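
# A hedged driver for the coroutine above; credentials may instead come from
# the EUSERV_EMAIL/EUSERV_PASSWORD environment variables, as the function
# itself falls back to them:
import asyncio

page = asyncio.get_event_loop().run_until_complete(
    login_euserv(email="me@example.com", password="...")  # placeholders
)
if not page.isClosed():  # the function returns a closed page on login failure
    print("logged in")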
示例#48
0
def backupEXC(module_nm,target_ip):
    # Declare
    ssh_cli = paramiko.SSHClient()
    ssh_cli.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    # Connection
    try:
        if not SSHPASS:
            ssh_cli.connect(target_ip,username=SSHUSER,port=SSHPORT,timeout=10)
            logger.info(module_nm + ' Module Connection Success.')
        else:
            ssh_cli.connect(target_ip,username=SSHUSER,password=SSHPASS,port=SSHPORT,timeout=10)
            logger.info(module_nm + ' Module Connection Success.')
    except Exception as MODULEERR:
        logger.error(MODULEERR)

    ###############################################################################
    # BEFORE BACKUP DELETE
    ###############################################################################
    TDAY = date.today()
    YDAY = TDAY + relativedelta(days=-(ARCHIVEDAY))
    DELDAY=YDAY.strftime('%Y%m%d')
    DELTARGET = BACKUPDIR + "/" + str(DELDAY)
    _rmTARGET = """rm -rf %s""" % DELTARGET
    ssh_cli.exec_command(_rmTARGET)

    ###############################################################################
    # CREATE BACKUPBASE DIRECTORY
    ###############################################################################
    BACKUPBASE = BACKUPDIR + "/" + str(BACKDAY)
    _backupDIRCHK = """(ls %s >> /dev/null 2>&1 && echo Y) || echo N""" % BACKUPBASE
    stdin, stdout, stderr = ssh_cli.exec_command(_backupDIRCHK)
    _backupDIRRESULT = stdout.readlines()
    
    
    if _backupDIRRESULT[0][:-1] == "N":
        _mkdir = """/usr/bin/mkdir -p %s""" % BACKUPBASE            
        ssh_cli.exec_command(_mkdir)

        stdin, stdout, stderr = ssh_cli.exec_command(_backupDIRCHK)
        _backupDIRRESULT = stdout.readlines()

        if _backupDIRRESULT[0][:-1] == "Y":
            logger.info(module_nm + " Directory Make Success : " + str(BACKUPBASE))
            _chownDir = "/usr/bin/chown -R %s:%s %s" % (SSHUSER,SSHUSER,BACKUPBASE)
            ssh_cli.exec_command(_chownDir)
        else:
            logger.error("Directory Make Failed : " + str(BACKUPBASE))
            exit()
    else:
        _chownDir = "/usr/bin/chown -R %s:%s %s" % (SSHUSER,SSHUSER,BACKUPBASE)
        ssh_cli.exec_command(_chownDir)
    ###############################################################################
    # Columnstore Config File Copy
    ###############################################################################
    try:
        _configCOPY = """cp %s/etc/Columnstore.xml %s""" % (INSTALLDIR,BACKUPBASE)
        _myconfigCOPY = """cp %s/mysql/my.cnf %s""" % (INSTALLDIR,BACKUPBASE)
        ssh_cli.exec_command(_configCOPY)
        ssh_cli.exec_command(_myconfigCOPY)
        logger.info(module_nm + " Configure File Copy Success.")
    except Exception as CONERR:
        logger.error(module_nm + " " + str(CONERR))

    ###############################################################################
    # Data File Copy
    ###############################################################################
    # CREATE DBROOT BACKUPDIR 
    DBROOTBACKUPDIR = BACKUPBASE + "/" + module_nm + "_dbroot"
    _dbrootMAKE = """/usr/bin/mkdir -p %s""" % DBROOTBACKUPDIR   
    ssh_cli.exec_command(_dbrootMAKE)
    logger.info(module_nm + " DBROOT Backup DIR Create Success.")

    _checkCOM = """du -sh %s | awk '{printf $1}'"""
    for idx,DBROOTPATH in enumerate(MCSDBROOT):        
        
        # DBROOT BACKUP(RSYNC)
        DATAORGDIR = INSTALLDIR + "/" + "data" + str(idx+1)
        DATADIR = DATAORGDIR + "/" + "000.dir"  
        _dataDIRCHK= """(ls %s >> /dev/null 2>&1 && echo Y) || echo N""" % DATADIR
        stdin, stdout, stderr = ssh_cli.exec_command(_dataDIRCHK)
        _DATARESULT = stdout.readlines()       

        if _DATARESULT[0][:-1] == "Y":
            logger.info(module_nm + " DBROOT Backup Start.")
            _backupEXE = """rsync -a --delete %s %s""" % (DATAORGDIR,DBROOTBACKUPDIR)
            ssh_cli.exec_command(_backupEXE)
            
            while True:
                # Check Command
                _checkORG = _checkCOM % DATAORGDIR
                _backupPATH = DBROOTBACKUPDIR + "/data" + str(idx+1)
                _checkBACKUP = _checkCOM % _backupPATH
                stdin, stdout, stderr = ssh_cli.exec_command(_checkORG)
                _orgRESULT = stdout.readlines()
                stdin, stdout, stderr = ssh_cli.exec_command(_checkBACKUP)
                _backRESULT = stdout.readlines()

                if _orgRESULT[0] == _backRESULT[0]:
                    logger.info(module_nm + " DBROOT Backup Success.")
                    break
                else:
                    time.sleep(5)            

    ###############################################################################
    # Version File Copy
    ###############################################################################
    try:       
        _versionCopy = """cp %s/releasenum %s""" % (INSTALLDIR,BACKUPBASE)
        ssh_cli.exec_command(_versionCopy)
        logger.info(module_nm + " Version File Copy.")
    except Exception as VERERR:
        logger.error(module_nm + " " + str(VERERR))

    # Connection Close
    ssh_cli.close()
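
# backupEXC() is meant to be fanned out per module over threads, as in the
# orchestration fragment further above; a minimal hedged sketch with
# illustrative module names and addresses:
import threading

servers = [("pm1", "10.0.0.11"), ("pm2", "10.0.0.12")]
threads = [threading.Thread(target=backupEXC, args=srv) for srv in servers]
for t in threads:
    t.start()
for t in threads:
    t.join()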
示例#49
0
async def add_orders(self, payload):
    keys = ["user", "pharmacy", "item", "smsCode"]
    state, msg = validate(keys, payload)
    if not state:
        return {
            'status': False,
            "msg": "参数校验失败",
            "code": StatusCode.miss_params_error.value
        }
    phone = payload['user'].get('telephone')
    code = payload.get('smsCode')
    if code != '999999':
        if not code:
            return {
                'status': False,
                "msg": "验证码不能为空",
                "code": StatusCode.miss_params_error.value
            }
        verify_state, verify_msg = SMSRecord.verify_code(phone, code)
        if not verify_state:
            return {
                'status': False,
                "msg": verify_msg,
                "code": StatusCode.miss_params_error.value
            }
    user = OrderUser.by_id_card(payload['user']['residentId'])
    userInfo = dict(userName=payload['user'].get('name'),
                    userIdCard=payload['user'].get('residentId'),
                    userPhone=payload['user'].get('telephone'),
                    communityName=payload['user'].get('communityName'),
                    communityDistrict=payload['user'].get('communityDistrict'),
                    communityAddress=payload['user'].get('communityAddress'))
    if not user:
        try:
            user = OrderUser(**userInfo)
            self.db.add(user)
            self.db.commit()
        except Exception as e:
            logger.error(f"Save user Error {e}")
            return {
                'status': False,
                'msg': "数据库操作失败",
                "code": StatusCode.db_error.value
            }
    else:
        user.update(**userInfo)
    orderInfo = dict(
        userId=user.id,
        productName=payload.get('item').get('name'),
        productQty=payload.get('item').get('quantity'),
        pharmacyName=payload.get('pharmacy').get('name'),
        pharmacyDistrict=payload.get('pharmacy').get('district'),
        pharmacyAddress=payload.get('pharmacy').get('address'),
    )
    try:
        order = Orders(**orderInfo)
        self.db.add(order)
        self.db.commit()
    except Exception as e:
        logger.error(f"Save user Error {e}")
        return {
            'status': False,
            'msg': "数据库操作失败",
            "code": StatusCode.db_error.value
        }
    return {
        'status': True,
        'msg': '预约成功',  # "reservation successful"
        'code': StatusCode.success.value,
        "data": order.to_dict()
    }
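
# A hedged sketch of the payload shape add_orders() expects; values are
# illustrative, and "999999" is the test code that skips SMS verification
# above. `handler` stands in for an instance of the (unshown) enclosing class.
payload = {
    "user": {
        "name": "...",
        "residentId": "...",  # resident ID card number
        "telephone": "...",
        "communityName": "...",
        "communityDistrict": "...",
        "communityAddress": "...",
    },
    "pharmacy": {"name": "...", "district": "...", "address": "..."},
    "item": {"name": "...", "quantity": 1},
    "smsCode": "999999",
}
# result = await handler.add_orders(payload)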
示例#50
0
    def ExecutionCompleted(self, engine, success, error=None):

        height = Blockchain.Default().Height
        tx_hash = None

        if engine.ScriptContainer:
            tx_hash = engine.ScriptContainer.Hash

        if not tx_hash:
            tx_hash = UInt256(data=bytearray(32))

        entry_script = None
        try:
            # get the first script that was executed
            # this is usually the script that sets up the script to be executed
            entry_script = UInt160(data=engine.ExecutedScriptHashes[0])

            # ExecutedScriptHashes[1] will usually be the first contract executed
            if len(engine.ExecutedScriptHashes) > 1:
                entry_script = UInt160(data=engine.ExecutedScriptHashes[1])
        except Exception as e:
            logger.error("Could not get entry script: %s " % e)

        payload = []
        for item in engine.EvaluationStack.Items:
            payload_item = stack_item_to_py(item)
            payload.append(payload_item)

        if success:

            # dispatch all notify events, along with the success of the contract execution
            for notify_event_args in self.notifications:
                self.events_to_dispatch.append(
                    NotifyEvent(SmartContractEvent.RUNTIME_NOTIFY,
                                notify_event_args.State,
                                notify_event_args.ScriptHash, height, tx_hash,
                                success, engine.testMode))

            if engine.Trigger == Application:
                self.events_to_dispatch.append(
                    SmartContractEvent(SmartContractEvent.EXECUTION_SUCCESS,
                                       payload, entry_script, height, tx_hash,
                                       success, engine.testMode))
            else:
                self.events_to_dispatch.append(
                    SmartContractEvent(SmartContractEvent.VERIFICATION_SUCCESS,
                                       payload, entry_script, height, tx_hash,
                                       success, engine.testMode))

        else:
            if engine.Trigger == Application:
                self.events_to_dispatch.append(
                    SmartContractEvent(SmartContractEvent.EXECUTION_FAIL,
                                       [payload, error, engine._VMState],
                                       entry_script, height, tx_hash, success,
                                       engine.testMode))
            else:
                self.events_to_dispatch.append(
                    SmartContractEvent(SmartContractEvent.VERIFICATION_FAIL,
                                       [payload, error, engine._VMState],
                                       entry_script, height, tx_hash, success,
                                       engine.testMode))

        self.notifications = []
示例#51
0
def DoRun(contract_script,
          arguments,
          wallet,
          path,
          verbose=True,
          from_addr=None,
          min_fee=DEFAULT_MIN_FEE,
          invocation_test_mode=True):

    test = get_arg(arguments, 1)

    if test is not None and test == 'test':

        if wallet is not None:

            f_args = arguments[2:]
            i_args = arguments[6:]

            script = GatherLoadedContractParams(f_args, contract_script)

            tx, result, total_ops, engine = test_deploy_and_invoke(
                script, i_args, wallet, from_addr, min_fee,
                invocation_test_mode)
            i_args.reverse()

            return_type_results = []

            try:
                rtype = ContractParameterType.FromString(f_args[1])
                for r in result:
                    cp = ContractParameter.AsParameterType(rtype, r)
                    return_type_results.append(cp.ToJson())
            except Exception as e:
                logger.error(
                    'Could not convert result to ContractParameter: %s ' % e)

            if tx is not None and result is not None:
                if verbose:
                    print(
                        "\n-----------------------------------------------------------"
                    )
                    print("Calling %s with arguments %s " % (path, i_args))
                    print("Test deploy invoke successful")
                    print("Used total of %s operations " % total_ops)
                    print("Result %s " % return_type_results)
                    print("Invoke TX gas cost: %s " %
                          (tx.Gas.value / Fixed8.D))
                    print(
                        "-------------------------------------------------------------\n"
                    )

                return tx, result, total_ops, engine
            else:
                if verbose:
                    print("Test invoke failed")
                    print("tx is, results are %s %s " % (tx, result))

        else:

            print("please open a wallet to test built contract")

    return None, None, None, None
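
# A hedged call sketch; the argument layout is inferred from the slicing
# above ([path, 'test', params, return_type, needs_storage,
# needs_dynamic_invoke, *invoke_args]), and contract_script/wallet are
# assumed to come from the caller's context:
arguments = ["sample.py", "test", "0710", "05", "True", "False", "some_arg"]
tx, result, total_ops, engine = DoRun(contract_script, arguments, wallet, "sample.py")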
示例#52
0
    def invoke_method(self, method_name, *args):
        """ invoke a method of the smart contract """

        logger.info("invoke_method: method_name=%s, args=%s", method_name,
                    args)
        logger.info("Block %s / %s" % (str(Blockchain.Default().Height),
                                       str(Blockchain.Default().HeaderHeight)))

        self.open_wallet()

        if not self.wallet:
            raise Exception(
                "Open a wallet before invoking a smart contract method.")

        if self.tx_in_progress:
            raise Exception("Transaction already in progress (%s)" %
                            self.tx_in_progress.Hash.ToString())

        logger.info("making sure wallet is synced...")
        time.sleep(3)

        # Wait until wallet is synced:
        while True:
            percent_synced = int(100 * self.wallet._current_height /
                                 Blockchain.Default().Height)
            if percent_synced > 99:
                break
            logger.info(
                "waiting for wallet sync... height: %s. percent synced: %s" %
                (self.wallet._current_height, percent_synced))
            time.sleep(5)

        time.sleep(3)
        logger.info("wallet synced. checking if gas is available...")

        if not self.wallet_has_gas():
            raise Exception("Wallet has no gas.")

        _args = [self.contract_hash, method_name, str(list(args))]
        logger.info("TestInvokeContract args: %s", _args)
        tx, fee, results, num_ops = TestInvokeContract(self.wallet, _args)
        if not tx:
            raise Exception("TestInvokeContract failed")

        logger.info("TestInvokeContract done, calling InvokeContract now...")
        sent_tx = InvokeContract(self.wallet, tx, fee)

        if sent_tx:
            logger.info("InvokeContract success, transaction underway: %s" %
                        sent_tx.Hash.ToString())
            self.tx_in_progress = sent_tx

            found = self._wait_for_tx(sent_tx)
            if found:
                logger.info("✅ tansaction found! all done!")
            else:
                logger.error("=== TX not found!")

            self.close_wallet()

            self.tx_in_progress = None
            logger.info("InvokeContract done, tx_in_progress freed.")

        else:
            raise Exception("InvokeContract failed")
示例#53
0
def main(text, k=1):
    """Run main."""
    if k == 22:
        logger.info("Testing set_languages = ['en', 'de']")
        k = 2

        fastlid.set_languages = ["en", "de"]

    logger.debug(fastlid(text, k=k))


if __name__ == "__main__":
    import sys

    logger.debug(sys.argv)

    try:
        k = int(sys.argv[1])
    except Exception as e:
        logger.error(e)
        k = 1

    try:
        text = str(sys.argv[2])
    except Exception as e:
        logger.error(e)
        text = "test"

    logger.info("text: %s", text)
    main(text, k)
示例#54
0
def main():
    parser = argparse.ArgumentParser()

    # Network options
    group_network_container = parser.add_argument_group(title="Network options")
    group_network = group_network_container.add_mutually_exclusive_group(required=True)
    group_network.add_argument("--mainnet", action="store_true", default=False, help="Use MainNet")
    group_network.add_argument("--testnet", action="store_true", default=False, help="Use TestNet")
    group_network.add_argument("--privnet", action="store_true", default=False, help="Use PrivNet")
    group_network.add_argument("--coznet", action="store_true", default=False, help="Use CozNet")
    group_network.add_argument("--config", action="store", help="Use a specific config file")

    # Ports for RPC and REST api
    group_modes = parser.add_argument_group(title="Mode(s)")
    group_modes.add_argument("--port-rpc", type=int, help="port to use for the json-rpc api (eg. 10332)")
    group_modes.add_argument("--port-rest", type=int, help="port to use for the rest api (eg. 80)")

    # Advanced logging setup
    group_logging = parser.add_argument_group(title="Logging options")
    group_logging.add_argument("--logfile", action="store", type=str, help="Logfile")
    group_logging.add_argument("--syslog", action="store_true", help="Log to syslog instead of to log file ('user' is the default facility)")
    group_logging.add_argument("--syslog-local", action="store", type=int, choices=range(0, 7), metavar="[0-7]",
                               help="Log to a local syslog facility instead of 'user'. Value must be between 0 and 7 (e.g. 0 for 'local0').")
    group_logging.add_argument("--disable-stderr", action="store_true", help="Disable stderr logger")

    # Where to store stuff
    parser.add_argument("--datadir", action="store",
                        help="Absolute path to use for database directories")
    # peers
    parser.add_argument("--maxpeers", action="store", default=5,
                        help="Max peers to use for P2P Joining")

    # If a wallet should be opened
    parser.add_argument("--wallet",
                        action="store",
                        help="Open wallet. Will allow you to use methods that require an open wallet")

    # host
    parser.add_argument("--host", action="store", type=str, help="Hostname ( for example 127.0.0.1)", default="0.0.0.0")

    # Now parse
    args = parser.parse_args()
    # print(args)

    if not args.port_rpc and not args.port_rest:
        print("Error: specify at least one of --port-rpc / --port-rest")
        parser.print_help()
        return

    if args.port_rpc == args.port_rest:
        print("Error: --port-rpc and --port-rest cannot be the same")
        parser.print_help()
        return

    if args.logfile and (args.syslog or args.syslog_local):
        print("Error: Cannot only use logfile or syslog at once")
        parser.print_help()
        return

    # Setting the datadir must come before setting the network, else the wrong path is checked at net setup.
    if args.datadir:
        settings.set_data_dir(args.datadir)

    # Network configuration depending on command line arguments. By default, the testnet settings are already loaded.
    if args.config:
        settings.setup(args.config)
    elif args.mainnet:
        settings.setup_mainnet()
    elif args.testnet:
        settings.setup_testnet()
    elif args.privnet:
        settings.setup_privnet()
    elif args.coznet:
        settings.setup_coznet()

    if args.maxpeers:
        try:
            settings.set_max_peers(args.maxpeers)
            print("Maxpeers set to ", args.maxpeers)
        except ValueError:
            print("Please supply a positive integer for maxpeers")
            return  

    if args.syslog or args.syslog_local is not None:
        # Setup the syslog facility
        if args.syslog_local is not None:
            print("Logging to syslog local%s facility" % args.syslog_local)
            syslog_facility = SysLogHandler.LOG_LOCAL0 + args.syslog_local
        else:
            print("Logging to syslog user facility")
            syslog_facility = SysLogHandler.LOG_USER

        # Setup logzero to only use the syslog handler
        logzero.syslog(facility=syslog_facility)
    else:
        # Setup file logging
        if args.logfile:
            logfile = os.path.abspath(args.logfile)
            if args.disable_stderr:
                print("Logging to logfile: %s" % logfile)
            else:
                print("Logging to stderr and logfile: %s" % logfile)
            logzero.logfile(logfile, maxBytes=LOGFILE_MAX_BYTES, backupCount=LOGFILE_BACKUP_COUNT, disableStderrLogger=args.disable_stderr)

        else:
            print("Logging to stdout and stderr")

    if args.wallet:
        if not os.path.exists(args.wallet):
            print("Wallet file not found")
            return

        passwd = os.environ.get('NEO_PYTHON_JSONRPC_WALLET_PASSWORD', None)
        if not passwd:
            passwd = prompt("[password]> ", is_password=True)

        password_key = to_aes_key(passwd)
        try:
            wallet = UserWallet.Open(args.wallet, password_key)

        except Exception as e:
            print(f"Could not open wallet {e}")
            return
    else:
        wallet = None

    # Disable logging smart contract events
    settings.set_log_smart_contract_events(False)

    # Write a PID file to easily quit the service
    write_pid_file()

    # Setup Twisted and Klein logging to use the logzero setup
    observer = STDLibLogObserver(name=logzero.LOGZERO_DEFAULT_LOGGER)
    globalLogPublisher.addObserver(observer)

    def loopingCallErrorHandler(error):
        logger.info("Error in loop: %s " % error)

    # Instantiate the blockchain and subscribe to notifications
    blockchain = LevelDBBlockchain(settings.chain_leveldb_path)
    Blockchain.RegisterBlockchain(blockchain)

    start_block_persisting()

    # If a wallet is open, make sure it processes blocks
    if wallet:
        walletdb_loop = task.LoopingCall(wallet.ProcessBlocks)
        wallet_loop_deferred = walletdb_loop.start(1)
        wallet_loop_deferred.addErrback(loopingCallErrorHandler)

    # Setup twisted reactor, NodeLeader and start the NotificationDB
    reactor.suggestThreadPoolSize(15)
    NodeLeader.Instance().Start()
    NotificationDB.instance().start()

    # Start a thread with custom code
    d = threading.Thread(target=custom_background_code)
    d.setDaemon(True)  # daemonizing the thread will kill it when the main thread is quit
    d.start()

    if args.port_rpc:
        logger.info("Starting json-rpc api server on http://%s:%s" % (args.host, args.port_rpc))
        try:
            rpc_class = load_class_from_path(settings.RPC_SERVER)
        except ValueError as err:
            logger.error(err)
            sys.exit()
        api_server_rpc = rpc_class(args.port_rpc, wallet=wallet)

        endpoint_rpc = "tcp:port={0}:interface={1}".format(args.port_rpc, args.host)
        endpoints.serverFromString(reactor, endpoint_rpc).listen(Site(api_server_rpc.app.resource()))

    if args.port_rest:
        logger.info("Starting REST api server on http://%s:%s" % (args.host, args.port_rest))
        try:
            rest_api = load_class_from_path(settings.REST_SERVER)
        except ValueError as err:
            logger.error(err)
            sys.exit()
        api_server_rest = rest_api()
        endpoint_rest = "tcp:port={0}:interface={1}".format(args.port_rest, args.host)
        endpoints.serverFromString(reactor, endpoint_rest).listen(Site(api_server_rest.app.resource()))

    reactor.addSystemEventTrigger('before', 'shutdown', stop_block_persisting)
    reactor.run()

    # After the reactor is stopped, gracefully shutdown the database.
    logger.info("Closing databases...")
    NotificationDB.close()
    Blockchain.Default().Dispose()
    NodeLeader.Instance().Shutdown()
    if wallet:
        wallet.Close()
示例#55
0
                else:
                    print('I\'m not aware of the method you specified.')
        else:
            print('The entity you specified was not in those I am aware of.')

    def test(self):
        """List out some information about our entities and inputs."""
        parser = argparse.ArgumentParser()
        parser.add_argument("--args",
                            type=str,
                            nargs='+',
                            help='pytest args to pass in. (--args="-r a")')
        args = parser.parse_args(sys.argv[2:])
        if args.args:
            pyargs = args.args
        else:
            pyargs = ['-q']
        pytest.cmdline.main(args=pyargs)

    def __repr__(self):
        # __repr__ must return a string; returning None raises a TypeError
        return "{}()".format(self.__class__.__name__)


if __name__ == '__main__':
    try:
        Main()
    except KeyboardInterrupt:
        logger.warning('Rizza stopped by user.')
    except Exception as err:
        logger.error(err)
示例#56
0
        def loop_validation_and_stepinto():
            while self._VMState & VMState.HALT == 0 and self._VMState & VMState.FAULT == 0:

                try:

                    self.gas_consumed = self.gas_consumed + (self.GetPrice() *
                                                             self.ratio)
                    # print("gas consumed: %s " % self.gas_consumed)
                except Exception as e:
                    logger.error("Exception calculating gas consumed %s " % e)
                    self._VMState |= VMState.FAULT
                    return False

                if not self.testMode and self.gas_consumed > self.gas_amount:
                    logger.error("NOT ENOUGH GAS")
                    self._VMState |= VMState.FAULT
                    return False

                if not self.CheckItemSize():
                    logger.error("ITEM SIZE TOO BIG")
                    self._VMState |= VMState.FAULT
                    return False

                if not self.CheckStackSize():
                    logger.error("STACK SIZE TOO BIG")
                    self._VMState |= VMState.FAULT
                    return False

                if not self.CheckArraySize():
                    logger.error("ARRAY SIZE TOO BIG")
                    self._VMState |= VMState.FAULT
                    return False

                if not self.CheckInvocationStack():
                    logger.error("INVOCATION SIZE TO BIIG")
                    self._VMState |= VMState.FAULT
                    return False

                if not self.CheckDynamicInvoke():
                    logger.error("Dynamic invoke without proper contract")
                    self._VMState |= VMState.FAULT
                    return False

                self.StepInto()
示例#57
0
# Main program
start_time = datetime.datetime.now()  # Datetime variable to store the start time
now_time = datetime.datetime.now()  # Datetime variable to store the current time

sense.set_pixels(matrix_Run())  # Displaying our logo

while (now_time < start_time + datetime.timedelta(minutes=175)):
    try:
        lat, lon, direct1, direct2 = get_latlon()  # Getting latitude and longitude

        water_per, cloud_per, land_per, season = image_processing()

        # Saving the data to the file
        logger.info("%s,%s,%s,%s,%s,%s,%s,%s,%s", photo_counter, lat, lon,
                    direct1, direct2, water_per, cloud_per, land_per, season)

        sleep(12)
        sense.set_pixels(matrix_Run())
        sleep(6)

        photo_counter += 1
        now_time = datetime.datetime.now()  # Updating current time
    except Exception as e:
        logger.error("Experiment error: " + str(e))

sense.clear()
示例#58
0
 def __init__(self,
              cfg,
              releases=[],
              zreleases=[],
              output_dir=None,
              user=None,
              password=None):
     super().__init__(cfg)
     if not output_dir:
         output_dir = mkdtemp(prefix="container_prepare_templates")
     self.org = Org(cfg, name=self._cfg.satellite["default_org"])
     releases = self.read_releases(releases, zreleases)
     base = {'parameter_defaults': {'ContainerImagePrepare': []}}
     base_param = base["parameter_defaults"]
     registry_url = f"{self._cfg.satellite['host']}:5000"
     if user and password:
         base_param["ContainerImageRegistryCredentials"] = {
             registry_url: {
                 user: password
             }
         }
         base_param["ContainerImageRegistryLogin"] = True
     base_param["DockerInsecureRegistryAddress"] = registry_url
     for release, r in releases.items():
         if r["container"]:
             cvs = ContentViews(self._cfg, self.org).get_by_releases(
                 release, zreleases)
             cvs = list(filter(lambda x: x.composite, cvs))
             self._build_repo_cache(cvs)
             log.info(
                 f"Fetching all repo informations for content-views ({len(cvs)})"
             )
             for z, containers in r["zstream"].items():
                 cv = list(
                     filter(lambda x: x.name.endswith(f"{release}-{z}"),
                            cvs))
                 if not len(cv):
                     log.error(f"Unable to find CV for {release}-{z}")
                     continue
                 else:
                     cv = cv[0]
                     log.info(f"Checking repos in {cv.name}")
                 content = deepcopy(base)
                 clist = content["parameter_defaults"][
                     "ContainerImagePrepare"]
                 latest_tag = f"{release}{z}".replace("z", ".").replace(
                     "OSP", "")
                 filename = f"container-image-prepare.{latest_tag}.yaml"
                 template = os.path.join(output_dir, filename)
                 if z == "latest":
                     latest_tag = "latest"
                 excludes = []
                 # removing ceph
                 noceph_containers = self.filter_containers(
                     containers, 'openstack-|rhel')
                 for container, tag in noceph_containers.items():
                     log.debug(f"Container {container} Tag: {tag}")
                     container_name = sub('^openstack-', '', container)
                     # We need to include/exclude with some kind of end-of-pattern string (:)
                     # because of this bug: https://bugzilla.redhat.com/show_bug.cgi?id=1853354
                     # Other wise, "heat-api" will also match "heat-api-cfn" for example.
                     excludes.append(f"{container_name}:")
                     repo = self.get_repo_by_name(cv, container_name)
                     if repo:
                         prefix = repo.container_repository_name.replace(
                             container_name, "")
                         clist.append({
                             "includes": [f"{container_name}:"],
                             "push_destination": False,
                             "set": {
                                 "name_prefix": prefix,
                                 "name_suffix": '',
                                 "namespace": self.get_namespace(repo),
                                 "tag": tag
                             }
                         })
                 ceph_container = self.filter_containers(
                     containers, r["container_ceph_prefix"])
                 log.info(ceph_container)
                 if len(ceph_container):
                     ceph_name = list(ceph_container.keys())[0]
                     ceph_tag = list(ceph_container.values())[0]
                     ceph = self.get_repo_by_name(cv, ceph_name)
                     ceph_image = ''
                     if ceph:
                         ceph_image = ceph.container_repository_name
                 else:
                     ceph_tag = ''
                     ceph_image = ''
                 namespace = self.get_namespace(repo)
                 clist.append({
                     "excludes": excludes,
                     "push_destination": False,
                     "set": {
                         "ceph_image": ceph_image,
                         "ceph_namespace": namespace,
                         "ceph_tag": ceph_tag,
                         "name_prefix": prefix,
                         "name_suffix": '',
                         "namespace": namespace,
                         "tag": latest_tag
                     }
                 })
                 now = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
                 with os.fdopen(
                         os.open(template,
                                 os.O_CREAT | os.O_TRUNC | os.O_WRONLY,
                                 0o666), 'w') as f:
                      f.write(
                          f"# File {filename} was prepared by forge for OSP {release}{z}\n"
                      )
                      f.write(f"# Generated on {now}\n")
                      f.write("# https://github.com/valleedelisle/forge\n")
                     yaml.dump(content, f, default_flow_style=False)
                 log.info(f"generated {filename}")
示例#59
0
 def error(message):
     logger.error(message)
示例#60
0
async def deepl_tr(
    text: str,
    from_lang: str = "auto",
    to_lang: str = "zh",
    page=None,
    verbose: Union[bool, int] = False,
    timeout: float = 5,
):
    # fmt: on
    """Deepl via pyppeteer.

    text = "Test it and more"
    from_lang="auto"
    to_lang="zh"
    page=PAGE
    verbose=True
    """
    #

    # set verbose=40 to turn most things off
    if isinstance(verbose, bool):
        if verbose:
            logzero.setup_default_logger(level=10)
        else:
            logzero.setup_default_logger(level=20)
    else:  # integer: log_level
        logzero.setup_default_logger(level=verbose)

    logger.debug(" Entry ")

    browser = None  # set only when this function launches its own browser
    if page is None:
        try:
            # browser = await get_ppbrowser()
            browser = await pyppeteer.launch()
        except Exception as exc:
            logger.error(exc)
            raise

        try:
            page = await browser.newPage()
        except Exception as exc:
            logger.error(exc)
            raise

        url = r"https://www.deepl.com/translator"
        try:
            await page.goto(url, timeout=45 * 1000)
        except Exception as exc:
            logger.error(exc)
            raise

    url0 = f"{URL}#{from_lang}/{to_lang}/"

    url_ = f"{URL}#{from_lang}/{to_lang}/{quote(text)}"

    # selector = ".lmt__language_select--target > button > span"

    if verbose is True or verbose < 11:
        _ = False  # not silent: let CodeTimer print the timing
    else:
        _ = True  # silent
    with CodeTimer(name="fetching", unit="s", silent=_):
        _ = """
        await page.goto(url0)

        try:
            await page.waitForSelector(selector, timeout=8000)
        except Exception as exc:
            raise
        # """

        try:
            content = await page.content()
        except Exception as exc:
            logger.error(exc)
            raise

        doc = pq(content)
        text_old = doc('#source-dummydiv').text()
        logger.debug("Old source: %s", text_old)

        try:
            deepl_tr.first_run
        except AttributeError:
            deepl_tr.first_run = 1
            text_old = "_some unlikely random text_"

        # selector = "div.lmt__translations_as_text"
        if text.strip() == text_old.strip():
            logger.debug(" ** early result: ** ")
            logger.debug("%s, %s", text,
                         doc('.lmt__translations_as_text__text_btn').text())
            doc = pq(await page.content())
            content = doc('.lmt__translations_as_text__text_btn').text()
        else:
            # record content
            try:
                # page.goto(url_)
                await page.goto(url0)
            except Exception as exc:
                logger.error(exc)
                raise

            try:
                await page.waitForSelector(".lmt__translations_as_text",
                                           timeout=20000)
            except Exception as exc:
                logger.error(exc)
                raise

            doc = pq(await page.content())
            content_old = doc('.lmt__translations_as_text__text_btn').text()

            # selector = ".lmt__translations_as_text"
            # selector = ".lmt__textarea.lmt__target_textarea.lmt__textarea_base_style"

            # selector = ".lmt__textarea.lmt__target_textarea"
            # selector = '.lmt__translations_as_text__text_btn'
            try:
                await page.goto(url_)
            except Exception as exc:
                logger.error(exc)
                raise

            try:
                await page.waitForSelector(".lmt__translations_as_text",
                                           timeout=20000)
            except Exception as exc:
                logger.error(exc)
                raise

            doc = pq(await page.content())
            content = doc('.lmt__translations_as_text__text_btn').text()

            logger.debug("content_old: [%s], \n\t content: [%s]", content_old,
                         content)

            # loop until content changed
            idx = 0
            # bound = 50  # 5s
            while idx < timeout / 0.1:
                idx += 1
                await asyncio.sleep(.1)
                doc = pq(await page.content())
                content = doc('.lmt__translations_as_text__text_btn').text()
                logger.debug("content_old: (%s), \n\tcontent: (%s)",
                             content_old, content)

                if content_old != content and bool(content):
                    break

            logger.debug(" loop: %s", idx)

    logger.debug(" Fini ")

    await page.close()
    if browser is not None:  # tear down the browser only if we launched it here
        await browser.close()
        browser.process.communicate()

    return content
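
# A hedged driver; with page=None the coroutine launches and closes its own
# headless Chromium (downloaded by pyppeteer on first use):
import asyncio

res = asyncio.get_event_loop().run_until_complete(
    deepl_tr("Test it and more", from_lang="en", to_lang="zh")
)
print(res)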