Example #1
    def OnUpdatedMaxPeers(self, old_value, new_value):

        if new_value < old_value:
            num_to_disconnect = old_value - new_value
            logger.warning("DISCONNECTING %s Peers, this may show unhandled error in defer " % num_to_disconnect)
            for p in self.Peers[-num_to_disconnect:]:
                p.Disconnect()
        elif new_value > old_value:
            for p in self.Peers:
                p.RequestPeerInfo()
Example #2
@task  # assumed decorator: this reads as an invoke task ("from invoke import task"); ACTIVATE, CONDA_ENV_NAME and log are module-level
def install(ctx):
    """Installs virtual environments and requirements."""
    with ctx.prefix(ACTIVATE):
        log.info("install reqs from conda")
        ctx.run("conda install --file requirements.jupyter.txt --yes")
        ctx.run("conda install --file requirements.dev.txt --yes")

        log.info("install reqs from pip that have no conda pkg")
        ctx.run("pip install -r requirements.pip.txt")
        ctx.run("pip install -r requirements.dev.pip.txt")

        log.info("install this code to allow importing from jupyter notebooks")
        ctx.run("pip install -e .")

        log.info("install this code to allow importing from jupyter notebooks")
        ctx.run(
            f"""python -m ipykernel install --sys-prefix --name {CONDA_ENV_NAME} --display-name "{CONDA_ENV_NAME}" """
        )
        try:
            ctx.run("jupyter labextension install @jupyterlab/toc")
        except Exception as err:
            log.warning(f"Skipped install of jupyterlab-toc: \n{err}")
Example #3
    def _run(
        self,
        strategy: Strategy,
        schedule: Schedule,  # noqa: C901
        experiment: Experiment,
        journal: Journal,
        configuration: Configuration,
        secrets: Secrets,
        settings: Settings,
        event_registry: EventHandlerRegistry,
    ) -> Journal:
        experiment["title"] = substitute(experiment["title"], configuration, secrets)
        logger.info("Running experiment: {t}".format(t=experiment["title"]))

        started_at = time.time()
        journal = journal or initialize_run_journal(experiment)
        event_registry.started(experiment, journal)

        control = Control()
        activity_pool, rollback_pool = get_background_pools(experiment)
        hypo_pool = get_hypothesis_pool()
        continuous_hypo_event = threading.Event()

        dry = experiment.get("dry", False)
        if dry:
            logger.warning("Dry mode enabled")

        initialize_global_controls(experiment, configuration, secrets, settings)
        initialize_controls(experiment, configuration, secrets)

        logger.info("Steady-state strategy: {}".format(strategy.value))
        rollback_strategy = (
            settings.get("runtime", {}).get("rollbacks", {}).get("strategy", "default")
        )
        logger.info("Rollbacks strategy: {}".format(rollback_strategy))

        exit_gracefully_with_rollbacks = True
        with_ssh = has_steady_state_hypothesis_with_probes(experiment)
        if not with_ssh:
            logger.info("No steady state hypothesis defined. That's ok, just exploring.")

        try:
            try:
                control.begin(
                    "experiment", experiment, experiment, configuration, secrets
                )

                state = object()
                if with_ssh and should_run_before_method(strategy):
                    state = run_gate_hypothesis(
                        experiment, journal, configuration, secrets, event_registry, dry
                    )

                if state is not None:
                    if with_ssh and should_run_during_method(strategy):
                        run_hypothesis_during_method(
                            hypo_pool,
                            continuous_hypo_event,
                            strategy,
                            schedule,
                            experiment,
                            journal,
                            configuration,
                            secrets,
                            event_registry,
                            dry,
                        )

                    state = run_method(
                        strategy,
                        activity_pool,
                        experiment,
                        journal,
                        configuration,
                        secrets,
                        event_registry,
                        dry,
                    )

                    continuous_hypo_event.set()
                    if journal["status"] not in ["interrupted", "aborted"]:
                        if (
                            with_ssh
                            and (state is not None)
                            and should_run_after_method(strategy)
                        ):
                            run_deviation_validation_hypothesis(
                                experiment,
                                journal,
                                configuration,
                                secrets,
                                event_registry,
                                dry,
                            )
            except InterruptExecution as i:
                journal["status"] = "interrupted"
                logger.fatal(str(i))
                event_registry.interrupted(experiment, journal)
            except KeyboardInterrupt:
                journal["status"] = "interrupted"
                logger.warning("Received a termination signal (Ctrl-C)...")
                event_registry.signal_exit()
            except SystemExit as x:
                journal["status"] = "interrupted"
                logger.warning("Received the exit signal: {}".format(x.code))

                exit_gracefully_with_rollbacks = x.code != 30
                if not exit_gracefully_with_rollbacks:
                    logger.warning("Ignoring rollbacks as per signal")
                event_registry.signal_exit()
            finally:
                hypo_pool.shutdown(wait=True)

            # just in case a signal overrode everything else to tell us not to
            # play them anyway (see the exit.py module)
            if exit_gracefully_with_rollbacks:
                run_rollback(
                    rollback_strategy,
                    rollback_pool,
                    experiment,
                    journal,
                    configuration,
                    secrets,
                    event_registry,
                    dry,
                )

            journal["end"] = datetime.utcnow().isoformat()
            journal["duration"] = time.time() - started_at

            # the spec only allows these statuses, so if it's anything else
            # we override to "completed"
            if journal["status"] not in (
                "completed",
                "failed",
                "aborted",
                "interrupted",
            ):
                journal["status"] = "completed"

            has_deviated = journal["deviated"]
            status = "deviated" if has_deviated else journal["status"]
            logger.info("Experiment ended with status: {s}".format(s=status))
            if has_deviated:
                logger.info(
                    "The steady-state has deviated, a weakness may have been "
                    "discovered"
                )

            control.with_state(journal)
            try:
                control.end(
                    "experiment", experiment, experiment, configuration, secrets
                )
            except ChaosException:
                logger.debug("Failed to close controls", exc_info=True)
        finally:
            try:
                cleanup_controls(experiment)
                cleanup_global_controls()
            finally:
                event_registry.finish(journal)

        return journal
Example #4
async def call(args: Dict[str, Any], cromwell: api.CromwellAPI) -> None:
    """Execute the subcommand.

    Args:
        args (Dict): Arguments parsed from the command line.
        cromwell (CromwellAPI): Handle to the Cromwell server API.
    """

    batches = True
    relative = None

    batches_relative = args.get("batches_relative")
    batches_absolute = args.get("batches_absolute")
    if batches_relative:
        batches = batches_relative
        relative = True
    elif batches_absolute:
        batches = batches_absolute
        relative = False

    workflows = await _workflows.get_workflows(
        cromwell,
        batches=batches,
        relative_batching=relative,
        batch_interval_mins=args.get("batch_interval_mins"),
    )

    aggregation: Dict[str, List[Any]] = defaultdict(list)
    for w in workflows:
        if "batch" in w:
            aggregation[w["batch"]].append(w)

    results = []

    if args.get("show_oliver_job_groups"):
        logger.warning(
            "You specified you'd like to see job group names. "
            "This significantly increases runtime due to the need to query "
            "metadata about each workflow. This may take a while!"
        )

    for batch_num, batch_workflows in aggregation.items():
        r = {"Batch": batch_num, "# of Jobs": len(batch_workflows)}

        # workflow statuses
        statuses: Dict[str, int] = defaultdict(int)
        for status in [w.get("status") for w in batch_workflows]:
            statuses[status] += 1
        # pylint: disable=C0206
        r["Statuses"] = ", ".join(
            sorted([f"{key} ({statuses[key]})" for key in statuses.keys()]))

        # job groups
        if args.get("show_oliver_job_groups"):
            metadatas = {
                w.get("id"): await cromwell.get_workflows_metadata(w.get("id"))
                for w in batch_workflows
            }
            r["Job Groups"] = ", ".join(
                list({
                    oliver.get_oliver_group(metadatas.get(x.get("id"), {}))
                    for x in batch_workflows
                }))

        # start time
        _sorted_workflows = sorted(batch_workflows,
                                   key=lambda x: x["start"])  # type: ignore
        earliest_start_time = min([x.get("start") for x in _sorted_workflows])
        r["Start Time"] = reporting.localize_date(earliest_start_time)

        # end time
        end_times = [x.get("end") for x in _sorted_workflows]
        if None in end_times:
            r["End Time"] = "Still running"
        else:
            latest_end_time = max(end_times)
            r["End Time"] = reporting.localize_date(latest_end_time)

        results.append(r)

    reporting.print_dicts_as_table(results, grid_style=args.get("grid_style"))
Example #5
    def __init__(self, path, skip_version_check=False, skip_header_check=False):
        super(LevelDBBlockchain, self).__init__()
        self._path = path

        self._header_index = []
        self._header_index.append(Blockchain.GenesisBlock().Header.Hash.ToBytes())

        self.TXProcessed = 0

        try:
            self._db = plyvel.DB(self._path, create_if_missing=True)
            logger.info("Created Blockchain DB at %s " % self._path)
        except Exception as e:
            logger.info("leveldb unavailable, you may already be running this process: %s " % e)
            raise Exception('Leveldb Unavailable')

        version = self._db.get(DBPrefix.SYS_Version)

        if skip_version_check:
            self._db.put(DBPrefix.SYS_Version, self._sysversion)
            version = self._sysversion

        if version == self._sysversion:  # or in the future, if version doesn't equal the current version...

            ba = bytearray(self._db.get(DBPrefix.SYS_CurrentBlock, 0))
            self._current_block_height = int.from_bytes(ba[-4:], 'little')

            if not skip_header_check:
                ba = bytearray(self._db.get(DBPrefix.SYS_CurrentHeader, 0))
                current_header_height = int.from_bytes(ba[-4:], 'little')
                current_header_hash = bytes(ba[:64].decode('utf-8'), encoding='utf-8')

                hashes = []
                try:
                    for key, value in self._db.iterator(prefix=DBPrefix.IX_HeaderHashList):
                        ms = StreamManager.GetStream(value)
                        reader = BinaryReader(ms)
                        hlist = reader.Read2000256List()
                        key = int.from_bytes(key[-4:], 'little')
                        hashes.append({'k': key, 'v': hlist})
                        StreamManager.ReleaseStream(ms)
                except Exception as e:
                    logger.info("Could not get stored header hash list: %s " % e)

                if hashes:
                    hashes.sort(key=lambda x: x['k'])
                    genstr = Blockchain.GenesisBlock().Hash.ToBytes()
                    for hlist in hashes:

                        for hash in hlist['v']:
                            if hash != genstr:
                                self._header_index.append(hash)
                            self._stored_header_count += 1

                if self._stored_header_count == 0:
                    logger.info("Current stored headers empty, re-creating from stored blocks...")
                    headers = []
                    for key, value in self._db.iterator(prefix=DBPrefix.DATA_Block):
                        dbhash = bytearray(value)[8:]
                        headers.append(Header.FromTrimmedData(binascii.unhexlify(dbhash), 0))

                    headers.sort(key=lambda h: h.Index)
                    for h in headers:
                        if h.Index > 0:
                            self._header_index.append(h.Hash.ToBytes())

                    # this will trigger the write of stored headers
                    if headers:
                        self.OnAddHeader(headers[-1])

                elif current_header_height > self._stored_header_count:

                    try:
                        hash = current_header_hash
                        targethash = self._header_index[-1]

                        newhashes = []
                        while hash != targethash:
                            header = self.GetHeader(hash)
                            newhashes.insert(0, header)
                            hash = header.PrevHash.ToBytes()

                        self.AddHeaders(newhashes)
                    except Exception as e:
                        logger.info("Could not add stored headers: %s " % e)

        elif version is None:
            self.Persist(Blockchain.GenesisBlock())
            self._db.put(DBPrefix.SYS_Version, self._sysversion)
        else:
            logger.error("\n\n")
            logger.warning("Database schema has changed from %s to %s.\n" % (version, self._sysversion))
            logger.warning("You must either resync from scratch, or use the np-bootstrap command to bootstrap the chain.")

            res = prompt("Type 'continue' to erase your current database and sync from new. Otherwise this program will exit:\n> ")
            if res == 'continue':

                with self._db.write_batch() as wb:
                    for key, value in self._db.iterator():
                        wb.delete(key)

                self.Persist(Blockchain.GenesisBlock())
                self._db.put(DBPrefix.SYS_Version, self._sysversion)

            else:
                raise Exception("Database schema changed")
Example #6
async def fetch_myservices(
    page: pyppeteer.page.Page,
) -> Tuple[List[Optional[str]], List[Optional[str]]]:
    """Fetch my_services info and their update links if any.

    return info, links
    """
    if page.isClosed():
        logger.warning("Invalid page handle provided, return ([], [])...")
        return [], []

    # make sure we are on the right page
    if page.url != "https://www.noip.com/members/dns/":
        try:
            await asyncio.wait([
                page.goto("https://www.noip.com/members/dns/"),
                page.waitForNavigation()
            ])
        except Exception as exc:
            logger.error("%s, exiting", exc)
            return [str(exc)], [""]

    # retrieve Current Hostnames number
    content = await page.content()
    hostname_numb = re.findall(r"Current\s+Hostnames.*\d+", content)

    if not hostname_numb:
        _ = "Not logged in or noip has changed page layout"
        logger.warning("%s, return  ([], [])...", _)
        return [_], []
    # retrieve str inside the list
    (hostname_numb, ) = hostname_numb
    logger.info(hostname_numb)  # 'Current Hostnames: 2'

    try:
        numb = int(hostname_numb.split(":")[1])
    except Exception as exc:
        logger.error("%s, setting numb=0", exc)
        numb = 0

    if numb == 0:  # no hostname configured
        return [hostname_numb], []

    # links
    # pq(content)('a.btn.btn-labeled.btn-configure').outerHtml()

    # [pq(elm).attr('href') for elm in pq(content)('a.btn.btn-labeled.btn-configure')]
    # ['host.php?host_id=77151845', 'host.php?host_id=77151476']

    links = re.findall(r"host\.php\?host_id=\d+", content)
    # ['host.php?host_id=77151845', 'host.php?host_id=77151476']

    logger.debug("update links: %s", links)
    _ = "https://www.noip.com/members/dns/"
    full_links = [f"{_}{elm}" for elm in links]
    logger.info("update links (full): %s", full_links)

    # existing services
    _ = pq(content)(".my-services").text()
    logger.debug("existing services: \n%s", _)

    try:
        my_services = [*mit.chunked(_.splitlines()[3:], 4)]
        # my_services = [*mit.chunked(_[3:], 4)]
        my_services = [", ".join(elm[1:3]) for elm in my_services]
    except Exception as exc:
        logger.error(exc)
        my_services = [_.replace("Modify", "").replace("Action", "")]
        logger.info("my servies text: \n%s", my_services)

    return my_services, full_links
Example #7
    async def run_forever(self):
        try:
            await self.init()
        except Exception as e:
            logger.warning("Init failed: %s", e)
Example #8
    def plot_time_series(
        self,
        ax: plt.axes = None,
        show_plot: bool = False,
        show_xlabel: bool = True,
        use_scientific_notation: bool = False,
        from_date: dt = None,
        to_date: dt = None,
    ) -> plt.figure:
        """Plots the time series of the selected country

        Parameters
        ----------
        ax : matplotlib.pyplot axes object, optional
            axes object used for plotting, if not provided the function will create
            a figure with axes (default is None)
        show_plot : boolean, optional
            controls if the plot is shown at the end of the method call (default is False)
        show_xlabel : boolean, optional
            controls if the x-label 'Date' is plotted (default is True)
        use_scientific_notation : boolean, optional
            controls if the y-axis is plotted in scientific notation 1e... (default is False)
        from_date : datetime object, optional
            controls the start date for plotting (default is None)
        to_date : datetime object, optional
            controls the end date for plotting (default is None)
        Returns
        -------
        fig : matplotlib.pyplot figure object

        """
        if self.cv_data is None:
            logger.warning(
                "No data available, initialize self.cv_data with CDataTimeSeries object"
            )
            return
        if ax is None:
            fh = plt.figure(figsize=[10, 8])
            ax = fh.add_subplot(111)
        ixs, ixe = self.cv_data._get_time_range_indices(
            start_date=from_date, end_date=to_date
        )
        ax.plot(
            self.cv_data.days[ixs:ixe],
            self.cv_data.n_confirmed[ixs:ixe],
            color="red",
            label="total confirmed",
        )
        ax.plot(
            self.cv_data.days[ixs:ixe],
            self.cv_data.n_recovered[ixs:ixe],
            color="green",
            label="total recovered",
        )
        ax.plot(
            self.cv_data.days[ixs:ixe],
            self.cv_data.n_deaths[ixs:ixe],
            color="black",
            label="total deaths",
        )
        ax.plot(
            self.cv_data.days[ixs:ixe],
            self.cv_data.n_still_infected[ixs:ixe],
            color="blue",
            linewidth=2,
            label="still infected",
        )
        ax.grid(True)
        if show_xlabel:
            ax.set_xlabel("Date")
        ax.set_ylabel("Number of cases")
        ax.text(
            0.5,
            0.9,
            self.cv_data.country,
            horizontalalignment="center",
            verticalalignment="center",
            transform=ax.transAxes,
            fontsize=12,
            fontweight="bold",
            bbox=dict(facecolor="white", alpha=1.0, edgecolor="None"),
        )
        if use_scientific_notation:
            ax.ticklabel_format(style="scientific", axis="y", scilimits=(0, 3))
        self._nicely_format_date_ticks(ax)
        plt.legend()
        if show_plot:
            plt.show()
        if "fh" in locals():
            return fh
        return plt.gcf()
Example #9
    def close_driver(self):
        logger.warning('Exit navigation.')
        self.driver.quit()
Example #10
def _run(manual, download_all):
    """
    Method with the application logic.
    """
    dhis, briefcase, smartva, db = access()

    # if manual briefcase was provided
    if manual:
        smartva_file = smartva.run(manual, manual=True)
    else:
        briefcase_file = briefcase.download_briefcases(download_all)
        smartva_file = None
        # check if downloaded briefcase file has content
        if csv_with_content(briefcase_file):
            smartva_file = smartva.run(briefcase_file)
        else:
            os.remove(briefcase_file)

    success_count, error_count, duplicate_count, no_of_records = 0, 0, 0, 0
    if csv_with_content(smartva_file):
        no_of_records = sum(1 for _ in read_csv(smartva_file))

        for index, record in enumerate(read_csv(smartva_file), 1):
            logger.info("[{0}/{1}] SID: {2}".format(index, no_of_records, record.get('sid')))
            logger.debug("Parsed from CSV: {}".format(record))
            va, exc, warnings = verbal_autopsy_factory(record)
            logger.debug("VA data: {}".format(va))

            if warnings:
                for w in warnings:
                    logger.warning(w)

            if exc:
                for e in exc:
                    logger.error(e)
                db.write_errors(record, exc)
                error_count += 1
            else:
                event = Event(va)
                try:
                    dhis.is_duplicate(va.sid)
                except DuplicateEventImportError as e:
                    logger.warning("Record for ID {} already exists in DHIS2".format(record.get('sid')))
                    db.write_errors(record, e)
                    duplicate_count += 1
                else:
                    try:
                        dhis.post_event(event.payload)
                    except ImportException as e:
                        logger.exception("{}\nfor payload {}".format(e, event.payload))
                        db.write_errors(record, e)
                        error_count += 1
                    else:
                        logger.info("Import successful!")
                        success_count += 1

        logger.info("SUMMARY: Parsed ODK records: {} | "
                    "Imported: {} | "
                    "Duplicates: {} | "
                    "Errors: {}".format(
                        no_of_records,
                        success_count,
                        duplicate_count,
                        error_count))
    else:
        logger.warning("No new ODK records to process for time window {} - {}".format(*get_timewindow()))
Example #11
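# NOTE (fragment): assumes an enclosing directory walk, e.g. "for root, dirs, files in os.walk(source_dir):",
# with file_list accumulated for the sampling step below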
        if '/.' in root or '__' in root:
            continue
        if files:
            logger.info(f'Scanning dir: {root}')
        for file_name in files:
            if not file_name.endswith('.py'):
                continue
            file_path = Path(root, file_name)

            # skip badly-sized files
            if not 100 < file_path.stat().st_size < 100_000:
                continue

            # skip Python 2 files
            if not compile_file(file_path, quiet=2):
                logger.warning(f'Skipping file: {file_path}')
                continue

            file_list.append(file_path)

    sample = random.sample(
        file_list,
        statistic_file_count + training_file_count + validation_file_count)
    with open(working_dir / 'statistic_list.txt', 'w') as f:
        f.writelines(f'{i}\n' for i in sample[:statistic_file_count])

    training_list_path = working_dir / 'training_list.txt'
    testing_list_path = working_dir / 'testing_list.txt'
    if training_file_count == 1 and validation_file_count == 1:
        with open(training_list_path, 'w') as f:
            f.writelines([f'{source_dir}/keras/keras/optimizers.py\n'])
Example #12
import logging
import re
from pprint import pprint

from telegram import Message as TelegramMessage, ParseMode
from telegram.ext import ConversationHandler, run_async

import settings
import util
from appglobals import loop
from components.admin import notify_submittant_rejected, edit_bot
from models import Bot, Country, Suggestion, User
from models.revision import Revision
from util import track_groups

log = logging.getLogger(__name__)  # assumed setup; the fragment uses "log" without defining it

try:
    from components.userbot import BotChecker
    from botcheckerworker.botchecker import add_keywords, download_profile_picture
except ImportError:
    log.warning("Not using BotChecker in contributions.py")


def extract_bot_mentions(message: TelegramMessage):
    text = message.text

    matches = re.findall(settings.REGEX_BOT_IN_TEXT, text)
    pprint(matches)

    # If it ends in "bot", we can be sure it's a bot.
    # Other ones will be thrown away, assuming that we already have all the verified bots


def notify_bot_offline(bot, update, args=None):
    tg_user = update.message.from_user
    user = User.from_telegram_object(tg_user)
Example #13
    return logs


if __name__ == "__main__":
    args = docopt(__doc__)

    log_level = logging.DEBUG if args["--debug"] else logging.INFO
    logzero.loglevel(log_level)
    logzero.formatter(logzero.LogFormatter(datefmt="%Y-%m-%d %H:%M:%S"))

    parsed_logs = {}

    log_pdftotext = f"{SCRIPTDIR}/run_parscit_pipeline.pdftotext.log"
    if not os.path.exists(log_pdftotext):
        log.warning(
            f"Couldn't find pdftotext log!  (expected under: {log_pdftotext})"
        )
    else:
        with open(log_pdftotext, "r") as f:
            parsed_logs["pdftotext"] = gather_pdftotext_log(f)

    log_parscit = f"{SCRIPTDIR}/run_parscit_pipeline.parscit.log"
    if not os.path.exists(log_parscit):
        log.error(
            f"Couldn't find ParsCit log!  (expected under: {log_parscit})")
        exit(1)
    else:
        with open(log_parscit, "r") as f:
            all_ids, parsed_logs["parscit"] = gather_parscit_log(f)

    log_parsetei = f"{SCRIPTDIR}/run_parscit_pipeline.tei.log"
Example #14
def deploy_pod(secrets, ns, body):
    api = create_k8s_api_client(secrets)
    v1 = client.CoreV1Api(api)
    logger.warning("Deploy Pod to {ns} namespace".format(ns=ns))

    v1.create_namespaced_pod(ns, body=body)
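
A hedged usage sketch (the manifest, namespace and secrets below are illustrative, not from the source); the Kubernetes client accepts a plain dict manifest as body:

pod_manifest = {
    "apiVersion": "v1",
    "kind": "Pod",
    "metadata": {"name": "demo-pod"},
    "spec": {"containers": [{"name": "demo", "image": "nginx:stable"}]},
}
deploy_pod(secrets=None, ns="default", body=pod_manifest)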
Example #15
def deploy_deployment(secrets, ns, body):
    api = create_k8s_api_client(secrets)
    v1 = client.AppsV1beta1Api(api)
    logger.warning("Deploy Deployment to {ns} namespace".format(ns=ns))
    v1.create_namespaced_deployment(ns, body=body)
Example #16
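# NOTE (fragment): the tail of a pyppeteer launch(...) call inside an assumed get_ppbrowser(headless) coroutine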
            ignoreDefaultArgs=[
                "--enable-automation",  # set window.navigator.webdriver to undefined
            ],
            executablePath=executable_path,  # use chrome
            # autoClose=False,
            headless=headless,
            # devtools=devtools,  # replace headless
            dumpio=True,
            # userDataDir=".",
            userDataDir=tempdir,
        )
    except Exception as exc:
        logger.error("get_ppbrowser exc: %s", exc)
        raise
    # page = await browser.newPage()
    # await page.goto(url)
    # logger.debug("page.goto deepl time: %.2f s", default_timer() - then)
    return browser


try:
    BROWSER = LOOP.run_until_complete(get_ppbrowser(not HEADFUL))
except Exception as exc:
    logger.error(" Unable to pyppeteer.launch exc: %s", exc)
    logger.info(
        "\n\t%s",
        r"Possible cause: abnormal exit from a previous session. Try `taskkill /f /im chrome.exe`",
    )
    logger.warning(" %s", "Note that this will also kill your chrome browser.")
    raise SystemExit(1)
Example #17
    def build_next_actions(self,
                           context: Context) -> Union[ResponseComposer, None]:
        u = context.last_user_utterance

        if u is None:
            return

        composer = self._create_composer(context)

        text = f'"{u.text[:50]}"' if u.text else ''
        log.info(f'Incoming message: {text}, {u}')
        log.debug(f'Current dialog states: {context.dialog_states}')

        # Execute every matching stateless handler first
        for handler in self.router.iter_stateless_matches(u):
            try:
                if handler.callback(composer, context):
                    log.debug(f"Stateless handler triggered: {handler}")
            except StopPropagation:
                # Some handlers may stop the propagation of the update through the chain of state handlers
                if composer.is_empty:
                    log.error(
                        "StopPropagation was raised but no chat actions were constructed."
                    )
                    return
                self._log_actions(composer)
                return composer

        next_state = None
        # Dialog states are a priority queue
        for state in context.dialog_states.iter_states():
            # Find exactly one handler in any of the prioritized states
            handler = self.router.find_matching_state_handler(state, u)

            if handler is None:
                continue

            next_state = handler.callback(composer, context)
            log.info(f"State handler triggered: {handler}")
            handler_found = True
            break
        else:
            # If no handler was found in any of the states, try the fallbacks
            handler = self.router.get_fallback_handler(u)

            if handler is not None:
                next_state = handler.callback(composer, context)
                log.info(f"Fallback handler triggered: {handler}")
                handler_found = True
            else:
                log.warning(
                    f"No matching rule found in dialog_states "
                    f"{list(context.dialog_states.iter_states())} with intent "
                    f"{u.intent} and parameters {u.parameters}.")
                handler_found = False

        if not handler_found:
            if u.intent == 'fallback':
                excuse_did_not_understand(composer, context)
                log.debug(f'Incoming message was not understood: "{u.text}"')
                log.debug("Not updating state lifetimes.")
                return composer
            else:
                next_state = no_rule_found(composer, context)

        if isinstance(next_state, ResponseComposer):
            # Lambdas return the sentence composer, which we don't need (call by reference)
            next_state = None

        if next_state is not None:
            # Handlers return a tuple with the next state, with an integer determining the lifetime of this state as
            # the last tuple value.
            # For example: `("asking", "do_something", 3)`  <-- lifetime of 3 incoming utterances
            context.dialog_states.put(next_state)
            return composer

        self._log_actions(composer)
        return composer
Example #18
    def plot_country_comparison(
        self,
        country_name_1: str,
        country_name_2: str,
        ax: plt.axes = None,
        show_plot: bool = False,
        from_date: dt = None,
        to_date: dt = None,
    ) -> plt.figure:
        """Plot the time series curves of two selected countries from a collection
        into one plot for comparison purposes.
        Parameters
        ----------
        country_name_1 : str
            Name of the first country selected for compare
        country_name_2 : str
            Name of the second country selected for compare
        from_date : datetime object, optional
            controls the start date for plotting (default is None)
        to_date : datetime object, optional
            controls the end date for plotting (default is None)
        ax : matplotlib.pyplot axes object, optional
            axes object used for plotting, if not provided the function will create
            a figure with axes (default is None)
        show_plot : boolean, optional
            controls if the plot is shown at the end of the method call (default is False)
        Returns
        -------
        fig : matplotlib.pyplot figure object
        """
        if self.cv_data_collection is None:
            logger.warning(
                "No collection available, initialize self.cv_data_collection with CDataTimeSeriesCollection object"
            )
            return
        ds1 = self.cv_data_collection._get_data_from_country_name(country_name_1)
        ds2 = self.cv_data_collection._get_data_from_country_name(country_name_2)
        if not ds1 or not ds2:
            logger.info("Country %s or %s not found in the collection",
                        country_name_1, country_name_2)
            return
        if ax is None:
            fh = plt.figure(figsize=(10, 7))
            ax = fh.add_subplot(111)

        ixs1, ixe1 = ds1._get_time_range_indices(start_date=from_date, end_date=to_date)
        ixs2, ixe2 = ds2._get_time_range_indices(start_date=from_date, end_date=to_date)
        ax.plot(
            ds1.days[ixs1:ixe1],
            ds1.n_confirmed[ixs1:ixe1],
            color="red",
            linewidth=2,
            label=ds1.country + " confirmed",
        )
        ax.plot(
            ds2.days[ixs2:ixe2],
            ds2.n_confirmed[ixs2:ixe2],
            color="darkred",
            linestyle="-.",
            label=ds2.country + " confirmed",
        )
        ax.plot(
            ds1.days[ixs1:ixe1],
            ds1.n_recovered[ixs1:ixe1],
            color="green",
            linewidth=2,
            label=ds1.country + " recovered",
        )
        ax.plot(
            ds2.days[ixs2:ixe2],
            ds2.n_recovered[ixs2:ixe2],
            color="darkgreen",
            linestyle="-.",
            label=ds2.country + " recovered",
        )
        ax.plot(
            ds1.days[ixs1:ixe1],
            ds1.n_deaths[ixs1:ixe1],
            color="darkgrey",
            linewidth=2,
            label=ds1.country + " deaths",
        )
        ax.plot(
            ds2.days[ixs2:ixe2],
            ds2.n_deaths[ixs2:ixe2],
            color="black",
            linestyle="-.",
            label=ds2.country + " deaths",
        )
        ax.plot(
            ds1.days[ixs1:ixe1],
            ds1.n_still_infected[ixs1:ixe1],
            color="blue",
            linewidth=2,
            label=ds1.country + " still infected",
        )
        ax.plot(
            ds2.days[ixs2:ixe2],
            ds2.n_still_infected[ixs2:ixe2],
            color="darkblue",
            linestyle="-.",
            label=ds2.country + " still infected",
        )

        ax.grid(True)
        ax.set_xlabel("Date")
        ax.set_ylabel("Cases")
        plt.legend()
        ax.text(
            0.6,
            0.9,
            ds1.country + " vs. " + ds2.country,
            horizontalalignment="center",
            verticalalignment="center",
            transform=ax.transAxes,
            fontsize=12,
            fontweight="bold",
            bbox=dict(facecolor="white", alpha=1.0, edgecolor="None"),
        )
        CDataTimeSeriesView._nicely_format_date_ticks(ax)

        if show_plot:
            plt.show()
        if "fh" in locals():
            return fh
        return plt.gcf()
Example #19
from logzero import setup_logger
from logzero import logger
from PIL import Image

from deprecated import deprecated
import adbutils
import uiautomator2
from uiautomator2.exceptions import XPathElementNotFoundError
from uiautomator2.utils import U
from uiautomator2.abcd import BasicUIMeta

try:
    from lxml import etree
except ImportError:
    logger.warning("lxml was not installed, xpath will not supported")


def safe_xmlstr(s):
    return s.replace("$", "-")


def init():
    uiautomator2.plugin_register("xpath", XPath)


def string_quote(s):
    """ quick way to quote string """
    return "{!r}".format(s)

Example #20
    def MakeTransaction(self,
                        tx,
                        change_address=None,
                        fee=Fixed8(0),
                        from_addr=None,
                        use_standard=False,
                        watch_only_val=0,
                        exclude_vin=None,
                        use_vins_for_asset=None):
        """
        This method is used to calculate the necessary TransactionInputs (CoinReferences) and TransactionOutputs to
        be used when creating a transaction that involves an exchange of system assets (NEO, GAS, etc.).

        Args:
            tx (Transaction): The Transaction to be used.
            change_address (UInt160): The address any change for the transaction should be returned to.
            fee (Fixed8): A fee to be attached to the Transaction for network processing purposes.
            from_addr (UInt160): If present, all CoinReferences selected will only come from this address.
            use_standard (bool): If true, only CoinReferences from standard addresses (i.e. not smart contract addresses) will be used.
            watch_only_val (int): 0 or CoinState.WATCH_ONLY, if present only choose coins that are in a WatchOnly address.
            exclude_vin (list): A list of CoinReferences to NOT use in the making of this tx.
            use_vins_for_asset (list): A list of CoinReferences to use.

        Returns:
            tx: (Transaction) Returns the transaction with updated inputs and outputs.
        """

        tx.ResetReferences()
        tx.ResetHashData()

        if not tx.outputs:
            tx.outputs = []
        if not tx.inputs:
            tx.inputs = []

        fee = fee + (tx.SystemFee() * Fixed8.FD())

        paytotal = {}
        if tx.Type != int.from_bytes(TransactionType.IssueTransaction, 'little'):

            for key, group in groupby(tx.outputs, lambda x: x.AssetId):
                sum = Fixed8(0)
                for item in group:
                    sum = sum + item.Value
                paytotal[key] = sum
        else:
            paytotal = {}

        if fee > Fixed8.Zero():

            if Blockchain.SystemCoin().Hash in paytotal:
                paytotal[Blockchain.SystemCoin().Hash] += fee
            else:
                paytotal[Blockchain.SystemCoin().Hash] = fee

        paycoins = {}

        self._vin_exclude = exclude_vin

        for assetId, amount in paytotal.items():

            if (use_vins_for_asset is not None and len(use_vins_for_asset) > 0
                    and use_vins_for_asset[1] == assetId):
                paycoins[assetId] = self.FindCoinsByVins(use_vins_for_asset[0])
            else:
                paycoins[assetId] = self.FindUnspentCoinsByAssetAndTotal(
                    assetId,
                    amount,
                    from_addr=from_addr,
                    use_standard=use_standard,
                    watch_only_val=watch_only_val)

        self._vin_exclude = None

        for key, unspents in paycoins.items():
            if unspents is None:
                if not self.IsSynced:
                    logger.warning(
                        "Wait for your wallet to be synced before doing "
                        "transactions. To check enter 'wallet' and look at "
                        "'percent_synced', it should be 100. Also the blockchain "
                        "should be up to the latest blocks (see Progress). Issuing "
                        "'wallet rebuild' restarts the syncing process.")
                    return None

                else:
                    logger.error("insufficient funds for asset id: %s " % key)
                    return None

        input_sums = {}

        for assetId, unspents in paycoins.items():
            sum = Fixed8(0)
            for coin in unspents:
                sum = sum + coin.Output.Value
            input_sums[assetId] = sum

        if not change_address:
            change_address = self.GetChangeAddress(from_addr=from_addr)

        new_outputs = []

        for assetId, sum in input_sums.items():
            if sum > paytotal[assetId]:
                difference = sum - paytotal[assetId]
                output = TransactionOutput(AssetId=assetId,
                                           Value=difference,
                                           script_hash=change_address)
                new_outputs.append(output)

        inputs = []

        for item in paycoins.values():
            for ref in item:
                inputs.append(ref.Reference)

        tx.inputs = inputs
        tx.outputs = tx.outputs + new_outputs

        return tx
Example #21
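# NOTE (fragment): the tail of a module-level "packages" list consumed by the query below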
    'pytest',
    'pytest-cov',
    'setuptools_scm',
    'flake8',
]

url_template = (
    'https://docs.anaconda.com/anaconda/packages/old-pkg-lists/{ANACONDA_VERSION}/py{PYTHON_VERSION}_linux-64'  # noqa
)
url = url_template.format(ANACONDA_VERSION=ANACONDA_VERSION,
                          PYTHON_VERSION=PYTHON_VERSION)
response = requests.get(url)

if not response.ok:
    logger.warning(
        'Version {} of Anaconda not found, defaulting to latest version.'.format(
            ANACONDA_VERSION))
    url = LATEST_URL

# Scrape Data
package_table = pd.read_html(url, header=0)[0]
subset = package_table.query('Name in @packages')
package_versions = (subset.Name + '=' + subset.Version).tolist()

# Insert into a docker file using a jinja template
template_string = r"""
FROM mangothecat/minicondabuild:latest

RUN conda install -y \
{%- for package in packages %}
        {{package}} \
Example #22
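# NOTE (fragment): from inside a feature-pruning loop; the first two lines close a logger.info(...) call,
# and model, feature_mask, feature_names, dirty, X/y, Xt/yt come from earlier in the loop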
            f'Baseline accuracy: {successful_count / len(test_indices):.2%} '
            f'with {feature_mask.sum()} features')

        importances = model.feature_importances_
        argsort = importances.argsort()

        # remove features with almost 0 importance
        new_feature_mask = feature_mask.copy()
        secondary_mask = np.ones(feature_mask.sum(), dtype='?')
        for a in argsort:
            if importances[a] < 0.00001:
                secondary_mask[a] = 0
                new_feature_mask[feature_mask] = secondary_mask
                dirty = True
                logger.warning(
                    f'Feature #{a:<3}: {feature_names[feature_mask][a]} removed for '
                    f'{importances[a]:.4%} importance.')
        if dirty:
            model.fit(X[:, new_feature_mask], y)
            _, new_successful_count = test_model(
                model, Xt[:, new_feature_mask], yt, test_indices)
            feature_mask = new_feature_mask
            successful_count = new_successful_count
            continue

        # remove features if the accuracy increases after removal
        for i, a in enumerate(argsort):
            new_feature_mask = feature_mask.copy()
            secondary_mask = np.ones(feature_mask.sum(), dtype='?')
            secondary_mask[a] = 0
Example #23
        elif kwargs.get("templates"):
            templates = list(
                {
                    tmpl
                    for tmpl in self.execute(
                        workflow="list-templates", artifacts="last"
                    )["data_out"]["list_templates"]
                }
            )
            templates.sort(reverse=True)
            if res_filter := kwargs.get("results_filter"):
                templates = results_filter(templates, res_filter)
            templates = "\n".join(templates[:results_limit])
            logger.info(f"Available templates:\n{templates}")
        else:
            logger.warning("That action is not yet implemented.")

    def release(self, name, broker_args=None):
        if broker_args is None:
            broker_args = {}
        return self.execute(
            workflow=settings.ANSIBLETOWER.release_workflow,
            source_vm=name,
            **broker_args,
        )


def awxkit_representer(dumper, data):
    """In order to resolve awxkit objects, a custom representer is needed"""
    return dumper.represent_dict(dict(data))
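
A hedged sketch of registering such a representer with PyYAML (the awxkit type and import path here are hypothetical, not from the source):

import yaml
from awxkit.api.pages import Page  # hypothetical import path

yaml.add_representer(Page, awxkit_representer)
print(yaml.dump(page_obj))  # awxkit objects now dump as plain dicts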
Example #24
    def create_library_file(self):
        """Populate a library file with filled command templates"""
        library_file = Path("libs/templates/hammer/main.py.template")
        if not library_file.exists():
            logger.error(f"Unable to find {library_file}.")
            return

        logger.debug(f"Creating {self.cli_name}.py file.")
        compiled_options = "\n".join(
            [f"'{opt}'," for opt in self.cli_dict[self.cli_name].get("options", [])]
        )

        sub_classes, sub_methods = [], []
        for command, contents in (
            self.cli_dict[self.cli_name].get("sub_commands", {}).items()
        ):
            if "sub_commands" in contents:
                sub_classes.append(
                    [
                        command,
                        self.fill_subcommand_class_template(
                            contents, f"{self.cli_name} {command}"
                        ),
                    ]
                )
            else:
                sub_methods.append(
                    self.fill_method_template(
                        self.cli_name, command, contents.get("options")
                    )
                )

        subclass_assignments = [
            f"self.{subclass[0].replace('-','_')} = self.{self.name_to_class(subclass[0])}()"
            for subclass in sub_classes
        ]
        if subclass_assignments:
            subclass_assignments = "\n".join(subclass_assignments)
            subclass_assignments = f"{shift_text(subclass_assignments, 2)}\n\n"
        else:
            subclass_assignments = ""

        with library_file.open() as cmd_file:
            loaded_cmd_f = cmd_file.read()

        loaded_cmd_f = loaded_cmd_f.replace(
            "~~Project Name~~", self.name_to_proper_name(self.cli_name)
        )
        loaded_cmd_f = loaded_cmd_f.replace(
            "~~MainCommandClass~~", self.name_to_class(self.cli_name)
        )
        loaded_cmd_f = loaded_cmd_f.replace("~~main command~~", self.cli_name)
        loaded_cmd_f = loaded_cmd_f.replace(
            "~~command options~~", shift_text(compiled_options, 3)
        )
        loaded_cmd_f = loaded_cmd_f.replace(
            "~~subclass assignments~~", subclass_assignments
        )
        loaded_cmd_f = loaded_cmd_f.replace(
            "~~sub methods~~", shift_text("\n".join(sub_methods), 1)
        )
        loaded_cmd_f = loaded_cmd_f.replace(
            "~~sub classes~~",
            shift_text("\n".join([subclass[1] for subclass in sub_classes]), 0),
        )

        save_file = Path(f"libs/generated/hammer/{self.cli_version}/{self.cli_name}.py")
        if save_file.exists():
            logger.warning(f"Overwriting {save_file}")
            save_file.unlink()
        # create the directory, if it doesn't exist
        save_file.parent.mkdir(parents=True, exist_ok=True)
        save_file.touch()
        logger.info(f"Saving results to {save_file}")
        with save_file.open("w+") as outfile:
            outfile.write(loaded_cmd_f)
Example #25
def download_single(raw_song, number=None):
    """ Logic behind downloading a song. """
    content, meta_tags = youtube_tools.match_video_and_metadata(raw_song)

    if content is None:
        log.debug("Found no matching video")
        return

    if const.args.download_only_metadata and meta_tags is None:
        log.info("Found no metadata. Skipping the download")
        return

    # "[number]. [artist] - [song]" if downloading from list
    # otherwise "[artist] - [song]"
    youtube_title = youtube_tools.get_youtube_title(content, number)
    log.info("{} ({})".format(youtube_title, content.watchv_url))

    # generate file name of the song to download
    songname = content.title

    if meta_tags is not None:
        refined_songname = internals.format_string(const.args.file_format,
                                                   meta_tags,
                                                   slugification=True)
        log.debug('Refining songname from "{0}" to "{1}"'.format(
            songname, refined_songname))
        if refined_songname != " - ":
            songname = refined_songname
    else:
        if not const.args.no_metadata:
            log.warning("Could not find metadata")
        songname = internals.sanitize_title(songname)

    if const.args.dry_run:
        return

    if not check_exists(songname, raw_song, meta_tags):
        # deal with file formats containing slashes to non-existent directories
        songpath = os.path.join(const.args.folder, os.path.dirname(songname))
        os.makedirs(songpath, exist_ok=True)
        input_song = songname + const.args.input_ext
        output_song = songname + const.args.output_ext
        if youtube_tools.download_song(input_song, content):
            print("")
            try:
                convert.song(
                    input_song,
                    output_song,
                    const.args.folder,
                    avconv=const.args.avconv,
                    trim_silence=const.args.trim_silence,
                )
            except FileNotFoundError:
                encoder = "avconv" if const.args.avconv else "ffmpeg"
                log.warning(
                    "Could not find {0}, skipping conversion".format(encoder))
                const.args.output_ext = const.args.input_ext
                output_song = songname + const.args.output_ext

            if const.args.input_ext != const.args.output_ext:
                os.remove(os.path.join(const.args.folder, input_song))
            if not const.args.no_metadata and meta_tags is not None:
                metadata.embed(os.path.join(const.args.folder, output_song),
                               meta_tags)
            return True
Example #26
def fix_name(name):
    """Determine if the name is reserved and adjust if needed"""
    if name in dir(builtins) or name in ["import"]:
        logger.warning(f"{name} is a python builtin or reserved word, changing to {name}_")
        name = f"{name}_"
    return name
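
A quick usage sketch (assumed, not from the source):

fix_name("list")    # -> "list_"   (shadows a builtin)
fix_name("import")  # -> "import_" (reserved keyword)
fix_name("speed")   # -> "speed"   (unchanged)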
Example #27
def insert_column(values, df=None, col=0):
    """insert values to df's col-th column """

    if not isinstance(values, (list, tuple)):
        logger.error("Values not list nor tuple: %s", type(values))
        return None

    # len_ = len(values)
    if df is None:
        df = pd.DataFrame({"text1": [""], "text2": [""], "merit": [""]})

    try:
        col = int(col)
    except Exception as exc:
        logger.error("col = int(col) Exceptinon: %s, set to 0", exc)
        col = 0

    if col > 2:
        logger.warning("col > 2: %s, set to 0", col)
        col = 0

    columns = ["text1", "text2", "merit"]

    def gen_df(col0, col1, col2):
        return pd.DataFrame(
            np.asarray([*zip_longest(col0, col1, col2, fillvalue="")],
                       dtype=str),
            columns=columns,
        )

    if col == 0:
        df = gen_df(values, df.text2, df.merit)

    if col == 1:
        df = gen_df(df.text1, values, df.merit)

    if col == 2:
        df = gen_df(df.text1, df.text2, values)


    # remove rows with all empty entries
    return df.replace("", np.nan).dropna(how="all").replace(np.nan, "")
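
A hedged usage sketch (assumes the fragment's module-level imports: pandas as pd, numpy as np, itertools.zip_longest and a configured logger):

df = insert_column(["hello", "world"])           # fresh frame with col 0 filled
df = insert_column(["bonjour", "monde"], df, 1)  # fill col 1 of that frame
#    text1    text2 merit
# 0  hello  bonjour
# 1  world    monde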
Example #28
def proc_argv(argv):
    """ __main__ main """

    version = "0.0.2"
    if FLAGS.version:
        print("deepl-tr-async %s" % version)

    if FLAGS.copyfrom:
        text = pyperclip.paste()
        logger.debug("text from clipboard: %s", text)
    else:
        text = ' '.join(argv[1:])
        logger.debug("argv from terminal: %s", text)

    try:
        text = text.strip()
    except Exception as exc:
        logger.warning("text.strip() exc: %s, exiting...", exc)
        text = ""

    if not text:
        return None

    # del argv

    if FLAGS.debug:
        logzero.loglevel(10)  # logging.DEBUG
    else:
        logzero.loglevel(20)  # logging.INFO

    logger.debug('\n\t args: %s',
                 dict((elm, getattr(FLAGS, elm)) for elm in FLAGS))

    # to_lang = FLAGS.to_lang
    # from_lang = FLAGS.from_lang

    # to_lang = getattr(FLAGS, "to-lang")
    # from_lang = getattr(FLAGS, "from-lang")
    # width = getattr(FLAGS, "width")
    # copyto = getattr(FLAGS, "copyto")
    # debug = getattr(FLAGS, "debug")

    args = [
        'lang0',
        'lang1',
        'lang2',
        'width',
        'copyfrom',
        'copyto',
        'debug',
    ]
    # for elm in args: locals()[elm] = getattr(FLAGS, elm)
    lang0 = getattr(FLAGS, "mother-lang")
    lang1 = getattr(FLAGS, "second-lang")
    lang2 = getattr(FLAGS, "third-lang")
    width = FLAGS.width
    # copyfrom = FLAGS.copyfrom
    copyto = FLAGS.copyto
    debug = FLAGS.debug

    # if getattr(FLAGS, "debug"):
    if debug:
        logger.debug("args: %s", [[elm, getattr(FLAGS, elm)] for elm in args])

    # make it unique and not the same as s_lang
    s_lang = detect_lang(text)
    logger.info(" detected language: %s", s_lang)

    lang_list = []
    for elm in [lang0, lang1, lang2]:
        if elm not in lang_list and elm not in [s_lang]:
            lang_list.append(elm)

    if not lang_list:
        logger.info(" languages picked: %s", [lang0, lang1, lang2])
        logger.warning(
            " Nothing to do. Select proper languages and source text and try again; exiting..."
        )
        return None

    if len(lang_list) < 2:
        logger.warning(" Only one language %s is selected. We'll proceed tho.",
                       lang_list)

    tasks = []
    for elm in lang_list:
        task = deepl_tr_async(text, from_lang=s_lang, to_lang=elm)
        tasks.append(task)

    # google tr
    tasks_g = []
    for elm in lang_list:
        task = google_tr_async(text, from_lang=s_lang, to_lang=elm)
        tasks_g.append(task)

    len_ = len(tasks)
    try:
        # trtext = LOOP.run_until_complete(task)
        _ = asyncio.gather(*tasks, *tasks_g)
        trtext_ = LOOP.run_until_complete(_)
    except Exception as exc:
        logger.error("LOOP.run_until_complete exc: %s", exc)
        trtext_ = [str(exc)] * len_

    trtext, trtext_g = trtext_[:len_], trtext_[len_:]

    prefix = " deepl: "
    indent = ' ' * len(prefix)
    ftext = prefix
    for elm in trtext:
        if detect_lang(elm) in ['zh', 'ja']:
            ftext += fill(elm, width // 2, subsequent_indent=indent) + "\n"
        else:
            ftext += fill(
                elm, width, initial_indent=indent,
                subsequent_indent=indent) + "\n"

    prefix = " google: "
    indent = ' ' * len(prefix)
    ftext_g = prefix
    for elm in trtext_g:
        if detect_lang(elm) in ['zh', 'ja']:
            ftext_g += fill(elm, width // 2, subsequent_indent=indent) + "\n"
        else:
            ftext_g += fill(
                elm, width, initial_indent=indent,
                subsequent_indent=indent) + "\n"

    prefix = " deepl: "
    indent = ' ' * len(prefix)
    if detect_lang(text) in ['zh', 'ja']:
        text_ = fill(
            text, width // 2, initial_indent=indent,
            subsequent_indent=indent) + "\n"
    else:
        text_ = fill(
            text, width, initial_indent=indent,
            subsequent_indent=indent) + "\n"

    _ = text_ + ftext + ftext_g

    if copyto:
        pyperclip.copy(_)

    logger.info("translated to %s: \n\t%s", ', '.join(lang_list), _)
Example #29
def generate_from_vegeta_result(run: Run, export_format: str):
    """
    Generate charts from probes that pulled data out of Prometheus. The charts
    are serialized to SVG (for HTML reports) and PNG (for PDF reports).
    """
    vegeta_path = shutil.which("vegeta")
    if not vegeta_path:
        logger.warning("Failed to find the 'vegeta' binary in PATH")
        return

    provider = run["activity"]["provider"]
    args = provider.get("arguments")
    if not args:
        return

    if isinstance(args, str):
        args = shlex.split(args)
    elif isinstance(args, dict):
        args = itertools.chain.from_iterable(args.items())
        args = list([str(p) for p in args if p not in (None, "")])

    if "attack" not in args:
        return

    result_path = None
    for idx, t in enumerate(args):
        if t.startswith("-output="):
            result_path = t[8:]
            break
        elif t.startswith("-output"):
            result_path = args[idx + 1]
            break

    if not result_path:
        return

    if not os.path.exists(result_path):
        logger.warning(
            "vegeta result path could not be found: {}".format(result_path))
        return

    cmd = "{} report --type text {}".format(vegeta_path, result_path)
    try:
        proc = subprocess.run(cmd,
                              timeout=10,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              shell=True,
                              check=True)
    except subprocess.CalledProcessError as x:
        logger.error("vegeta reporter failed: {}".format(str(x)))
    except subprocess.TimeoutExpired:
        logger.error("vegeta reporter took too long to complete")
    else:
        run["text"] = proc.stdout.decode('utf-8')

    cmd = "{} encode --to json {}".format(vegeta_path, result_path)
    try:
        proc = subprocess.run(cmd,
                              timeout=10,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              shell=True,
                              check=True)
    except subprocess.CalledProcessError as x:
        logger.error("vegeta reporter failed: {}".format(str(x)))
    except subprocess.TimeoutExpired:
        logger.error("vegeta dumper took too long to complete")
    else:
        # vegeta encode emits one JSON object per line (NDJSON); join the
        # lines with commas so they can be parsed as a single JSON array
        calls = proc.stdout.decode('utf-8').strip().replace("\n", ",")
        data = json.loads("[{}]".format(calls))

        def latency_chart() -> pygal.Line:
            chart = pygal.Line(x_label_rotation=20,
                               style=DefaultStyle,
                               logarithmic=True,
                               show_minor_x_labels=False,
                               legend_at_bottom=False)
            chart.title = "HTTP Latency"
            chart.y_title = "Latency (in ms)"
            chart.x_labels = [call["timestamp"] for call in data]
            num_entries = len(chart.x_labels)
            step = 10
            if num_entries > 1000:
                step = 2000
            elif num_entries > 100:
                step = 200
            chart.x_labels_major = chart.x_labels[::step]

            y_values = {}
            for index, call in enumerate(data):
                code = str(call["code"])
                if code not in y_values:
                    y_values[code] = [None] * num_entries

                latency = call["latency"] / 1000000.  # nanoseconds -> ms
                y_values[code][index] = latency

            for code, latencies in y_values.items():
                chart.add(code, latencies, allow_interruptions=True)

            return chart

        def status_distribution() -> pygal.Bar:
            chart = pygal.Bar(x_label_rotation=20,
                              style=DefaultStyle,
                              show_minor_x_labels=False,
                              legend_at_bottom=False)
            chart.title = "Distribution of HTTP Responses Per Second"
            chart.y_title = "Status Code Count"

            status_intervals = {}
            for call in data:
                ts = call["timestamp"]
                dt = dateparser.parse(ts)
                by_second_dt = dt.replace(microsecond=0).isoformat()
                if by_second_dt not in status_intervals:
                    status_intervals[by_second_dt] = {}

                code = call["code"]
                if code not in status_intervals[by_second_dt]:
                    status_intervals[by_second_dt][code] = 0
                status_intervals[by_second_dt][code] += 1

            chart.x_labels = list(status_intervals.keys())
            chart.x_labels_major = chart.x_labels[::5]

            num_entries = len(chart.x_labels)
            y_values = {}
            for index, interval in enumerate(status_intervals):
                for code, count in status_intervals[interval].items():
                    if code not in y_values:
                        y_values[code] = [None] * num_entries
                    y_values[code][index] = count

            for code, count in y_values.items():
                chart.add(str(code), count, allow_interruptions=True)

            return chart

        def add_chart(chart: pygal.Graph):
            if "charts" not in run:
                run["charts"] = []

            if export_format in ["html", "html5"]:
                run["charts"].append(
                    chart.render(disable_xml_declaration=True))
            else:
                run["charts"].append(
                    b64encode(
                        cairosvg.svg2png(bytestring=chart.render(),
                                         dpi=72)).decode("utf-8"))

        add_chart(latency_chart())
        add_chart(status_distribution())
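
The -output handling above accepts both the "-output=path" and "-output path" spellings of the vegeta flag. A standalone sketch of that extraction (the sample args are hypothetical):

def find_output_path(args):
    # handle both "-output=path" and "-output path"
    for idx, token in enumerate(args):
        if token.startswith("-output="):
            return token[len("-output="):]
        if token == "-output" and idx + 1 < len(args):
            return args[idx + 1]
    return None

assert find_output_path(["attack", "-output=results.bin"]) == "results.bin"
assert find_output_path(["attack", "-output", "results.bin"]) == "results.bin"
assert find_output_path(["attack"]) is None
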
Example #30
def load_secrets_from_vault(
    secrets_info: Dict[str, Dict[str, str]],  # noqa: C901
    configuration: Configuration = None,
    extra_vars: Dict[str, Any] = None,
) -> Secrets:
    """
    Load secrets from Vault KV secrets store

    In your experiment:

    ```
    {
        "k8s": {
            "mykey": {
                "type": "vault",
                "path": "foo/bar"
            }
        }
    }
    ```

    This will read the Vault secret at path `secret/foo/bar`
    (or `secret/data/foo/bar` if you use Vault KV version 2) and store its
    entire payload into the Chaos Toolkit `mykey` secret. This means that
    all keys under that path will be available as-is. For instance, this
    could be:

    ```
    {
        "mypassword": "...",
        "mylogin": "..."
    }
    ```

    Sometimes you may want only a single key of that payload. In that case,
    add the `key` property to the secret declaration:

    ```
    {
        "k8s": {
            "mykey": {
                "type": "vault",
                "path": "foo/bar",
                "key": "mypassword"
            }
        }
    }
    ```

    In that case, `mykey` will be set to the value at `secret/foo/bar` under
    the Vault secret key `mypassword`.
    """
    secrets = {}

    client = create_vault_client(configuration)

    for (target, keys) in secrets_info.items():
        secrets[target] = {}

        for (key, value) in keys.items():
            if isinstance(value, dict) and value.get("type") == "vault":
                if not HAS_HVAC:
                    logger.error(
                        "Install the `hvac` package to fetch secrets "
                        "from Vault: `pip install chaostoolkit-lib[vault]`.")
                    return {}

                path = value.get("path")
                if path is None:
                    logger.warning(
                        "Missing Vault secret path for '{}'".format(key))
                    continue

                # see https://github.com/chaostoolkit/chaostoolkit/issues/98
                kv = client.secrets.kv
                is_kv1 = kv.default_kv_version == "1"
                if is_kv1:
                    vault_payload = kv.v1.read_secret(
                        path=path,
                        mount_point=configuration.get(
                            "vault_secrets_mount_point", "secret"),
                    )
                else:
                    vault_payload = kv.v2.read_secret_version(
                        path=path,
                        mount_point=configuration.get(
                            "vault_secrets_mount_point", "secret"),
                    )

                if not vault_payload:
                    logger.warning(
                        "No Vault secret found at path: {}".format(path))
                    continue

                if is_kv1:
                    data = vault_payload.get("data")
                else:
                    data = vault_payload.get("data", {}).get("data")

                if "key" in value:
                    vault_key = value["key"]
                    if vault_key not in data:
                        logger.warning(
                            "No Vault key '{}' at secret path '{}'".format(
                                vault_key, path))
                        continue

                    secrets[target][key] = data.get(vault_key)

                else:
                    secrets[target][key] = data

        if not secrets[target]:
            secrets.pop(target)

    return secrets
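
A minimal call sketch, assuming a Vault instance reachable through the configuration consumed by create_vault_client and the `hvac` extra installed; the path, key, and resulting value below are illustrative:

secrets_info = {
    "k8s": {
        "mykey": {"type": "vault", "path": "foo/bar", "key": "mypassword"}
    }
}
secrets = load_secrets_from_vault(secrets_info, configuration={})
# -> {"k8s": {"mykey": "<value of 'mypassword' at secret/foo/bar>"}}
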
Example #31
    def make_search(self,
                    origin='Paris',
                    destination='Bordeaux',
                    jour='08',
                    mois='04',
                    annee='2020'):
        logger.warning('Run the search with the selected parameters')

        # Select origin
        logger.info('From {}'.format(origin))
        searchbox = self.driver.find_element_by_id('vsb-origin-train-launch')
        searchbox.clear()
        logger.debug('Wait 1s')
        time.sleep(1.1)
        searchbox.send_keys(origin)
        logger.debug('Wait 3s')
        time.sleep(3.3)
        searchbox.send_keys(Keys.ENTER)

        # Select destination
        logger.info('To {}'.format(destination))
        searchbox2 = self.driver.find_element_by_id(
            'vsb-destination-train-launch')
        searchbox2.clear()
        logger.debug('Wait 1s')
        time.sleep(1.2)
        searchbox2.send_keys(destination)
        logger.debug('Wait 3s')
        time.sleep(2.7)
        searchbox2.send_keys(Keys.ENTER)

        # Select date
        id_ = 'train-launch-d-{}-{}-{}'.format(jour, mois, annee)
        logger.info('Date {}'.format(id_))
        logger.info('- Open date selector')
        date = self.driver.find_element_by_id(
            'vsb-dates-dialog-train-launch-aller-retour-1')
        date.click()
        logger.debug('Wait 4s')
        time.sleep(4.23)
        # previous_month = self.driver.find_element_by_id('previousMonth')
        # next_month = self.driver.find_element_by_id('nextMonth')
        logger.info('- Select chosen date')
        date_selected = self.driver.find_element_by_id(id_)
        date_selected.click()
        logger.debug('Wait 2s')
        time.sleep(1.95)
        # Optionally, the departure hour could be selected here
        logger.info('- Validate date')
        button = self.driver.find_element_by_id(
            'vsb-datepicker-train-launch-aller-retour-submit')
        button.click()
        logger.debug('Wait 4s')
        time.sleep(4.1)

        # Submit the search
        logger.info('Submit the search')
        btn = self.driver.find_element_by_id('vsb-booking-train-launch-submit')
        btn.click()
        logger.debug('Wait 15s')
        time.sleep(15)
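
The fixed time.sleep pauses above are fragile on slow pages. A sketch of the same click step using Selenium's explicit waits instead (same element ids as above; the timeout is an arbitrary choice):

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def click_when_ready(driver, element_id, timeout=15):
    # block until the element is clickable instead of sleeping a fixed time
    element = WebDriverWait(driver, timeout).until(
        EC.element_to_be_clickable((By.ID, element_id)))
    element.click()
    return element
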
Example #32
    def select_train(self, horaire_souhaite='15:52'):
        logger.warning('Select train: %s', horaire_souhaite)

        tgvmax_signature = ['meilleur', 'prix', '0', '€', '0,00', '€']

        def find_wanted_train():
            # Get the list of displayed trains
            logger.info('>>> Extract list of trains')
            list_of_trains = self.driver.find_elements_by_xpath(
                '//button[@data-auto="BTN_TRAVEL_SUMMARY"]')
            logger.info(' - Found total: %s', len(list_of_trains))

            # Get the list of purchasable trains
            list_of_prices = self.driver.find_elements_by_xpath(
                '//button[@data-auto="BTN_PRICEBTN_SECOND"]')
            logger.info(' - Found to buy: %s', len(list_of_prices))

            # Keep only the TGVmax fares (their price button reads
            # "meilleur prix 0 € 0,00 €") and remember their vertical positions
            list_of_tgvmax = [
                i for i in list_of_prices if i.text.split() == tgvmax_signature
            ]
            y_buttons = [i.location['y'] for i in list_of_tgvmax]
            logger.info(' - Found TGVmax: %s', len(list_of_tgvmax))

            # Keep the trains that line up with a TGVmax price button
            travel_2_keep = [
                train for train in list_of_trains
                if train.location['y'] in y_buttons
            ]
            logger.info('>>> TGVMAX POWER:')
            for i in travel_2_keep:
                logger.debug((i.location, i.text.split()))

            # Check whether the desired departure time is available
            wanted = None
            for travel in travel_2_keep:
                if horaire_souhaite in travel.text:
                    wanted = travel
            if wanted is None:
                logger.warning('No TGVmax train found at %s', horaire_souhaite)
            else:
                logger.info('Train found')
            return wanted

        wanted_button = find_wanted_train()
        if wanted_button is None:
            return

        # The desired time was found
        logger.info('Click on selected train')
        self.scroll_to_element(wanted_button)

        # Re-scan after scrolling: the page may have refreshed, leaving the
        # previous element references stale
        wanted_button = find_wanted_train()
        if wanted_button is None:
            return

        wanted_button.click()
        logger.debug('Wait 7s')
        time.sleep(6.7)
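
The TGVmax filter works by matching price buttons to train rows through their vertical position on the page. A minimal sketch of that alignment step, with plain dicts standing in for Selenium WebElements:

# plain dicts stand in for WebElements ('y' position and visible text)
trains = [{"y": 100, "text": "15:52 Paris > Bordeaux"},
          {"y": 220, "text": "18:10 Paris > Bordeaux"}]
prices = [{"y": 100, "text": "meilleur prix 0 € 0,00 €"}]

signature = ['meilleur', 'prix', '0', '€', '0,00', '€']
tgvmax_rows = {p["y"] for p in prices if p["text"].split() == signature}
kept = [t for t in trains if t["y"] in tgvmax_rows]
assert [t["text"] for t in kept] == ["15:52 Paris > Bordeaux"]
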
Example #33
    def MakeTransaction(self,
                        tx,
                        change_address=None,
                        fee=Fixed8(0),
                        from_addr=None,
                        use_standard=False,
                        watch_only_val=0,
                        exclude_vin=None,
                        use_vins_for_asset=None):
        """
        This method is used to to calculate the necessary TransactionInputs (CoinReferences) and TransactionOutputs to
        be used when creating a transaction that involves an exchange of system assets, ( NEO, Gas, etc ).

        Args:
            tx (Transaction): The Transaction to be used.
            change_address (UInt160): The address any change for the transaction should be returned to.
            fee (Fixed8): A fee to be attached to the Transaction for network processing purposes.
            from_addr (UInt160): If present, all CoinReferences selected will only come from this address.
            use_standard (bool): If true, only CoinReferences from standard addresses ( not contracts that are smart contracts ) will be used.
            watch_only_val (int): 0 or CoinState.WATCH_ONLY, if present only choose coins that are in a WatchOnly address.
            exclude_vin (list): A list of CoinReferences to NOT use in the making of this tx.
            use_vins_for_asset (list): A list of CoinReferences to use.

        Returns:
            tx: (Transaction) Returns the transaction with oupdated inputs and outputs.
        """

        tx.ResetReferences()
        tx.ResetHashData()

        if not tx.outputs:
            tx.outputs = []
        if not tx.inputs:
            tx.inputs = []

        fee = fee + (tx.SystemFee() * Fixed8.FD())

        paytotal = {}
        if tx.Type != int.from_bytes(TransactionType.IssueTransaction, 'little'):
            for key, group in groupby(tx.outputs, lambda x: x.AssetId):
                total = Fixed8(0)
                for item in group:
                    total = total + item.Value
                paytotal[key] = total

        if fee > Fixed8.Zero():

            if Blockchain.SystemCoin().Hash in paytotal.keys():
                paytotal[Blockchain.SystemCoin().Hash] = paytotal[Blockchain.SystemCoin().Hash] + fee
            else:
                paytotal[Blockchain.SystemCoin().Hash] = fee

        paycoins = {}

        self._vin_exclude = exclude_vin

        for assetId, amount in paytotal.items():

            if use_vins_for_asset is not None and len(use_vins_for_asset) > 0 and use_vins_for_asset[1] == assetId:
                paycoins[assetId] = self.FindCoinsByVins(use_vins_for_asset[0])
            else:
                paycoins[assetId] = self.FindUnspentCoinsByAssetAndTotal(
                    assetId, amount, from_addr=from_addr, use_standard=use_standard, watch_only_val=watch_only_val)

        self._vin_exclude = None

        for key, unspents in paycoins.items():
            if unspents is None:
                if not self.IsSynced:
                    logger.warning("Wait for your wallet to be synced before doing "
                                   "transactions. To check enter 'wallet' and look at "
                                   "'percent_synced', it should be 100. Also the blockchain "
                                   "should be up to the latest blocks (see Progress). Issuing "
                                   "'wallet rebuild' restarts the syncing process.")
                    return None

                else:
                    logger.error("insufficient funds for asset id: %s " % key)
                    return None

        input_sums = {}

        for assetId, unspents in paycoins.items():
            total = Fixed8(0)
            for coin in unspents:
                total = total + coin.Output.Value
            input_sums[assetId] = total

        if not change_address:
            change_address = self.GetChangeAddress(from_addr=from_addr)

        new_outputs = []

        for assetId, total in input_sums.items():
            if total > paytotal[assetId]:
                difference = total - paytotal[assetId]
                output = TransactionOutput(AssetId=assetId, Value=difference, script_hash=change_address)
                new_outputs.append(output)

        inputs = []

        for item in paycoins.values():
            for ref in item:
                inputs.append(ref.Reference)

        tx.inputs = inputs
        tx.outputs = tx.outputs + new_outputs

        return tx
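
The change computation at the end of MakeTransaction reduces to: sum the selected inputs per asset, subtract what the outputs plus the fee require, and return the difference to the change address. In plain integers, with hypothetical amounts:

# hypothetical amounts, keyed by asset id
paytotal = {"neo": 10, "gas": 3}    # what the outputs plus fee require
input_sums = {"neo": 12, "gas": 3}  # what the selected coins provide

change = {
    asset: input_sums[asset] - paytotal[asset]
    for asset in input_sums
    if input_sums[asset] > paytotal[asset]
}
assert change == {"neo": 2}  # sent back to the change address
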