Example #1
def main(ctx, verbose=False, debug=False, log_file=None,
         env_prefix=None, global_urls_variable=None):
    formatter = daiquiri.formatter.ColorFormatter(
        fmt="%(color)s%(levelname)s "
        "[%(name)s] %(message)s%(color_stop)s")

    outputs = [
        daiquiri.output.Stream(sys.stderr, formatter=formatter)
    ]

    if log_file:
        outputs.append(daiquiri.output.File(log_file,
                                            formatter=formatter))

    ctx.obj = {
        "debug": debug,
    }
    if env_prefix is not None:
        ctx.obj['env_prefix'] = env_prefix
    if global_urls_variable is not None:
        ctx.obj['global_urls_variable'] = global_urls_variable

    if debug:
        level = logging.DEBUG
    elif verbose:
        level = logging.INFO
    else:
        level = logging.WARNING

    daiquiri.setup(outputs=outputs, level=level)
Example #2
def setup_logging(verbose=0):
    global logging
    progname = basename(sys.argv[0])
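    # Logging levels are spaced 10 apart (ERROR=40, WARNING=30, INFO=20, DEBUG=10),
    # so each -v lowers the threshold by one level.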
    daiquiri.setup(
        level=(logging.ERROR - 10*verbose),
        program_name=progname)
    logging = daiquiri.getLogger(progname)
Example #3
    def create_logger(self, log_level=logging.INFO):
        daiquiri.setup(
            outputs=(
                daiquiri.output.File(
                    directory=self.settings.get('LOG_PATH'),
                    program_name=self.project_name,
                ),
                daiquiri.output.STDOUT,
            )
        )

        daiquiri.getLogger(program_name=self.project_name).logger.level = log_level
        self.logger = daiquiri.getLogger(
            program_name=self.project_name, log_level=log_level
        )
Example #4
def init_log_info(log_path, log_filename):
    if not os.path.isdir(log_path):
        os.mkdir(log_path)
    log_file = log_path + "/" + log_filename
    daiquiri.setup(
        level=logging.INFO,
        outputs=(
            daiquiri.output.Stream(formatter=daiquiri.formatter.ColorFormatter(
                fmt="%(asctime)s [PID %(process)d] [%(levelname)s] "
                    "%(name)s:%(lineno)d %(message)s")),
            daiquiri.output.File(log_file,
                                 formatter=daiquiri.formatter.ColorFormatter(
                                     fmt="%(asctime)s [PID %(process)d] "
                                         "[%(levelname)s] %(name)s:%(lineno)d "
                                         "%(message)s")),
        )
    )
Example #5
    def configure_logging(self):
        formatter = daiquiri.formatter.ColorFormatter(
            fmt="%(color)s%(levelname)s "
            "[%(name)s] %(message)s%(color_stop)s")

        outputs = [
            daiquiri.output.Stream(sys.stderr, formatter=formatter)
        ]

        if self.options.log_file:
            outputs.append(daiquiri.output.File(self.options.log_file,
                                                formatter=formatter))

        if self.options.debug:
            level = logging.DEBUG
        elif self.options.verbose_level == 1:
            level = logging.INFO
        else:
            level = logging.WARNING

        daiquiri.setup(outputs=outputs, level=level)
Example #6
import logging

import daiquiri
import daiquiri.formatter

daiquiri.setup(level=logging.INFO,
               outputs=[
                   daiquiri.output.Stream(
                       formatter=daiquiri.formatter.ColorExtrasFormatter(
                           fmt=(daiquiri.formatter.DEFAULT_FORMAT +
                                " [%(subsystem)s is %(mood)s]" + "%(extras)s"),
                           keywords=['mood', 'subsystem'],
                       ))
               ])

logger = daiquiri.getLogger(__name__, subsystem="example")
logger.info("It works and log to stderr by default with color!",
            mood="happy",
            arbitrary_context="included")
Example #7
    https://www.python.org/dev/peps/pep-0503/
 * A Platform Tag for Portable Linux Built Distributions
    https://www.python.org/dev/peps/pep-0513/
 * The Wheel Binary Package Format 1.0
    https://www.python.org/dev/peps/pep-0427/
"""

import os
import sys
import re
import logging

import click
import daiquiri

daiquiri.setup()

_LOGGER = logging.getLogger(__name__)

# Adjusted based on: https://www.python.org/dev/peps/pep-0427/
_WHEEL_RE = re.compile(
    "(?P<distribution>.+)-(?P<version>.+)(-(?P<build_tag>.+))?-(?P<python_tag>.+)-(?P<abi_tag>.+)-(?P<platform_tag>.+).whl"
)


def _check_python_artifacts(package_dir: str) -> bool:
    """Check Python artifacts present in the corresponding package directory."""
    any_error = False

    for package_artifact in os.listdir(package_dir):
        artifact_path = os.path.join(package_dir, package_artifact)
Example #8
def main():

    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    subparsers.required = True
    subparsers.dest = "command"

    subparser = subparsers.add_parser("simplify")
    subparser.add_argument("input", type=str, help="Input tree sequence")
    subparser.add_argument("output", type=str, help="Input tree sequence")
    subparser.set_defaults(func=run_simplify)

    subparser = subparsers.add_parser("sequential-augment")
    subparser.add_argument("input", type=str, help="Input tree sequence")
    subparser.add_argument("--num-threads", type=int, default=0)
    subparser.add_argument("--seed", type=int, default=1)
    subparser.set_defaults(func=run_sequential_augment)

    subparser = subparsers.add_parser("combine-ukbb-1kg")
    subparser.add_argument("chromosome", type=str, help="chromosome stem")
    subparser.add_argument("--num-individuals",
                           type=int,
                           help="number of individuals to use",
                           default=None)
    subparser.set_defaults(func=run_combine_ukbb_1kg)

    subparser = subparsers.add_parser("benchmark-tskit")
    subparser.add_argument("input", type=str, help="Input tree sequence")
    subparser.add_argument(
        "--num-variants",
        type=int,
        default=None,
        help="Number of variants to benchmark genotypes decoding performance on"
    )
    subparser.set_defaults(func=run_benchmark_tskit)

    subparser = subparsers.add_parser("benchmark-vcf")
    subparser.add_argument("input", type=str, help="Input VCF")
    subparser.add_argument(
        "--num-variants",
        type=int,
        default=None,
        help="Number of variants to benchmark genotypes decoding performance on"
    )
    subparser.set_defaults(func=run_benchmark_vcf)

    subparser = subparsers.add_parser("compute-1kg-ukbb-gnn")
    subparser.add_argument("input", type=str, help="Input tree sequence")
    subparser.add_argument("output",
                           type=str,
                           help="Filename to write CSV to.")
    subparser.add_argument("--num-threads", type=int, default=16)
    subparser.set_defaults(func=run_compute_1kg_ukbb_gnn)

    subparser = subparsers.add_parser("compute-ukbb-gnn")
    subparser.add_argument("input", type=str, help="Input tree sequence")
    subparser.add_argument("output",
                           type=str,
                           help="Filename to write CSV to.")
    subparser.add_argument("--num-threads", type=int, default=16)
    subparser.set_defaults(func=run_compute_ukbb_gnn)

    subparser = subparsers.add_parser("compute-1kg-gnn")
    subparser.add_argument("input", type=str, help="Input tree sequence")
    subparser.add_argument("output",
                           type=str,
                           help="Filename to write CSV to.")
    subparser.add_argument("--num-threads", type=int, default=16)
    subparser.set_defaults(func=run_compute_1kg_gnn)

    subparser = subparsers.add_parser("compute-sgdp-gnn")
    subparser.add_argument("input", type=str, help="Input tree sequence")
    subparser.add_argument("output",
                           type=str,
                           help="Filename to write CSV to.")
    subparser.add_argument("--num-threads", type=int, default=16)
    subparser.set_defaults(func=run_compute_sgdp_gnn)

    subparser = subparsers.add_parser("snip-centromere")
    subparser.add_argument("input", type=str, help="Input tree sequence")
    subparser.add_argument("output", type=str, help="Output tree sequence")
    subparser.add_argument("chrom", type=str, help="Chromosome name")
    subparser.add_argument("centromeres",
                           type=str,
                           help="CSV file containing centromere coordinates.")
    subparser.set_defaults(func=run_snip_centromere)

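    # daiquiri passes the level through to stdlib logging, which also accepts a level name such as "INFO".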
    daiquiri.setup(level="INFO")

    args = parser.parse_args()
    args.func(args)
Example #9
from collections import deque
import random

import gym

import numpy as np
import tensorflow as tf
import daiquiri

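# daiquiri imports the stdlib logging module at package level, so daiquiri.logging.DEBUG is logging.DEBUG.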
daiquiri.setup(level=daiquiri.logging.DEBUG)
logger = daiquiri.getLogger(__name__)

RENDER = False


class Neuro:
    def __init__(self):

        n_in = 4
        n_hidden = 4
        n_out = 2

        initializer = tf.contrib.layers.variance_scaling_initializer()
        self.input_ = tf.placeholder(dtype=tf.float32, shape=[None, n_in])
        hidden = tf.layers.dense(self.input_, n_hidden, activation=tf.nn.tanh)
        self.probs = tf.nn.softmax(tf.layers.dense(hidden, 2))
        self.gradients = tf.gradients(self.probs[0, 0],
                                      tf.trainable_variables())

    def run(self, input_arr):
        return self.sess.run(self.probs, feed_dict={self.input_: input_arr})
Example #10
:Created:
    8/23/20
"""
import logging
import hashlib
import os
from pathlib import Path

import click
import daiquiri

cwd = os.path.dirname(os.path.realpath(__file__))
logfile = cwd + "/offline_checksum.log"
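# daiquiri.setup accepts preset output names such as "stdout" and "stderr" alongside output objects.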
daiquiri.setup(level=logging.DEBUG,
               outputs=(
                   daiquiri.output.File(logfile),
                   "stdout",
               ))
logger = daiquiri.getLogger(__name__)


def get_files(data: Path, ext: str = ""):
    f = list()
    if len(ext) > 0:
        ext = "." + ext.lstrip(".")
    files = data.rglob(f"*{ext}")
    for file in files:
        if Path(file).is_file():
            f.append(file)
    return f
Example #11
def main(debug=False):
    colorama.init()
    daiquiri.setup(level=logging.DEBUG if debug else logging.WARNING)
Example #12
def prepare_service(args=None, conf=None,
                    default_config_files=None,
                    log_to_std=False, logging_level=None):
    if conf is None:
        conf = cfg.ConfigOpts()
    opts.set_defaults()
    # FIXME(jd) Use the pkg_entry info to register the options of these libs
    db_options.set_defaults(conf)
    policy_opts.set_defaults(conf)

    # Register our own Gnocchi options
    for group, options in opts.list_opts():
        conf.register_opts(list(options),
                           group=None if group == "DEFAULT" else group)

    conf.register_cli_opts(opts._cli_options)

    conf.set_default("workers", utils.get_default_workers(), group="metricd")

    conf(args, project='gnocchi', validate_default_values=True,
         default_config_files=default_config_files,
         version=pbr.version.VersionInfo('gnocchi').version_string())

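    # Write logs to a file when log_dir or log_file is configured and std-stream
    # logging was not requested; otherwise fall back to stderr.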
    if not log_to_std and (conf.log_dir or conf.log_file):
        outputs = [daiquiri.output.File(filename=conf.log_file,
                                        directory=conf.log_dir)]
    else:
        outputs = [daiquiri.output.STDERR]

    if conf.use_syslog:
        outputs.append(
            daiquiri.output.Syslog(facility=conf.syslog_log_facility))

    if conf.use_journal:
        outputs.append(daiquiri.output.Journal())

    daiquiri.setup(outputs=outputs)
    if logging_level is None:
        if conf.debug:
            logging_level = logging.DEBUG
        elif conf.verbose:
            logging_level = logging.INFO
        else:
            logging_level = logging.WARNING
    logging.getLogger("gnocchi").setLevel(logging_level)

    # HACK(jd) I'm not happy about that, fix AP class to handle a conf object?
    archive_policy.ArchivePolicy.DEFAULT_AGGREGATION_METHODS = (
        conf.archive_policy.default_aggregation_methods
    )

    # If no coordination URL is provided, default to using the indexer as
    # coordinator
    if conf.storage.coordination_url is None:
        if conf.storage.driver == "redis":
            conf.set_default("coordination_url",
                             conf.storage.redis_url,
                             "storage")
        elif conf.incoming.driver == "redis":
            conf.set_default("coordination_url",
                             conf.incoming.redis_url,
                             "storage")
        else:
            parsed = urlparse.urlparse(conf.indexer.url)
            proto, _, _ = parsed.scheme.partition("+")
            parsed = list(parsed)
            # Set proto without the + part
            parsed[0] = proto
            conf.set_default("coordination_url",
                             urlparse.urlunparse(parsed),
                             "storage")

    cfg_path = conf.oslo_policy.policy_file
    if not os.path.isabs(cfg_path):
        cfg_path = conf.find_file(cfg_path)
    if cfg_path is None or not os.path.exists(cfg_path):
        cfg_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                'rest', 'policy.json'))
    conf.set_default('policy_file', cfg_path, group='oslo_policy')

    conf.log_opt_values(LOG, logging.DEBUG)

    return conf
Example #13
import logging
import pathlib

import daiquiri
from smart_simulation.cfg_templates.config import package_dir

daiquiri.setup(
    level=logging.INFO,
    outputs=(
        daiquiri.output.Stream(
            formatter=daiquiri.formatter.ColorFormatter(
                fmt="%(asctime)s [%(levelname)s] %(name)s.%(" "funcName)s: %(message)s"
            )
        ),
    ),
)

PACKAGE_PATH = pathlib.Path(package_dir)
SIMULATION_OUTPUTS_PATH = PACKAGE_PATH / "smart_simulation" / "outputs" / "simulations"
SIMULATION_WEIGHTS_PATH = SIMULATION_OUTPUTS_PATH / "weights"
SIMULATION_SERVINGS_PATH = SIMULATION_OUTPUTS_PATH / "servings"


def weight_files(weights_directory: pathlib.PurePath = SIMULATION_WEIGHTS_PATH) -> list:
    """
    Return a list of weight file paths from a directory. The local simulation output directory is the default.
    Args:
        weights_directory: Directory of the weight files.

    Returns: A list of paths of all files in the directory.
Example #14
    def __init__(self, database=None):
        daiquiri.setup(level=logging.INFO)
        self.logger = daiquiri.getLogger(__name__)
        self.database = Database() if not database else database
        self.faker = Faker()
        self.postal_codes = None
Example #15
import os
import shutil
import random
import numpy as np
from tqdm import tqdm
from collections import OrderedDict

from config import DIR, TARGET_DIR, OUTPUT_DIR, NUM_TOP_i_IMAGES, NUM_TOP_p_Predictions

import daiquiri as dqr
import logging

dqr.setup(level=logging.DEBUG)
logger = dqr.getLogger()


def maybe_sample_images(dataset_name: str, sample_num: int = 5):
    """Sample some images if the given directory is empty

    Args:
        dataset_name:   dataset_name
        sample_num:     number of images to randomly sample if there are no images
                        or the directory does not exist at all.
    """
    target_dir = os.path.join(TARGET_DIR, dataset_name)
    # Create directory if not exist
    if not os.path.exists(target_dir):
        logger.warning(
            'Target directory does not exist. Creating directory {}'.format(
                target_dir))
        os.makedirs(target_dir)
Example #16
import logging
import os

import daiquiri
from flask import request, Blueprint, jsonify, current_app
from IGitt.GitHub.GitHubRepository import GitHubRepository
from IGitt.GitHub.GitHubIssue import GitHubToken, GitHubIssue

from sesheta.utils import (
    notify_channel,
    random_positive_emoji,
    random_positive_emoji2,
    calculate_pullrequest_size,
    set_size,
)
from sesheta.webhook_processors.github_reviews import *
from sesheta.webhook_processors.github_pull_requests import *
from sesheta.webhook_processors.github_issue_analyzer import analyse_github_issue

daiquiri.setup(level=logging.DEBUG, outputs=("stdout", "stderr"))
_LOGGER = daiquiri.getLogger(__name__)
_RELATED_REGEXP = r"\w+:\ \#([0-9]+)"
_DRY_RUN = os.environ.get("SESHETA_DRY_RUN", False)
_SESHETA_GITHUB_ACCESS_TOKEN = os.getenv("SESHETA_GITHUB_ACCESS_TOKEN", None)
_SESHETA_GITHUB_WEBHOOK_SECRET = os.getenv("SESHETA_GITHUB_WEBHOOK_SECRET",
                                           None)
_GIT_API_REQUEST_HEADERS = {
    "Authorization": "token %s" % _SESHETA_GITHUB_ACCESS_TOKEN
}

webhooks = Blueprint("webhook", __name__, url_prefix="")


def handle_github_open_issue(issue: dict,
                             repository: dict) -> None:  # pragma: no cover
Example #17
import contextlib
import logging
import sys
from pathlib import Path

import daiquiri

from _pygitviz import graphviz
from _pygitviz import util
from _pygitviz import git
from _pygitviz.graphviz import git_to_dot

daiquiri.setup(
    level=logging.WARNING,
    outputs=(
        daiquiri.output.Stream(
            sys.stdout,
            formatter=daiquiri.formatter.ColorFormatter(
                fmt="%(color)s[%(levelname)s] %(message)s%(color_stop)s"
            ),
        ),
    ),
)
LOGGER = daiquiri.getLogger(__file__)


def main() -> None:
    """Run the PyGitViz program."""
    operating_system = util.get_os()
    parser = _create_parser(operating_system)
    args = parser.parse_args(sys.argv[1:])

    with _convert_error_to_log(traceback=args.traceback):
Example #18
import logging

import daiquiri
import daiquiri.formatter

daiquiri.setup(
    level=logging.INFO,
    outputs=(
        daiquiri.output.Stream(
            formatter=daiquiri.formatter.ColorFormatter(
                fmt=(daiquiri.formatter.DEFAULT_FORMAT + " [%(subsystem)s is %(mood)s]")
            )
        ),
    ),
)

logger = daiquiri.getLogger(__name__, subsystem="example")
logger.info("It works and log to stderr by default with color!", mood="happy")
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging

import daiquiri
from pythonjsonlogger import jsonlogger

stream_fmt = "%(asctime)s [%(process)d] %(color)s%(levelname)-8.8s %(name)s: %(message)s%(color_stop)s"
json_fmt = "(asctime) (process) (levelname) (name) (message)"

daiquiri.setup(
    level=logging.INFO,
    outputs=(
        daiquiri.output.Stream(
            formatter=daiquiri.formatter.ColorFormatter(stream_fmt),
        ),
        daiquiri.output.TimedRotatingFile(
            filename="{{ cookiecutter.package_name }}.log",
            formatter=jsonlogger.JsonFormatter(json_fmt, json_default=str),
        ),
    ),
)


def client_logger(name=__name__):
    return daiquiri.getLogger(name)
Example #20
import logging
from datetime import timedelta, datetime
from pathlib import Path
from shutil import rmtree
import daiquiri
import graphyte

import common.config as config
import common.helper as helper
import common.monitor as monitor
from common.monitor import send_series_event, s_events
from common.constants import mercure_defs, mercure_folders

daiquiri.setup(
    level=logging.INFO,
    outputs=(daiquiri.output.Stream(
        formatter=daiquiri.formatter.ColorFormatter(
            fmt="%(color)s%(levelname)-8.8s "
            "%(name)s: %(message)s%(color_stop)s")), ),
)
logger = daiquiri.getLogger("cleaner")


def terminate_process(signalNumber, frame):
    """Triggers the shutdown of the service."""
    helper.g_log("events.shutdown", 1)
    logger.info("Shutdown requested")
    monitor.send_event(monitor.h_events.SHUTDOWN_REQUEST,
                       monitor.severity.INFO)
    # Note: main_loop can be read here because it has been declared as global variable
    if "main_loop" in globals() and main_loop.is_running:
        main_loop.stop()
Example #21
__version__ = 0.1

import os
import logging
import configparser

import daiquiri
import daiquiri.formatter

_ROOT = os.path.dirname(os.path.abspath(__file__))
_CONFIG = os.path.join(_ROOT, 'config.ini')

FORMAT = ("%(asctime)s :: %(color)s%(levelname)s :: %(name)s :: %(funcName)s :"
          "%(message)s%(color_stop)s")
daiquiri.setup(level=logging.INFO,
               outputs=(daiquiri.output.Stream(
                   formatter=daiquiri.formatter.ColorFormatter(fmt=FORMAT)), ))
logger = daiquiri.getLogger("root")

if not os.path.isfile(_CONFIG):
    logger.error("Configuration file '%s' not found", _CONFIG)
    config = None
else:
    config = configparser.ConfigParser(allow_no_value=True)
    with open(_CONFIG) as fobj:
        config.read_file(fobj)
Example #22
               initialization=U_init)

    return final_centroids, indicator_vector_final

if __name__ == "__main__":
    logger.info("Command line: " + " ".join(sys.argv))
    log_memory_usage("Memory at startup")
    arguments = docopt.docopt(__doc__)
    paraman = ParameterManagerEfficientNystrom(arguments)
    initialized_results = dict((v, None) for v in lst_results_header)
    resprinter = ResultPrinter(output_file=paraman["--output-file_resprinter"])
    resprinter.add(initialized_results)
    resprinter.add(paraman)
    has_failed = False
    if paraman["-v"] >= 2:
        daiquiri.setup(level=logging.DEBUG)
    elif paraman["-v"] >= 1:
        daiquiri.setup(level=logging.INFO)
    else:
        daiquiri.setup(level=logging.WARNING)

    logging.warning("Verbosity set to warning")
    logging.info("Verbosity set to info")
    logging.debug("Verbosity set to debug")


    try:
        dataset = paraman.get_dataset()

        if dataset["x_train"].dtype != np.float32:
            dataset["x_train"] = dataset["x_train"].astype(np.float32)
Example #23
import os
import logging

import daiquiri

from flask import Flask, request
from flask.json import jsonify
from flask_cors import CORS

import src.config as config
from src.exceptions import HTTPError
from fabric8a_auth.auth import AuthError
from src.trained_model_details import trained_model_details
from rudra.utils.validation import check_field_exists
from rudra.deployments.emr_scripts.pypi_emr import PyPiEMR
from rudra.deployments.emr_scripts.maven_emr import MavenEMR
from rudra.deployments.emr_scripts.npm_emr import NpmEMR

daiquiri.setup(level=os.environ.get('FLASK_LOGGING_LEVEL', logging.INFO))
_logger = daiquiri.getLogger(__name__)

app = Flask(__name__)
CORS(app)

emr_instances = {'maven': MavenEMR, 'pypi': PyPiEMR, 'npm': NpmEMR}


@app.route('/api/v1/readiness')
def readiness():
    """Readiness probe."""
    return jsonify({"status": "ready"}), 200


@app.route('/api/v1/liveness')
Example #24
import logging
import os

import daiquiri
from kafka import KafkaConsumer
from cyborg_regidores import __version__ as cyborg_regidores_version
from cyborg_regidores.topic_names import (
    GITHUB_WEBHOOK_TOPIC_NAME,
    GITLAB_WEBHOOK_TOPIC_NAME,
    JIRA_WEBHOOK_TOPIC_NAME,
    TRELLO_WEBHOOK_TOPIC_NAME,
    GOOGLE_CHATBOT_TOPIC_NAME,
    NORMALIZED_EVENTS_TOPIC_NAME,
    AICOE_ACTIVITY_TOPIC_NAME,
)


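# Note: when DEBUG is set in the environment, os.getenv returns a string, and any
# non-empty string (including "0" or "false") is truthy in the check below.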
DEBUG = os.getenv("DEBUG", True)


daiquiri.setup()
_LOGGER = daiquiri.getLogger("webhook2kafka")
_LOGGER.setLevel(logging.DEBUG if DEBUG else logging.INFO)

_KAFAK_BOOTSTRAP_SERVERS = os.getenv("KAFKA_BOOTSTRAP_SERVERS", "localhost:9092")


if __name__ == "__main__":
    _LOGGER.info(f"Cyborg Regidores dump v{cyborg_regidores_version}.")
    _LOGGER.debug("DEBUG mode is enabled!")
    _LOGGER.debug(f"using Kafka bootstrap servers: {_KAFAK_BOOTSTRAP_SERVERS}")

    # let's get all that we got...
    consumer = KafkaConsumer(
        AICOE_ACTIVITY_TOPIC_NAME,
        bootstrap_servers=_KAFAK_BOOTSTRAP_SERVERS,
Example #25
# SrcMtrcs
# Copyright(C) 2018 Christoph Görn
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This is Source Operations Metrics..."""

import logging

import daiquiri

from srcmtrcs import create_application, __version__

daiquiri.setup(level=logging.DEBUG, outputs=('stdout', 'stderr'))
_LOGGER = daiquiri.getLogger(__name__)

_LOGGER.info(f"Hi, I am the Source Operations Metrics bot, "
             f"and I'm running v{__version__}")

application = create_application()
Example #26
def main(input_file,
         regions_file,
         output_directory,
         input_signature,
         elements_file,
         elements,
         genome,
         element_mutations,
         cluster_mutations,
         smooth_window,
         cluster_window,
         kmer,
         n_simulations,
         simulation_mode,
         simulation_window,
         signature_calculation,
         signature_group,
         cores,
         seed,
         log_level,
         concatenate,
         clustplot,
         qqplot,
         gzip
         ):
    """
    OncodriveCLUSTL is a sequence based clustering method to identify cancer drivers across the genome

    Args:
        input_file (str): path to mutations file
        regions_file (str): path to input genomic coordinates file
        output_directory (str): path to output directory. Output files will be generated in it.
        input_signature (str): path to file containing input context based mutational probabilities.
            By default (when no input signatures), OncodriveCLUSTL will calculate them from the mutations input file.
        elements_file (str): path to file containing one element per row (optional) to analyze only the listed elements.
            By default, OncodriveCLUSTL analyzes all genomic elements contained in `regions_file`.
        elements (str): genomic element symbol (optional). The analysis will be performed only on the specified GEs.
        genome (str): genome to use: 'hg38', 'hg19', 'mm10', 'c3h', 'car', 'cast' and 'f344'
        element_mutations (int): minimum number of mutations per genomic element to undertake analysis
        cluster_mutations (int): minimum number of mutations to define a cluster
        smooth_window (int): Tukey kernel smoothing window length
        cluster_window (int): clustering window length
        kmer (int): context nucleotides to calculate the mutational probabilities (trinucleotides or pentanucleotides)
        n_simulations (int): number of simulations
        simulation_mode (str): simulation mode
        simulation_window (int): window length to simulate mutations
        signature_calculation (str): signature calculation, mutation frequencies (default) or mutation counts
            normalized by k-mer region counts
        signature_group (str): header of the column to group signatures. One signature will be computed for each group
        cores (int): number of CPUs to use
        seed (int): seed
        log_level (str): verbosity of the logger
        concatenate (bool): flag to calculate clustering on collapsed genomic regions (e.g., coding regions in a gene)
        clustplot (bool): flag to generate a needle plot with clusters for an element
        qqplot (bool): flag to generate a quantile-quantile (QQ) plot for a dataset
        gzip (bool): flag to generate GZIP compressed output files

    Returns:
        None

    """

    global logger

    # Get output directory
    if not os.path.exists(output_directory):
        os.makedirs(output_directory, exist_ok=True)

    # Get cache directory
    path_cache = os.path.join(output_directory, 'cache')
    os.makedirs(path_cache, exist_ok=True)

    # Get output files name
    if elements_file is not None:
        output_file = elements_file.split('/')[-1]
    else:
        output_file = 'results'
    if gzip:
        elements_output_file = 'elements_{}.txt.gz'.format(output_file)
        clusters_output_file = 'clusters_{}.tsv.gz'.format(output_file)
    else:
        elements_output_file = 'elements_{}.txt'.format(output_file)
        clusters_output_file = 'clusters_{}.tsv'.format(output_file)

    daiquiri.setup(level=LOGS[log_level], outputs=(
        daiquiri.output.STDERR,
        daiquiri.output.File(
            filename=output_file + '.log',
            directory=output_directory
        )
    ))
    logger = daiquiri.getLogger()

    # suppress log messages from some libraries
    daiquiri.getLogger('bgdata').setLevel(logging.WARNING)
    daiquiri.getLogger('bgsignature').setLevel(logging.WARNING)

    logger.info('OncodriveCLUSTL')
    logger.info('\n'.join([
        '',
        'input_file: {}'.format(input_file),
        'regions_file: {}'.format(regions_file),
        'input_signature: {}'.format(input_signature),
        'output_directory: {}'.format(output_directory),
        'genome: {}'.format(genome),
        'element_mutations: {}'.format(element_mutations),
        'cluster_mutations: {}'.format(cluster_mutations),
        'concatenate: {}'.format(concatenate),
        'smooth_window: {}'.format(smooth_window),
        'cluster_window: {}'.format(cluster_window),
        'k-mer: {}'.format(kmer),
        'simulation_mode: {}'.format(simulation_mode),
        'simulation_window: {}'.format(simulation_window),
        'n_simulations: {}'.format(n_simulations),
        'signature_calculation: {}'.format(signature_calculation),
        'signature_group: {}'.format(signature_group),
        'cores: {}'.format(cores),
        'gzip: {}'.format(gzip),
        'seed: {}'.format(seed)
    ]))
    logger.info('Initializing OncodriveCLUSTL...')

    # Check parameters
    if simulation_window == 31 and smooth_window == 11 and cluster_window == 11:
        logger.warning(
            '\nRunning with default simulating, smoothing and clustering OncodriveCLUSTL parameters. '
            'Default parameters may not be optimal for your data.\n'
            'Please, read Supplementary Methods to perform model selection for your data.'
        )

    if not input_signature and signature_calculation == 'frequencies':
        logger.warning(
            '\nSignatures will be calculated as mutation frequencies: '
            '# mutated ref>alt k-mer counts / # total substitutions\n'
            'Please, read Supplementary Methods to perform a more accurate signature calculation'
        )

    if signature_calculation == 'region_normalized':
        logger.warning(
            '\nMutation k-mer counts will be normalized by k-mer region counts in {}\n'
            'Only mutations inside regions will contribute to the signature calculation'.format(regions_file)
        )

    if n_simulations < 1000:
        raise excep.UserInputError('Invalid number of simulations: please choose an integer of at least 1000')

    if clustplot:
        if len(elements) > 10:
            raise excep.UserInputError('Needle plots can only be generated for a maximum of 10 elements')

    # Create a list of elements to analyze
    if elements is not None:
        elements = set(elements)
    if elements_file is not None:
        elements |= set([line.strip().split()[0] for line in open(elements_file, 'r')])
    if elements is None and elements_file is None:
        elements = set([])
    if elements:
        logger.info(
            'Input element{}: {}'.format('s' if len(elements) > 1 else '', len(elements))
        )
        logger.info(', '.join(elements))

    # Parse regions and dataset mutations
    logger.info('Parsing genomic regions and mutations...')
    regions_d, concat_regions_d, chromosomes_d, strands_d, mutations_d, samples_d, groups_d = pars.parse(
        regions_file,
        elements,
        input_file,
        concatenate,
        signature_group
    )
    # Summary
    mut = 0
    elem = 0
    element_mutations_cutoff = False
    for k, v in mutations_d.items():
        mut += len(v)
        elem += 1
        if not element_mutations_cutoff:
            if len(v) >= element_mutations:
                element_mutations_cutoff = True
    logger.info('Validated elements in genomic regions: {}'.format(len(regions_d.keys())))
    logger.info('Validated elements with mutations: {}'.format(elem))
    logger.info('Total substitution mutations: {}'.format(mut))
    if not element_mutations_cutoff:
        raise excep.UserInputError('No element found with enough mutations to perform analysis')

    # Signature
    file_prefix = input_file.split('/')[-1].split('.')[0]
    path_pickle = os.path.join(path_cache, '{}_kmer_{}.pickle'.format(file_prefix, kmer))

    if not input_signature:
        """
        Calculate signatures
        
        By default, all substitutions are taken into account to calculate the relative frequencies for each
        k-mer ref>alt. 
         
        Alternatively, when specified through '--signature-calculation region_normalized', k-mer mutation counts 
        can be normalized by the k-mer counts in the regions under analysis listed in 'regions_file'. In this case, 
        only substitutions that fall inside the regions will contribute to the signature calculation.         
        
        For both options, k-mers are not collapsed (192 channels) and do not include N (unknown reference nucleotides).
        """
        logger.info('Computing signature{}...'.format('s for each group' if signature_group else ''))

        if signature_calculation == 'region_normalized':
            normalize_regions_file, signature_calc_function = regions_file, bgsign.normalize
        else:
            normalize_regions_file, signature_calc_function = None, bgsign.relative_frequency

        signatures_dict = signature_calc_function(
            mutations_file=input_file,
            regions_file=normalize_regions_file,
            kmer_size=int(kmer),
            genome_build=genome,
            collapse=None,
            group=signature_group,
            cores=cores
        )
        # Reformat dictionary
        if not signature_group:
            signatures_to_pickle = {file_prefix: signatures_dict}
        else:
            signatures_to_pickle = signatures_dict
        # Save to cache
        with open(path_pickle, 'wb') as fd:
            pickle.dump(signatures_to_pickle, fd, protocol=2)
        logger.info('Signature{} computed'.format('s' if signature_group else ''))
    else:
        try:
            load_sign = bgsign.file.load(file=input_signature)
        except UnicodeDecodeError:
            raise excep.UserInputError('Error in input signatures file {}\n'
                                       'Please, check signatures file format (JSON)'.format(input_signature))
        # Check format and save pickle to cache
        keys = set(load_sign.keys())
        if not signature_group:
            # Expects 'file_prefix' to be a key in signatures dictionary of dictionaries
            if file_prefix in keys:
                with open(path_pickle, 'wb') as fd:
                    pickle.dump(load_sign, fd, protocol=2)
            # When dictionary has k-mer (ex. AAA>T) as keys, accepts dictionary and adds extra key for 'file_prefix'
            elif '>' in list(keys)[0]:
                with open(path_pickle, 'wb') as fd:
                    pickle.dump({file_prefix: load_sign}, fd, protocol=2)
            # Error, 'file_prefix' and k-mers not found as keys, check format
            else:
                raise excep.UserInputError('Incorrect format for input signature file {}'.format(input_signature))
        else:
            if not '>' in list(keys)[0]:
                with open(path_pickle, 'wb') as fd:
                    pickle.dump(load_sign, fd, protocol=2)
            # n signature dictionaries are expected (n = groups)
            else:
                raise excep.UserInputError('Groups are missing in signature dictionary at {}'.format(input_signature))

        logger.info('Input signature{} ready'.format('s' if signature_group else ''))

    # Initialize Experiment class variables and run
    elements_results, clusters_results, global_info_results = exp.Experiment(
        regions_d,
        concat_regions_d,
        chromosomes_d,
        strands_d,
        mutations_d,
        samples_d,
        genome,
        groups_d,
        path_pickle,
        element_mutations,
        cluster_mutations,
        smooth_window,
        cluster_window,
        int(kmer),
        n_simulations,
        simulation_mode,
        simulation_window,
        cores,
        clustplot,
        seed,
    ).run()

    # Write elements results (returns list of ranked elements)
    sorted_list_elements = postp.write_element_results(
        genome=genome,
        results=(elements_results, global_info_results),
        directory=output_directory,
        file=elements_output_file,
        is_gzip=gzip
    )
    logger.info('Elements results calculated')

    # Write clusters results
    postp.write_cluster_results(
        genome=genome,
        results=(clusters_results, global_info_results),
        directory=output_directory,
        file=clusters_output_file,
        sorter=sorted_list_elements,
        is_gzip=gzip
    )
    logger.info('Clusters results calculated')

    # Cluster plot
    if clustplot:
        info_cluster_plots = cplot.make_clustplot(
            elements_results,
            clusters_results,
            global_info_results,
            directory=output_directory
        )
        for message in info_cluster_plots:
            logger.info(message)

    # Quantile-quantile plot
    if qqplot:
        if len(elements) < 30:
            logger.warning('QQ-plot generated for less than 30 elements')
        input_qqplot_file = os.path.join(output_directory, elements_output_file)
        output_qqplot_file = os.path.join(output_directory, 'quantile_quantile_plot.png')
        qplot.make_qqplot(
            file=input_qqplot_file,
            output=output_qqplot_file,
            col_values='P_ANALYTICAL',
            top=10
        )
        logger.info('QQ-plot generated at: {}'.format(output_qqplot_file))
    logger.info('Finished')
Example #27
"""Get all titles currently in the RSS feeds for testing purposes."""
import configparser
import logging
import daiquiri
from cds_paper_bot import read_feed, format_title

daiquiri.setup(level=logging.ERROR)


def load_config(feed_file):
    """Load configs into dict."""
    config_dict = {}
    config = configparser.RawConfigParser()
    # load the feed config
    config.read(feed_file)
    config_dict = {}
    for key in config:
        config_dict[key] = config[key]
    return config_dict


def main():
    """Load the feeds, print all titles in a format useful for dumping into test_format_title.py."""
    config = load_config("feeds.ini")
    # print(config)
    for experiment in config:
        for pub_type in config[experiment]:
            print(f"            # {experiment} {pub_type}")
            this_feed = read_feed(config[experiment][pub_type])
            if this_feed:
                this_feed_entries = this_feed["entries"]
Example #28
You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import sys
import daiquiri
import numpy as np

from recommendation_engine.config.params_scoring import ScoringParams
from recommendation_engine.config.path_constants import PMF_MODEL_PATH, PACKAGE_TAG_MAP, \
    TRAINING_DATA_ITEMS, PRECOMPUTED_STACKS, ID_TO_PACKAGE_MAP, PACKAGE_TO_ID_MAP
from recommendation_engine.model.pmf_prediction import PMFScoring
from recommendation_engine.predictor.abstract_recommender import AbstractRecommender
from recommendation_engine.utils.fileutils import load_rating

daiquiri.setup(level=logging.WARNING)
_logger = daiquiri.getLogger(__name__)


class PMFRecommendation(AbstractRecommender):
    """Online recommendation logic.

    This class contains the online recommendation logic that will be used to
    score packages to the user's preferences at runtime. We need to run a
    single step of PMF and multiply the obtained user vector with the
    precomputed latent item vectors.
    """
    def __init__(self,
                 M,
                 data_store,
                 num_latent=ScoringParams.num_latent_factors):
Example #29
def prepare_service(args=None, conf=None,
                    default_config_files=None,
                    log_to_std=False, logging_level=None,
                    skip_log_opts=False):
    if conf is None:
        conf = cfg.ConfigOpts()
    # FIXME(jd) Use the pkg_entry info to register the options of these libs
    db_options.set_defaults(conf)

    # Register our own Gnocchi options
    for group, options in opts.list_opts():
        conf.register_opts(list(options),
                           group=None if group == "DEFAULT" else group)

    conf.register_cli_opts(opts._cli_options)

    workers = utils.get_default_workers()
    conf.set_default("workers", workers, group="metricd")
    conf.set_default("parallel_operations", workers)

    conf(args, project='gnocchi', validate_default_values=True,
         default_config_files=default_config_files,
         version=gnocchi.__version__)

    utils.parallel_map.MAX_WORKERS = conf.parallel_operations

    if not log_to_std and (conf.log_dir or conf.log_file):
        outputs = [daiquiri.output.File(filename=conf.log_file,
                                        directory=conf.log_dir)]
    else:
        outputs = [daiquiri.output.STDERR]

    if conf.use_syslog:
        outputs.append(
            daiquiri.output.Syslog(facility=conf.syslog_log_facility))

    if conf.use_journal:
        outputs.append(daiquiri.output.Journal())

    daiquiri.setup(outputs=outputs)
    if logging_level is None:
        if conf.debug:
            logging_level = logging.DEBUG
        elif conf.verbose:
            logging_level = logging.INFO
        else:
            logging_level = logging.WARNING
    logging.getLogger("gnocchi").setLevel(logging_level)

    # HACK(jd) I'm not happy about that, fix AP class to handle a conf object?
    archive_policy.ArchivePolicy.DEFAULT_AGGREGATION_METHODS = (
        conf.archive_policy.default_aggregation_methods
    )

    # If no coordination URL is provided, default to using the indexer as
    # coordinator
    if conf.coordination_url is None:
        if conf.storage.driver == "redis":
            conf.set_default("coordination_url",
                             conf.storage.redis_url)
        elif conf.incoming.driver == "redis":
            conf.set_default("coordination_url",
                             conf.incoming.redis_url)
        else:
            parsed = urlparse.urlparse(conf.indexer.url)
            proto, _, _ = parsed.scheme.partition("+")
            parsed = list(parsed)
            # Set proto without the + part
            parsed[0] = proto
            conf.set_default("coordination_url",
                             urlparse.urlunparse(parsed))

    if not skip_log_opts:
        LOG.info("Gnocchi version %s", gnocchi.__version__)
        conf.log_opt_values(LOG, logging.DEBUG)

    return conf
Example #30
import logging
import os
from datetime import datetime

import daiquiri
from soh.server.server import ApacheServer
from soh.server.server import ApacheTomcatServer
from soh.server.server import AuditServer
from soh.server.server import AuthServer
from soh.server.server import GmnServer
from soh.server.server import JettyServer
from soh.server.server import LdapServer
from soh.server.server import PackageServer
from soh.server.server import PortalServer
from soh.server.server import Server
from soh.server.server import SolrServer
from soh.server.server import TomcatServer

cwd = os.path.dirname(os.path.realpath(__file__))
logfile = cwd + "/health_check.log"
daiquiri.setup(level=logging.WARN,
               outputs=(daiquiri.output.File(logfile), "stdout"))

logger = daiquiri.getLogger("health_check.py: " + __name__)

new_status = dict()


async def check_read_only(hosts):
    for host in hosts:
        await do_read_only(host)


async def do_read_only(host):
    st = datetime.now()
    host_ro = await server.read_only(host=host)
    logger.warning(f"Run time read only {host}: {datetime.now() - st}")
Example #31
                        +-------------------+                      [ r2 ]
                                                                   [ r3 ]
                                                                    ....

"""

import json
import os

import boto3
import daiquiri

from wellcome_aws_utils import s3_utils, sns_utils
from wellcome_aws_utils.lambda_utils import log_on_error

daiquiri.setup(level=os.environ.get('LOG_LEVEL', 'INFO'))


@log_on_error
def main(event, _ctxt=None, s3_client=None, sns_client=None):
    topic_arn = os.environ["TOPIC_ARN"]

    s3_client = s3_client or boto3.client('s3')
    sns_client = sns_client or boto3.client('sns')

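    # The handler expects exactly one S3 record per invocation.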
    s3_events = s3_utils.parse_s3_record(event=event)
    assert len(s3_events) == 1
    s3_event = s3_events[0]

    resp = s3_client.get_object(Bucket=s3_event['bucket_name'],
                                Key=s3_event['object_key'])
Example #32
# import packages for data manipulation
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import janitor
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GroupKFold
from sklearn.metrics import mean_squared_error, mean_squared_log_error, mean_absolute_error, r2_score
from xgboost import XGBRegressor

#logging package
import daiquiri,logging
daiquiri.setup(level=logging.INFO)
logger = daiquiri.getLogger()


#Loading data
logger.info("Reading the data file...")
df = pd.read_csv('../data/avocado.csv')

logger.info("Quick preprocessing of data...")
# Removing index column
df.drop('Unnamed: 0', axis=1, inplace=True)
# Removing records with the TotalUS region, assuming it is just the average of all other regions
df = df.loc[df.region!='TotalUS'].reset_index(drop=True)
# Converting Date to datetime and sorting chronologically
df['Date'] = pd.to_datetime(df['Date'])
df = df.sort_values(['region','Date'])
Example #33
"""Deeposlandia package
"""

from configparser import ConfigParser
import logging
import os
import sys

import daiquiri

__version__ = '0.4'

daiquiri.setup(
    level=logging.INFO,
    outputs=(daiquiri.output.Stream(
        formatter=daiquiri.formatter.ColorFormatter(
            fmt=("%(asctime)s :: %(levelname)s :: %(module)s :: "
                 "%(funcName)s : %(color)s%(message)s%(color_stop)s"))), ))
logger = daiquiri.getLogger("root")

_DEEPOSL_CONFIG = os.getenv('DEEPOSL_CONFIG')
_DEEPOSL_CONFIG = _DEEPOSL_CONFIG if _DEEPOSL_CONFIG is not None else "config.ini"
config = ConfigParser()
if os.path.isfile(_DEEPOSL_CONFIG):
    config.read(_DEEPOSL_CONFIG)
else:
    logger.error("No file config.ini!")
    sys.exit(1)
Example #34
"""Statistical methods used for analyzing the shared bike data
"""

import logging
import daiquiri

import numpy as np
import pandas as pd
from dateutil import parser
from workalendar.europe import France
from sklearn.cluster import KMeans
import xgboost as xgb

from jitenshea import config

daiquiri.setup(logging.INFO)
logger = daiquiri.getLogger("stats")

# French Calendar
cal = France()

SEED = 2018
np.random.seed(SEED)

CLUSTER_ACT_PATH_CSV = 'jitenshea/data/cluster_activite.csv'


###################################
###         CLUSTER ACTIVITE
###################################
Example #35
import daiquiri
import daiquiri.formatter
import logging
import sys
import test_print_log_sub_module

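# sys.path[0] is the directory of the script used to start the interpreter, so the log file sits next to this script.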
log_file = sys.path[0] + "/test.log"
daiquiri.setup(
    level=logging.INFO,
    outputs=(
        daiquiri.output.Stream(formatter=daiquiri.formatter.ColorFormatter(
            fmt="%(asctime)s [PID %(process)d] [%(levelname)s] "
                "%(name)s:%(lineno)d %(message)s")),
        daiquiri.output.File(log_file,
                             formatter=daiquiri.formatter.ColorFormatter(
                                 fmt="%(asctime)s [PID %(process)d] "
                                     "[%(levelname)s] %(name)s:%(lineno)d "
                                     "%(message)s")),
    )
)

logger = daiquiri.getLogger(__name__)

if __name__ == "__main__":
    logger.info("log some info")
    logger.warning("log some warning")
    logger.error("log some error")
    logger.debug("log some debug")
    logger.info("some data is:%s" % ("testdata"))
    test_print_log_sub_module.print_log()
Example #36
def setup_logging():
    """Configure logging."""
    daiquiri.setup(level=logging.DEBUG, outputs=(daiquiri.output.STDOUT, ))
Example #37
# -*- coding: utf-8 -*-
""":Mod: __init__

:Synopsis:

:Author:
    servilla
  
:Created:
    12/30/17
"""

import logging
import os
import sys

sys.path.insert(0, os.path.abspath('..'))

import daiquiri

cwd = os.path.dirname(os.path.realpath(__file__))
logfile = cwd + '/tests.log'
daiquiri.setup(level=logging.INFO, outputs=(daiquiri.output.File(logfile), ))


def main():
    return 0


if __name__ == "__main__":
    main()
Example #38

import daiquiri
import logging

from itertools import groupby
from datetime import datetime, timedelta
from collections import namedtuple

import pandas as pd

from jitenshea import config
from jitenshea.iodb import db


daiquiri.setup(level=logging.INFO)
logger = daiquiri.getLogger(__name__)

CITIES = ('bordeaux',
          'lyon')
TimeWindow = namedtuple('TimeWindow', ['start', 'stop', 'order_reference_date'])


def processing_daily_data(rset, window):
    """Re arrange when it's necessary the daily transactions data

    rset: ResultProxy by SQLAlchemy
        Result of a SQL query

    Return a list of dicts
    """
Example #39
    def setUpClass(self):
        super(TestCase, self).setUpClass()

        self.conf = service.prepare_service([],
                                            conf=utils.prepare_conf(),
                                            default_config_files=[],
                                            logging_level=logging.DEBUG)

        if not os.getenv("GNOCCHI_TEST_DEBUG"):
            daiquiri.setup(outputs=[])

        py_root = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__),
                '..',
            ))
        self.conf.set_override('paste_config',
                               os.path.join(py_root, 'rest', 'api-paste.ini'),
                               group="api")
        self.conf.set_override('policy_file',
                               os.path.join(py_root, 'rest', 'policy.json'),
                               group="oslo_policy")

        # NOTE(jd) This allows to test S3 on AWS
        if not os.getenv("AWS_ACCESS_KEY_ID"):
            self.conf.set_override('s3_endpoint_url',
                                   os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                                   group="storage")
            self.conf.set_override('s3_access_key_id',
                                   "gnocchi",
                                   group="storage")
            self.conf.set_override('s3_secret_access_key',
                                   "anythingworks",
                                   group="storage")

        self.index = indexer.get_driver(self.conf)

        # NOTE(jd) So, some driver, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with the
        # checkfirst=True, so what we do here is we force the upgrade code
        # path to be sequential to avoid race conditions as the tests run
        # in parallel.
        self.coord = coordination.get_coordinator(
            self.conf.storage.coordination_url,
            str(uuid.uuid4()).encode('ascii'))

        self.coord.start(start_heart=True)

        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            self.index.upgrade()

        self.coord.stop()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
        self.conf.set_override('driver', storage_driver, 'storage')
        if storage_driver == 'ceph':
            self.conf.set_override('ceph_conffile', os.getenv("CEPH_CONF"),
                                   'storage')
Example #40
E-mail: zhangsuofei at njupt.edu.cn | hangyu5 at illinois.edu
"""

import tensorflow as tf
import tensorflow.contrib.slim as slim
from config import cfg, get_coord_add, get_dataset_size_train, get_num_classes, get_create_inputs
import time
import numpy as np
import sys
import os
import capsnet_em as net

import logging
import daiquiri

daiquiri.setup(level=logging.DEBUG)
logger = daiquiri.getLogger(__name__)


def main(args):
    """Get dataset hyperparameters."""
    assert len(args) == 2 and isinstance(args[1], str)
    dataset_name = args[1]
    logger.info('Using dataset: {}'.format(dataset_name))

    """Set reproduciable random seed"""
    tf.set_random_seed(1234)

    coord_add = get_coord_add(dataset_name)
    dataset_size = get_dataset_size_train(dataset_name)
    num_classes = get_num_classes(dataset_name)
Example #41
import logging
import psycopg2
import daiquiri
from psycopg2.extras import DictCursor

from repo.app import REVIEW_DB_SETTINGS
from review.compare import diffs
from review.database import query_review_reports_old_metrics, update_metrics

daiquiri.setup(
    level=logging.DEBUG,
    outputs=(
        daiquiri.output.File('poll-errors.log', level=logging.ERROR),
        daiquiri.output.RotatingFile(
            'poll-debug.log',
            level=logging.DEBUG,
            # 10 MB
            max_size_bytes=10000000)))


def get_review_db():
    db = psycopg2.connect(**REVIEW_DB_SETTINGS)
    return db


def _update_metrics():
    before = {}
    before_list = []
    after = {}
    after_list = []
    review_db = get_review_db()