Example #1
def main(args):
    logger = log.setup_logger(args)

    # Lets cuDNN benchmark conv implementations and choose the fastest.
    # Only good if sizes stay the same within the main loop!
    torch.backends.cudnn.benchmark = True

    in_set, out_set, in_loader, out_loader = mk_id_ood(args, logger)

    classes_per_group = np.load(args.group_config)
    args.num_groups = len(classes_per_group)
    group_slices = get_group_slices(classes_per_group)
    group_slices = group_slices.cuda()  # .cuda() returns a copy; assign it back
    num_logits = len(in_set.classes) + args.num_groups

    logger.info(f"Loading model from {args.model_path}")
    model = resnetv2.KNOWN_MODELS[args.model](head_size=num_logits)

    state_dict = torch.load(args.model_path)
    model.load_state_dict_custom(state_dict['model'])

    model = torch.nn.DataParallel(model)
    model = model.cuda()

    start_time = time.time()
    run_eval(model, in_loader, out_loader, logger, group_slices)
    end_time = time.time()

    logger.info("Total running time: {}".format(end_time - start_time))
Example #2
def main(args):
    logger = log.setup_logger(args)

    torch.backends.cudnn.benchmark = True

    in_set, out_set, in_loader, out_loader = mk_id_ood(args, logger)

    logger.info(f"Loading model from {args.model_path}")
    model = resnetv2.KNOWN_MODELS[args.model](head_size=len(in_set.classes))

    state_dict = torch.load(args.model_path)
    model.load_state_dict_custom(state_dict['model'])

    model = torch.nn.DataParallel(model)
    model = model.cuda()

    start_time = time.time()
    run_eval(model,
             in_loader,
             out_loader,
             logger,
             args,
             num_classes=len(in_set.classes))
    end_time = time.time()

    logger.info("Total running time: {}".format(end_time - start_time))
Example #3
def main(args):
    logger = log.setup_logger(args)

    # Lets cuDNN benchmark conv implementations and choose the fastest.
    # Only good if sizes stay the same within the main loop!
    torch.backends.cudnn.benchmark = True

    train_set, val_set, train_loader, val_loader = mktrainval(args, logger)

    logger.info(f"Loading model from {args.model_path}")
    model = resnetv2.KNOWN_MODELS[args.model](head_size=len(train_set.classes))
    state_dict = torch.load(args.model_path)
    model.load_state_dict_custom(state_dict['model'])

    logger.info("Moving model onto all GPUs")
    model = torch.nn.DataParallel(model)
    model = model.cuda()

    logger.info('Tuning hyper-parameters...')
    sample_mean, precision, best_regressor, best_magnitude \
        = tune_mahalanobis_hyperparams(args, model, len(val_set.classes), train_loader, val_loader, logger)

    logger.info('Saving results...')
    save_dir = os.path.join(args.logdir, args.name)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    np.save(
        os.path.join(save_dir, 'results'),
        np.array([
            sample_mean, precision, best_regressor.coef_,
            best_regressor.intercept_, best_magnitude
        ]))
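
Note: because the saved list mixes arrays, regressor coefficients, and a
scalar, NumPy stores it as an object array. A hedged sketch of reading it back
(paths as above):

# Object arrays require allow_pickle=True when loading.
results = np.load(os.path.join(save_dir, 'results.npy'), allow_pickle=True)
sample_mean, precision, coef, intercept, best_magnitude = results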
Example #4
def run():
    parser = argparse.ArgumentParser(description='Run LTLS experiments')
    parser.add_argument('dataset', type=str, help='dataset name')
    args = parser.parse_args()
    path_trai = 'data/{0}/{0}.train'.format(args.dataset)
    path_vali = 'data/{0}/{0}.heldout'.format(args.dataset)
    path_test = 'data/{0}/{0}.test'.format(args.dataset)
    path_trsr = 'data/{0}/{0}.train_sorted'.format(args.dataset)
    path_vasr = 'data/{0}/{0}.heldout_sorted'.format(args.dataset)
    path_tesr = 'data/{0}/{0}.test_sorted'.format(args.dataset)
    model_dir = 'models/{0}'.format(args.dataset)
    log = 'logs/{0}'.format(args.dataset)
    setup_logger(log)

    params = {
        # Params fields: path_train, path_validation, path_test, policy,
        # model_dir, it, l, multilabel, es, validate, n_features
        # ('bibtex' passes one extra trailing argument).
        'sector':
            Params(path_trai, path_vali, path_test, 'ranking', model_dir,
                   7, 0, False, False, True, 55197),
        'bibtex':
            Params(path_trsr, path_vasr, path_tesr, 'seriation', model_dir,
                   2, 0, True, False, True, 1837, 1),
    }

    p = params[args.dataset]
    train._train(p.path_train, p.path_validation, p.policy, p.model_dir,
                 p.it, p.l, p.multilabel, p.es, p.validate, p.n_features)

    predict._predict(p.path_test, p.model_dir, p.multilabel, p.n_features)
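
Note: `Params` itself is not shown. A hypothetical definition consistent with
the accesses above; the final field's name and default are guesses based on
the extra trailing argument in the 'bibtex' entry.

from collections import namedtuple

Params = namedtuple('Params', [
    'path_train', 'path_validation', 'path_test', 'policy', 'model_dir',
    'it', 'l', 'multilabel', 'es', 'validate', 'n_features', 'extra'
])
Params.__new__.__defaults__ = (None,)  # only the hypothetical 'extra' field has a default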
Example #5
The types can be retrieved using the :py:class:`utils.events.EventTool` by calling
:py:meth:`utils.events.EventTool.all_event_types` on it.
"""
from datetime import datetime
import logging

import pytest

from fixtures.artifactor_plugin import art_client, get_test_idents
from fixtures.pytest_store import store
from utils.datafile import template_env
from utils.log import setup_logger
from utils.wait import wait_for, TimedOutError

# xxx better logger name
logger = setup_logger(logging.getLogger("events"))


class HTMLReport(object):
    def __init__(self, test_name, registered_events, all_events):
        self.registered_events = registered_events
        self.test_name = test_name
        self.all_events = all_events

    def generate(self):
        template = template_env.get_template("event_testing.html")
        return template.render(
            test_name=self.test_name,
            registered_events=self.registered_events,
            all_events=self.all_events,
        )
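
A hypothetical use of HTMLReport, assuming the "event_testing.html" template is
on template_env's search path (the names below are placeholders):

report = HTMLReport("test_vm_create", registered_events=[], all_events=[])
with open("event_report.html", "w") as f:
    f.write(report.generate())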

Example #6
def main():
    flag_values = [
        ("method", FLAGS.method),
        ("net", FLAGS.net),
        ("inducing", FLAGS.n_inducing),
        ("beta0", FLAGS.beta0),
        ("gamma", FLAGS.gamma),
        ("niter", FLAGS.n_iters),
        ("bs", FLAGS.batch_size // 2),
        ("m", FLAGS.batch_size // 2),
        ("lr", FLAGS.learning_rate),
        ("measure", FLAGS.measure),
        ("hyper_rate", FLAGS.hyper_rate),
        ("block", FLAGS.block_size),
        ("note", FLAGS.note),
    ]
    # Encode flags as "name@value" pairs joined by "$" to name the result dir.
    flag_str = "$".join(["@".join([i[0], str(i[1])]) for i in flag_values])
    result_path = os.path.join("results", "classification", FLAGS.dataset,
                               flag_str)
    logger = setup_logger("classification",
                          __file__,
                          result_path,
                          filename="log")

    np.random.seed(1234)
    tf.set_random_seed(1234)

    # Load the dataset (MNIST or CIFAR-10)
    if FLAGS.dataset == "mnist":
        train_x, train_y, valid_x, valid_y, test_x, test_y = load_mnist_realval(
            dtype=np.float64)
        train_x = np.vstack([train_x, valid_x])
        train_y = np.vstack([train_y, valid_y])
        input_shape = [1, 28, 28]
    elif FLAGS.dataset == "cifar10":
        train_x, train_y, test_x, test_y = load_cifar10(dtype=np.float64)
        input_shape = [3, 32, 32]
    else:
        raise NotImplementedError()

    # Rescale inputs to [-1, 1].
    train_x = 2 * train_x - 1
    test_x = 2 * test_x - 1

    train = tf.data.Dataset.from_tensor_slices((train_x, train_y))
    test = tf.data.Dataset.from_tensor_slices((test_x, test_y))
    train = train.shuffle(buffer_size=1000).batch(FLAGS.batch_size //
                                                  2).repeat()
    test = test.batch(FLAGS.batch_size * 4)

    if FLAGS.measure == "test_x":
        measure = tf.data.Dataset.from_tensor_slices(test_x)
    else:
        measure = tf.data.Dataset.from_tensor_slices(train_x)
    measure = measure.shuffle(buffer_size=1000).batch(FLAGS.batch_size //
                                                      2).repeat()
    measure_iterator = measure.make_one_shot_iterator()
    measure_batch = measure_iterator.get_next()

    handle = tf.placeholder(tf.string, shape=[])
    iterator = tf.data.Iterator.from_string_handle(handle, train.output_types,
                                                   train.output_shapes)
    next_batch = iterator.get_next()

    train_iterator = train.make_one_shot_iterator()
    test_iterator = test.make_initializable_iterator()

    sess = tf.Session()

    train_handle = sess.run(train_iterator.string_handle())
    test_handle = sess.run(test_iterator.string_handle())

    Data = namedtuple("Data", [
        "next_batch", "measure_batch", "handle", "train_handle", "test_handle",
        "test_iterator", "train_x", "train_y"
    ])
    data = Data(next_batch, measure_batch, handle, train_handle, test_handle,
                test_iterator, train_x, train_y)

    block_sizes = [FLAGS.block_size] * 3
    block_strides = [1, 2, 2]
    with tf.variable_scope("prior"):
        resnet_kern = ResnetKernel(
            input_shape=input_shape,
            block_sizes=block_sizes,
            block_strides=block_strides,
            kernel_size=3,
            recurse_kern=ReLUKernel(),
            var_weight=1.,
            var_bias=0.,
            conv_stride=1,
            data_format="NCHW",
            dtype=tf.float64,
        )

    sess.run(tf.variables_initializer(tf.trainable_variables("prior")))

    # SVGP
    if FLAGS.method == "svgp":
        svgp(logger, sess, data, resnet_kern)
    elif FLAGS.method == "gpnet":
        gpnet(logger, sess, data, resnet_kern, dtype=tf.float64)
    elif FLAGS.method == "gpnet_nonconj":
        gpnet_nonconj(logger, sess, data, resnet_kern, dtype=tf.float64)
    elif FLAGS.method == "fbnn":
        fbnn(logger, sess, data, resnet_kern, dtype=tf.float64)
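
Note: a hedged sketch of how the feedable-iterator setup above is typically
driven in TF1; `train_op` and `metric` are hypothetical ops built from
`next_batch`, not part of the original code.

for it in range(FLAGS.n_iters):
    sess.run(train_op, feed_dict={handle: train_handle})
    if (it + 1) % 1000 == 0:
        sess.run(test_iterator.initializer)  # rewind the test set
        while True:
            try:
                sess.run(metric, feed_dict={handle: test_handle})
            except tf.errors.OutOfRangeError:
                break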
Example #7
def main():
    flag_values = [
        ("method", FLAGS.method),
        ("net", FLAGS.net),
        ("res", FLAGS.residual),
        ("nh", FLAGS.n_hidden),
        ("nl", FLAGS.n_layer),
        ("inducing", FLAGS.n_inducing),
        ("pre", FLAGS.pretrain),
        ("hyper_lr", FLAGS.hyper_rate),
        ("hyper_anneal", FLAGS.hyper_anneal),
        ("beta0", FLAGS.beta0),
        ("gamma", FLAGS.gamma),
        ("niter", FLAGS.n_iters),
        ("bs", FLAGS.batch_size),
        ("m", FLAGS.m),
        ("lr", FLAGS.learning_rate),
        ("measure", FLAGS.measure),
        ("lr_anneal", FLAGS.lr_anneal),
        ("fix_rf_ls", FLAGS.fix_rf_ls),
        ("note", FLAGS.note),
    ]
    flag_str = "$".join(["@".join([i[0], str(i[1])]) for i in flag_values])
    result_path = os.path.join("results", "regression", FLAGS.dataset,
                               flag_str, "run_{}".format(FLAGS.split))
    logger = setup_logger("regression", __file__, result_path, filename="log")
    test_writer = tf.summary.FileWriter(result_path)

    tf.set_random_seed(1234)
    np.random.seed(1234)
    uci = load_regression(FLAGS.dataset)
    if hasattr(uci, "load"):
        uci.load(split=FLAGS.split)
    train_x, train_y = uci.train_x, uci.train_y
    test_x, test_y = uci.test_x, uci.test_y
    n_train, _ = train_x.shape

    train = tf.data.Dataset.from_tensor_slices((train_x, train_y))
    test = tf.data.Dataset.from_tensor_slices((test_x, test_y))
    train = train.shuffle(buffer_size=10000).batch(FLAGS.batch_size).repeat()
    test = test.batch(FLAGS.batch_size)

    if FLAGS.measure == "uniform":
        raise NotImplementedError()
    else:
        if FLAGS.measure == "test":
            measure = tf.data.Dataset.from_tensor_slices(test_x)
        else:
            measure = tf.data.Dataset.from_tensor_slices(train_x)
        measure = measure.shuffle(buffer_size=10000).batch(FLAGS.m).repeat()
        measure_iterator = measure.make_one_shot_iterator()
        measure_batch = measure_iterator.get_next()

    handle = tf.placeholder(tf.string, shape=[])
    iterator = tf.data.Iterator.from_string_handle(handle, train.output_types,
                                                   train.output_shapes)
    next_batch = iterator.get_next()

    train_iterator = train.make_one_shot_iterator()
    test_iterator = test.make_initializable_iterator()

    sess = tf.Session()

    train_handle = sess.run(train_iterator.string_handle())
    test_handle = sess.run(test_iterator.string_handle())

    Data = namedtuple("Data", [
        "next_batch", "measure_batch", "handle", "train_handle", "test_handle",
        "test_iterator", "orig"
    ])
    # measure_batch is attached below via _replace (non-SVGP methods only).
    data = Data(next_batch,
                None,
                handle,
                train_handle,
                test_handle,
                test_iterator,
                orig=uci)

    # SVGP
    if FLAGS.method == "svgp":
        svgp(logger, sess, data, test_writer=test_writer)
    else:
        gp, _ = exact_gp(logger, sess, data, n_epochs=FLAGS.pretrain)
        if (FLAGS.measure == "noise") and (FLAGS.pretrain > 0):
            logger.info(sess.run(gp.kern.lengthscales))
            # Jitter the measure points with Gaussian noise scaled by the
            # learned lengthscales so they spread around the data manifold.
            measure_batch += tf.random_normal(
                tf.shape(measure_batch),
                dtype=tf.float64) * (gp.kern.lengthscales / np.sqrt(2))
        data = data._replace(measure_batch=measure_batch)
        # fBNN
        if FLAGS.method == "fbnn":
            fbnn(logger, sess, data, prior_gp=gp, test_writer=test_writer)
        # GPIN
        if FLAGS.method == "gpnet":
            gpnet(logger, sess, data, prior_gp=gp, test_writer=test_writer)
        # weight-space RFE
        if FLAGS.method == "rfe":
            rfe(logger, sess, data, prior_gp=gp, test_writer=test_writer)

    sess.close()
Example #8
def main(args):
    logger = log.setup_logger(args)
    writer = SummaryWriter(pjoin(args.logdir, args.name, 'tensorboard_log'))

    # Lets cuDNN benchmark conv implementations and choose the fastest.
    # Only good if sizes stay the same within the main loop!
    torch.backends.cudnn.benchmark = True

    if args.finetune_type == 'group_softmax':
        classes_per_group = np.load(args.group_config)
        args.num_groups = len(classes_per_group)
        group_slices = get_group_slices(classes_per_group)
        group_slices = group_slices.cuda()  # .cuda() returns a copy; assign it back
    else:
        classes_per_group, args.num_groups, group_slices = None, None, None

    train_set, valid_set, train_loader, valid_loader = mktrainval(args, logger)

    num_logits = len(train_set.classes)
    if args.finetune_type == 'group_softmax':
        num_logits = len(train_set.classes) + args.num_groups

    model = resnetv2.KNOWN_MODELS[args.model](
        head_size=num_logits,
        zero_head=True,
        num_block_open=args.num_block_open)

    model_path = pjoin(args.bit_pretrained_dir, args.model + '.npz')
    logger.info(f"Loading model from {model_path}")
    model.load_from(np.load(model_path))

    logger.info("Moving model onto all GPUs")
    model = torch.nn.DataParallel(model)

    # Optionally resume from a checkpoint.
    # Load it to CPU first as we'll move the model to GPU later.
    # This way, we save a little bit of GPU memory when loading.
    step = 0

    # Note: no weight-decay!
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    optim = torch.optim.SGD(trainable_params, lr=args.base_lr, momentum=0.9)

    # Resume fine-tuning if we find a saved model.
    savename = pjoin(args.logdir, args.name, "bit.pth.tar")
    try:
        logger.info(f"Model will be saved in '{savename}'")
        checkpoint = torch.load(savename, map_location="cpu")
        logger.info(f"Found saved model to resume from at '{savename}'")

        step = checkpoint["step"]
        model.load_state_dict(checkpoint["model"])
        optim.load_state_dict(checkpoint["optim"])
        logger.info(f"Resumed at step {step}")
    except FileNotFoundError:
        logger.info("Fine-tuning from BiT")

    model = model.cuda()
    optim.zero_grad()

    model.train()

    mixup = finetune_utils.get_mixup(len(train_set))
    cri = torch.nn.CrossEntropyLoss().cuda()

    logger.info("Starting finetuning!")
    accum_steps = 0
    mixup_l = np.random.beta(mixup, mixup) if mixup > 0 else 1

    for x, y in recycle(train_loader):

        # Schedule sending to GPU(s)
        x = x.cuda()
        y = y.cuda()

        # Update learning-rate, including stop training if over.
        lr = finetune_utils.get_lr(step, len(train_set), args.base_lr)

        if lr is None:
            break
        for param_group in optim.param_groups:
            param_group["lr"] = lr

        if mixup > 0.0:
            x, y_a, y_b = mixup_data(x, y, mixup_l)

        # compute output
        logits = model(x)

        if args.finetune_type == 'group_softmax':
            if mixup > 0.0:
                c = mixup_criterion_group(cri, logits, y_a, y_b, mixup_l,
                                          group_slices)
            else:
                c = calc_group_softmax_loss(cri, logits, y, group_slices)
        else:
            if mixup > 0.0:
                c = mixup_criterion_flat(cri, logits, y_a, y_b, mixup_l)
            else:
                c = cri(logits, y)
        c_num = float(c.data.cpu().numpy())  # Also ensures a sync point.

        # Accumulate grads
        (c / args.batch_split).backward()
        accum_steps += 1

        accstep = f" ({accum_steps}/{args.batch_split})" if args.batch_split > 1 else ""
        logger.info(f"[step {step}{accstep}]: loss={c_num:.5f} (lr={lr:.1e})")  # pylint: disable=logging-format-interpolation
        logger.flush()

        writer.add_scalar('Train/loss', c_num, step)

        # Update params. As written before re-indenting, step and accum_steps
        # were reset every iteration, so the optimizer never stepped when
        # batch_split > 1; the whole block belongs inside the condition.
        if accum_steps == args.batch_split:
            optim.step()
            optim.zero_grad()
            step += 1
            accum_steps = 0
            # Sample new mixup ratio for next batch
            mixup_l = np.random.beta(mixup, mixup) if mixup > 0 else 1

            # Run evaluation and save the model.
            if args.eval_every and step % args.eval_every == 0:
                run_eval(model, valid_loader, logger, step, writer, group_slices)
                if args.save:
                    torch.save(
                        {
                            "step": step,
                            "model": model.state_dict(),
                            "optim": optim.state_dict(),
                        }, savename)

    # Final eval at end of training.
    run_eval(model, valid_loader, logger, step, writer, group_slices)
    if args.save:
        torch.save(
            {
                "step": step,
                "model": model.state_dict(),
                "optim": optim.state_dict(),
            }, savename)
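
Note: `recycle` is assumed to behave like the helper of the same name in the
BiT reference code: an endless generator over the loader, so the loop above
ends via the LR schedule (get_lr returning None) rather than by epoch count.
A minimal sketch under that assumption:

def recycle(iterable):
    # Re-iterate a finite DataLoader forever without caching batches
    # (unlike itertools.cycle, which would hold all iterates in memory).
    while True:
        for batch in iterable:
            yield batch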
Example #9
a certain object's id.

The default match algorithm is ==. An Event also accepts a match function in order to
change the default match type.
"""
import logging
import pytest

from utils.appliance import get_or_create_current_appliance
from utils.events import EventListener
from utils.log import setup_logger
from utils.wait import wait_for, TimedOutError


# xxx better logger name
logger = setup_logger(logging.getLogger('events'))


class EventListenerWrapper(object):

    def __init__(self):
        self._cur_appliance = get_or_create_current_appliance()
        self._instances = []

    def _register_instance(self, inst):
        self._instances.append(inst)

    def _unregister_instance(self):
        self._instances.pop()

    def new_instance(self):
Example #10
from utils.appliance import Appliance
from utils.trackerbot import api
from utils.log import setup_logger
from slumber.exceptions import HttpClientError


token = docker_conf['gh_token']
owner = docker_conf['gh_owner']
repo = docker_conf['gh_repo']

tapi = api()

CONT_LIMIT = docker_conf['workers']
DEBUG = docker_conf.get('debug', False)

logger = setup_logger(logging.getLogger('prt'))

# Stop pika logs from propagating to the root logger
logging.getLogger("pika").propagate = False


def send_message_to_bot(msg):

    required_fields = set(['rabbitmq_url', 'gh_queue', 'gh_channel', 'gh_message_type'])
    if not required_fields.issubset(docker_conf.viewkeys()):
        logger.warning("Skipping - docker.yaml doesn't have {}".format(required_fields))
        return

    logger.info("Github PR bot: about to send '{}'".format(msg))
    url = docker_conf['rabbitmq_url']
    queue = docker_conf['gh_queue']
Example #11
@author: mrmopoz
'''

import threading
from utils.log import setup_logger
from process.state import state
from process.video import video
from process.command import command
from brain.controller import controller
from collections import deque

if __name__ == '__main__':
    # 1. Set up logs
    # logger for state
    state_logger = setup_logger('state_logger', './logs/tello_state.log')
    # logger for commands and responses from the UAV
    command_logger = setup_logger('command_logger', './logs/tello_command.log')
    # logger for errors
    error_logger = setup_logger('error_logger', './logs/tello_error.log')
    # commands sent to the UAV
    command_queue = deque()
    # responses received from the UAV
    respond_queue = deque()

    fly_mode = 'program'  # or 'learn'
    program_path = './fly_programs/program_1'

    # controller thread create
    controllerThread = threading.Thread(target=controller,
                                        args=(command_queue, respond_queue,
Example #12
def main():
    result_path = os.path.join("results", "toy", FLAGS.dataset, FLAGS.method)
    logger = setup_logger("toy", __file__, result_path, filename="log")

    tf.set_random_seed(1234)
    np.random.seed(1234)

    toy = load_snelson_data(n=100)
    train_x, train_y = toy.train_x, toy.train_y
    test_x, test_y = toy.test_x, np.zeros_like(toy.test_x)  # dummy test targets

    train = tf.data.Dataset.from_tensor_slices((train_x, train_y))
    test = tf.data.Dataset.from_tensor_slices((test_x, test_y))
    train = train.shuffle(buffer_size=100).batch(FLAGS.batch_size).repeat()
    test = test.batch(FLAGS.batch_size)

    if FLAGS.measure == "uniform":
        measure_batch = tf.random_uniform([FLAGS.m],
                                          toy.x_min,
                                          toy.x_max,
                                          dtype=tf.float64)[:, None]
    else:
        measure = tf.data.Dataset.from_tensor_slices(train_x)
        measure = measure.shuffle(buffer_size=20).batch(FLAGS.m).repeat()
        measure_iterator = measure.make_one_shot_iterator()
        measure_batch = measure_iterator.get_next()

    handle = tf.placeholder(tf.string, shape=[])
    iterator = tf.data.Iterator.from_string_handle(handle, train.output_types,
                                                   train.output_shapes)
    next_batch = iterator.get_next()

    train_iterator = train.make_one_shot_iterator()
    test_iterator = test.make_initializable_iterator()

    sess = tf.Session()

    train_handle = sess.run(train_iterator.string_handle())
    test_handle = sess.run(test_iterator.string_handle())

    Data = namedtuple("Data", [
        "next_batch", "measure_batch", "handle", "train_handle", "test_handle",
        "test_iterator", "orig"
    ])
    data = Data(next_batch,
                measure_batch,
                handle,
                train_handle,
                test_handle,
                test_iterator,
                orig=toy)

    if not FLAGS.pretrain:
        with tf.variable_scope("truth"):
            _, true_stats = exact_gp(logger,
                                     sess,
                                     data,
                                     n_epochs=100,
                                     ard=False,
                                     eval=False)
        gp, _ = exact_gp(logger, sess, data, n_epochs=0, ard=False, eval=False)
    else:
        gp, true_stats = exact_gp(logger,
                                  sess,
                                  data,
                                  n_epochs=100,
                                  ard=False,
                                  eval=False)
        if FLAGS.measure == "noise":
            measure_batch += tf.random_normal(
                tf.shape(measure_batch),
                dtype=tf.float64) * (gp.kern.lengthscales / np.sqrt(2))

    set_up_figure(data, true_stats)

    if FLAGS.method == "svgp":
        test_stats = svgp(logger, sess, data, ard=False)
        plot_method(data, test_stats, "b")
        path = os.path.join(result_path,
                            "svgp-{}.png".format(FLAGS.n_inducing))
        save_figure(path, test_x)

    if FLAGS.method == "fbnn":
        test_stats = fbnn(logger, sess, data, prior_gp=gp, ard=False)
        plot_method(data, test_stats, "r")
        path = os.path.join(result_path,
                            "fbnn-{}-{}.png".format(FLAGS.m, FLAGS.net))
        save_figure(path, test_x)

    if FLAGS.method == "gpnet":
        test_stats = gpnet(logger, sess, data, prior_gp=gp, ard=False)
        plot_method(data, test_stats, "g")
        path = os.path.join(result_path,
                            "gpnet-{}-{}.png".format(FLAGS.m, FLAGS.net))
        save_figure(path, test_x)

    if FLAGS.method == "gpnet_nonconj":
        test_stats = gpnet_nonconj(logger, sess, data, prior_gp=gp, ard=False)
        plot_method(data, test_stats, "g")
        path = os.path.join(result_path, "gpnet-nonconj-{}-{}.png").format(
            FLAGS.m, FLAGS.net)
        save_figure(path, test_x)

    sess.close()
Example #13
def train():
    path_train, path_validation, log, policy, model_dir, it, l, multilabel, es, validate, n_features = parse_args()
    setup_logger(log)
    _train(path_train, path_validation, policy, model_dir, it, l, multilabel, es, validate, n_features)
Example #14
def predict():
    path, log, model_dir, multilabel, n_features = parse_args()
    setup_logger(log)
    _predict(path, model_dir, multilabel, n_features)
Example #15
import logging
import pytest
import signal
import subprocess
import time

from cfme.configure import configuration
from fixtures.artifactor_plugin import art_client, get_test_idents
from utils.conf import env
from utils.log import setup_logger
from utils.net import random_port, my_ip_address, net_check_remote
from utils.path import scripts_path
from utils.smtp_collector_client import SMTPCollectorClient


logger = setup_logger(logging.getLogger('emails'))


@pytest.fixture(scope="function")
def smtp_test(request):
    """Fixture, which prepares the appliance for e-mail capturing tests

    Returns: :py:class:`util.smtp_collector_client.SMTPCollectorClient` instance.
    """
    logger.info("Preparing start for e-mail collector")
    ports = env.get("mail_collector", {}).get("ports", {})
    mail_server_port = ports.get("smtp", None) or random_port()
    mail_query_port = ports.get("json", None) or random_port()
    my_ip = my_ip_address()
    logger.info("Mind that it needs ports %s and %s open", mail_query_port, mail_server_port)
    smtp_conf = configuration.SMTPSettings(
Example #16
Expected events are defined by a set of event attributes which should match the same
event attributes in the event_streams db table, except for one fake attribute,
target_name, which is resolved into a certain object's id.

The default match algorithm is ==. An Event also accepts a match function in order to
change the default match type.
"""
import logging
import pytest

from utils.log import setup_logger
from utils.wait import wait_for, TimedOutError

# xxx better logger name
logger = setup_logger(logging.getLogger('events'))


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item):
    try:
        yield
    finally:
        if "register_event" in item.funcargnames:
            event_listener = item.funcargs["register_event"]
            soft_assert = item.funcargs["soft_assert"]

            try:
                logger.info('Checking the events to come.')
                wait_for(event_listener.check_expected_events,
                         delay=5,
Example #17
import datetime
import json
import logging
import os

import streaming.match as match
from utils import telegram
from utils.env import getenv
from utils.log import setup_logger

setup_logger(__name__)
log = logging.getLogger(__name__)

TELEGRAM_TOKEN = getenv("TELEGRAM_TOKEN")
TELEGRAM_CHAT_ID = getenv("TELEGRAM_CHAT_ID")
TELEGRAM_BOT_NAME = getenv("TELEGRAM_BOT_NAME")
TELEGRAM_ALERT_GROUP = json.loads(os.environ["TELEGRAM_ALERT_GROUP"])


def handler(event, context):
    """Perform appropriate action for each endpoint invocation"""
    try:
        if event["path"] == "/hello":
            return {"statusCode": 200, "body": "hello, world!"}
        elif event["path"] == "/alert":
            return send_alert(event, context)
        elif event["path"] == "/webhookUpdate":
            return handle_webhook_update(event, context)
    except Exception:
        log.error("Handling request", exc_info=True)
        return {
Example #18
from utils import log
from utils.SKHttpSession import SpaceKnowHttpSession as SKHttpSession
from utils.SKRemoteTaskService import SKRemoteTaskService

from core.CarsAnalysis import CarsAnalysis
from core.Imagery import Imagery


parser = argparse.ArgumentParser(description='SpaceKnow command line client tool.')
requiredArgs = parser.add_argument_group('Required arguments')
requiredArgs.add_argument('-u', '--username', help='SpaceKnow account user name', required=True)
requiredArgs.add_argument('-p', '--password', help='SpaceKnow account password', required=True)
args = parser.parse_args()


logger = log.setup_logger('root')
loop = asyncio.get_event_loop()
http = SKHttpSession(loop)
tasking = SKRemoteTaskService(http, loop)


logger.info("Starting applicaton")
credentials = files.read_json("input/login.json")
credentials["username"] = args.username
credentials["password"] = args.password
http.login(credentials)


logger.info("Retrieving imagery")
imageryRequest = files.read_json("input/imagery_request.json")
imagery = Imagery(tasking, http)