Example #1
def main(args):
    # Parse config file
    config = parse_config(args.config_file)

    # verify the config file and get the Carbon Black Cloud Server list
    output_params, server_list = verify_config(config)

    # Store Forward.  Attempt to send messages that have been saved due to a failure to reach the destination
    send_stored_data(output_params)

    logger.info("Found {0} Carbon Black Cloud Servers in config file".format(
        len(server_list)))

    # Iterate through our Carbon Black Cloud Server list
    for server in server_list:
        logger.info("Handling notifications for {0}".format(
            server.get('server_url')))

        notification_logs = fetch_notification_logs(
            server, output_params['output_format'],
            output_params['policy_action_severity'])

        logger.info("Sending Notifications")
        send_new_data(output_params, notification_logs)
        logger.info("Done Sending Notifications")

        audit_logs = fetch_audit_logs(server, output_params['output_format'])

        logger.info("Sending Audit Logs")
        send_new_data(output_params, audit_logs)
        logger.info("Done Sending Audit Logs")

Example #2
def main():
    # Initialize BERT parameters
    config = parse_config('BERT')

    # Parse arguments and print them
    args = parse_args()
    print("\nMain arguments:")
    for k, v in args.__dict__.items():
        print("{}={}".format(k, v))

    # Build the BERT checkpoint path and warm-start settings
    warm_start_path = None
    if args.load_bert_ckt:
        warm_start_path = args.buckets + args.load_bert_ckt + "/model.ckpt-{}".format(
            args.load_bert_step)
        warm_start_settings = tf.estimator.WarmStartSettings(
            warm_start_path, vars_to_warm_start='bert*')
    elif args.load_all_layers_ckt:
        warm_start_path = args.buckets + args.load_all_layers_ckt + "/model.ckpt-{}".format(
            args.load_all_step)
        warm_start_settings = tf.estimator.WarmStartSettings(
            warm_start_path, vars_to_warm_start=".*")
    else:
        raise ValueError("No pretain params for finetune models")

    # Check whether the model already exists
    model_save_dir = args.buckets + args.checkpoint_dir
    if (tf.gfile.Exists(model_save_dir + "/checkpoint")
            and args.load_all_layers_ckt != args.checkpoint_dir):
        raise ValueError(
            "Model %s already exists; please delete it and retry" %
            model_save_dir)

    helper.dump_args(model_save_dir, args)

    bert_model = BertFinetune(config)

    estimator = tf.estimator.Estimator(
        model_fn=bert_model.model_fn,
        model_dir=model_save_dir,
        config=tf.estimator.RunConfig(session_config=tf.ConfigProto(
            gpu_options=tf.GPUOptions(allow_growth=True)),
                                      save_checkpoints_steps=args.snap_shot,
                                      keep_checkpoint_max=100),
        warm_start_from=warm_start_settings)

    print("Start training......")
    estimator.train(
        finetune_loader.OdpsDataLoader(table_name=args.tables,
                                       config=config,
                                       mode=1).input_fn,
        steps=config["num_train_steps"],
    )
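
The two warm-start branches above differ only in the vars_to_warm_start pattern handed to tf.estimator.WarmStartSettings, which in TF 1.x is a regular expression matched against variable names: 'bert*' restores just the pretrained encoder variables, while ".*" restores everything found in the checkpoint. A minimal illustration (the checkpoint path is a placeholder):

import tensorflow as tf

# Warm-start only variables whose names match the regex 'bert*', i.e. the
# pretrained encoder; the new task head keeps its random initialization.
warm_start = tf.estimator.WarmStartSettings(
    ckpt_to_initialize_from="/path/to/model.ckpt-100000",  # placeholder
    vars_to_warm_start="bert*")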

Example #3
def main():
    # Initialize BERT parameters
    config = parse_config('BERT')

    args = parse_args()
    print("Main arguments:")
    for k, v in args.__dict__.items():
        print("{}={}".format(k, v))

    # Set up distributed inference
    dist_params = {
        "task_index": args.task_index,
        "ps_hosts": args.ps_hosts,
        "worker_hosts": args.worker_hosts,
        "job_name": args.job_name
    }
    slice_count, slice_id = env.set_dist_env(dist_params)

    bert_model = BertFinetune(config)

    # Resolve the saved model directory and target checkpoint
    model_save_dir = args.buckets + args.checkpoint_dir

    checkpoint_path = None
    if args.step > 0:
        checkpoint_path = model_save_dir + "/model.ckpt-{}".format(args.step)

    estimator = tf.estimator.Estimator(
        model_fn=bert_model.model_fn,
        model_dir=model_save_dir,
        config=tf.estimator.RunConfig(
            session_config=tf.ConfigProto(gpu_options=tf.GPUOptions(
                allow_growth=True)),
            save_checkpoints_steps=config["num_train_steps"],
            keep_checkpoint_max=1))

    result_iter = estimator.predict(
        finetune_loader.OdpsDataLoader(table_name=args.tables,
                                       config=config,
                                       mode=0,
                                       slice_id=slice_id,
                                       slice_count=slice_count).input_fn,
        checkpoint_path=checkpoint_path)

    odps_writer = dumper.get_odps_writer(args.outputs, slice_id=slice_id)
    _do_prediction(result_iter, odps_writer, args)
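
The slice_count and slice_id values returned by env.set_dist_env give each worker a disjoint shard of the input table, so the table is scored exactly once across workers. The loader's internals are not shown here, but the usual tf.data sharding pattern looks like this sketch (the record source and batch size are placeholders):

import tensorflow as tf

def sharded_input_fn(slice_count, slice_id):
    # Keep every slice_count-th record, offset by this worker's slice_id,
    # so the full input is covered exactly once across all workers.
    dataset = tf.data.TFRecordDataset(["records.tfrecord"])  # placeholder
    dataset = dataset.shard(num_shards=slice_count, index=slice_id)
    return dataset.batch(32)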

Example #4
def main():
    # Initialize BERT parameters
    config = parse_config('MiniBERT')

    # Parse arguments and print them
    args = parse_args()
    print("\nMain arguments:")
    for k, v in args.__dict__.items():
        print("{}={}".format(k, v))

    # Check whether the model already exists
    model_save_dir = args.buckets + args.checkpoint_dir
    warm_start_settings = None
    if (tf.gfile.Exists(model_save_dir + "/checkpoint")
            and not args.warm_start_step):
        raise ValueError(
            "Model %s already exists; please delete it and retry" %
            model_save_dir)
    elif args.warm_start_step:
        warm_start_path = model_save_dir + "/model.ckpt-{}".format(
            args.warm_start_step)
        warm_start_settings = tf.estimator.WarmStartSettings(warm_start_path)
        print("Initializing training from %s" % warm_start_path)

    helper.dump_args(model_save_dir, args)
    bert_model = BertPretrain(config)

    estimator = tf.estimator.Estimator(
        model_fn=bert_model.model_fn,
        model_dir=model_save_dir,
        config=tf.estimator.RunConfig(session_config=tf.ConfigProto(
            gpu_options=tf.GPUOptions(allow_growth=True)),
                                      save_checkpoints_steps=args.snap_shot,
                                      keep_checkpoint_max=100),
        warm_start_from=warm_start_settings)

    print("Start training......")
    estimator.train(
        pretrain_loader.OdpsDataLoader(table_name=args.tables,
                                       config=config,
                                       mode=1).input_fn,
        steps=config["num_train_steps"],
    )

Example #5
def main():
    parser = ArgumentParser("Research Experiment Runner")
    parser.add_argument("config",
                        metavar="config_json",
                        help="Experiment configuration JSON file")
    parser.add_argument(
        "--override",
        metavar="override_json",
        default=None,
        type=str,
        help="Serialized JSON object to merge into configuration "
        "(overrides config)",
    )
    args = parser.parse_args()

    config = parse_config(args.config, args.override)
    agent_query = config.get("agent", None)
    agent_class = fetch_class(agent_query)
    agent_instance = agent_class(config)
    try:
        agent_instance.run()
    finally:
        agent_instance.finalize()
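
Here parse_config takes the base config path plus an optional serialized JSON override. A plausible sketch of that merge, assuming a shallow top-level update (the real implementation may merge nested keys recursively):

import json

def parse_config(config_path, override_json=None):
    # Load the base experiment configuration from disk.
    with open(config_path) as f:
        config = json.load(f)
    # Overlay the serialized override object, if one was supplied.
    if override_json:
        config.update(json.loads(override_json))
    return config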

Example #6
# Quick trial run (kept for reference):
# t = T()
# for _ in range(3):
#     env.reset(t.init_func)
#     for _0 in range(25):
#         _1, _2, over, _4, _5, _6, _7 = env.step(t.train_func)
#         if over:
#             break
#     env.render('video')

# ---------------------------------------------------------------------

# Whole Test. ---------------------------------------------------------

import os

import util.config as conf_util
from task.framework import DeepQNetwork
# Import path assumed here; BratsAdapter is the BraTS data adapter used below.
from dataset.adapter import BratsAdapter

# Resolve config.ini relative to the project root (the parent of the CWD).
config_file = os.path.abspath(os.path.dirname(os.getcwd())) + '/FocusDQN/config.ini'
config = conf_util.parse_config(config_file)

data_adapter = BratsAdapter(enable_data_enhance=False)

dqn = DeepQNetwork(config=config, name_space='ME', data_adapter=data_adapter)

# Train.
dqn.train(epochs=10, max_instances=260)
# Test.
dqn.test(110, is_validate=False)

Example #7
  -t lookbackTime  lookback window, with units (s, m, h, d, w)
  -g groupBy  group commits by repo, by repo then date, or by date then repo.
"""

from datetime import datetime

from docopt import docopt

from github.activity import summarise_commits
from util.config import parse_config
from util.constant import *

if __name__ == '__main__':
    arguments = docopt(__doc__)
    appname = arguments['-p']
    config_dict = parse_config(app_name=appname)
    lookback_time = 604800  # default lookback: one week, in seconds

    if '-t' in arguments and arguments['-t']:
        config_dict[LOOKBACK_TIME] = arguments['-t']

    if '-g' in arguments and arguments['-g']:
        config_dict[GROUPY_BY] = arguments['-g']

    if config_dict[LOOKBACK_TIME]:
        time_unit_map = {
            's': 1,
            'm': 60,
            'h': 60 * 60,
            'd': 60 * 60 * 24,
            'w': 60 * 60 * 24 * 7