Example #1
def start(instance="default"):
    """
    Attach ipmi-console to target instance specified by
    its name
    :param instance: infrasim instance name
    """
    # initialize logging
    global logger_ic
    logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance)
    common.init_logger(instance)

    # initialize environment
    env.local_env.quit_flag = False
    common.init_env(instance)
    pid_file = "{}/{}/.ipmi_console.pid".format(config.infrasim_home, instance)
    daemon.daemonize(pid_file)
    with open(pid_file, "r") as fp:
        logger_ic.info("ipmi-console of {} start with pid {}".
                       format(instance, fp.read().strip()))

    # parse the sdrs and build all sensors
    sdr.parse_sdrs()

    # run a thread for each threshold-based sensor
    _start_monitor(instance)
    _spawn_sensor_thread()
    _start_console(instance)
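
A minimal sketch of how start() might be driven from the command line, assuming it is importable; the ipmi_console module name here is hypothetical:

import argparse

from ipmi_console import start  # hypothetical import path

if __name__ == '__main__':
    cli = argparse.ArgumentParser(description="Attach an ipmi-console.")
    cli.add_argument("instance", nargs="?", default="default",
                     help="infrasim instance name")
    start(cli.parse_args().instance)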
Example #2
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--do_train", default=True, action='store_true')
    parser.add_argument('--do_eval', default=False, action='store_true')
    parser.add_argument("--do_predict", default=False, action='store_true')

    parser.add_argument('--markup',
                        default='bieos',
                        type=str,
                        choices=['bios', 'bio', 'bieos'])  # tag scheme
    parser.add_argument("--arch", default='bilstm_crf', type=str)
    parser.add_argument('--learning_rate', default=0.001, type=float)
    parser.add_argument('--seed', default=1234, type=int)
    parser.add_argument('--gpu', default='', type=str)
    parser.add_argument('--epochs', default=100, type=int)
    parser.add_argument('--batch_size', default=32, type=int)
    parser.add_argument('--embedding_size', default=128, type=int)
    parser.add_argument('--hidden_size', default=384, type=int)
    parser.add_argument("--grad_norm",
                        default=5.0,
                        type=float,
                        help="Max gradient norm.")
    parser.add_argument("--task_name", type=str, default='ner')
    args = parser.parse_args()
    args.data_dir = config.data_dir
    if not config.output_dir.exists():
        config.output_dir.mkdir()
    args.output_dir = config.output_dir / '{}'.format(args.arch)
    if not args.output_dir.exists():
        args.output_dir.mkdir()
    init_logger(log_file=str(args.output_dir /
                             '{}-{}.log'.format(args.arch, args.task_name)))
    seed_everything(args.seed)
    if args.gpu != '':
        args.device = torch.device(f"cuda:{args.gpu}")
    else:
        args.device = torch.device("cpu")
    args.id2label = {i: label for i, label in enumerate(config.label2id)}
    args.label2id = config.label2id

    processor = CluenerProcessor(data_dir=config.data_dir)
    processor.get_vocab()

    model = BERT_NERModel(device=args.device,
                          label2id=args.label2id,
                          need_birnn=True)
    # model = NERModel(vocab_size=len(processor.vocab), embedding_size=args.embedding_size,
    #                  hidden_size=args.hidden_size, device=args.device, label2id=args.label2id)

    model.to(args.device)

    if args.do_train:
        train(args, model, processor)
    if args.do_eval:
        model_path = args.output_dir / 'best-model.bin'
        model = load_model(model, model_path=str(model_path))
        evaluate(args, model, processor)
    if args.do_predict:
        predict(args, model, processor)
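
One caveat in the flags above: --do_train combines action='store_true' with default=True, so the option is always on and the flag itself is a no-op. A minimal sketch of the usual pattern when a boolean should default to on, using a paired off-switch (flag names are illustrative):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--do_train", dest='do_train', action='store_true')
parser.add_argument("--no_train", dest='do_train', action='store_false')
parser.set_defaults(do_train=True)   # on by default; --no_train disables

args = parser.parse_args()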
Example #3
def start(instance="default"):
    """
    Attach ipmi-console to target instance specified by
    its name
    :param instance: infrasim instance name
    """
    # initialize logging
    global logger_ic
    logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value,
                                        instance)
    common.init_logger(instance)

    # initialize environment
    env.local_env.quit_flag = False
    common.init_env(instance)
    daemon.daemonize("{}/{}/.ipmi_console.pid".format(config.infrasim_home,
                                                      instance))

    # parse the sdrs and build all sensors
    sdr.parse_sdrs()

    # run a thread for each threshold-based sensor
    _start_monitor(instance)
    _spawn_sensor_thread()
    _start_console(instance)
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--do_train", default=False, action='store_true')
    parser.add_argument("--do_eval", default=False, action='store_true')
    parser.add_argument('--do_predict', default=False, action='store_true')
    # bios scheme: 's' marks a single-character entity
    parser.add_argument('--markup',
                        default='bios',
                        type=str,
                        choices=['bio', 'bios'])
    parser.add_argument("--arch", default='bert', type=str)
    parser.add_argument('--seed', default=1234, type=int)
    parser.add_argument('--gpu', default='0', type=str)
    parser.add_argument('--epochs', default=20, type=int)
    parser.add_argument('--batch_size', default=16, type=int)
    parser.add_argument("--task_name", type=str, default='ner')
    parser.add_argument("--max_seq_len", type=int, default=64)
    parser.add_argument("--aug_num", default=4, type=int)

    args = parser.parse_args()
    args.vocab_file = config.vocab_file
    args.data_dir = config.data_dir
    args.tf_checkpoint = config.tf_checkpoint
    args.init_checkpoint = config.init_checkpoint
    args.bert_config_file = config.bert_config_file

    if not config.output_dir.exists():
        config.output_dir.mkdir()
    args.output_dir = config.output_dir / '{}'.format(args.arch)
    if not args.output_dir.exists():
        args.output_dir.mkdir()
    init_logger(log_file=str(args.output_dir /
                             '{}-{}.log'.format(args.arch, args.task_name)))
    seed_everything(args.seed)
    # select the visible GPU(s)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    args.id2label = {i: label for i, label in enumerate(config.label2id)}
    args.label2id = config.label2id

    processor = CluenerProcessor(data_dir=config.data_dir,
                                 vocab_file=config.vocab_file,
                                 label2id=config.label2id,
                                 max_seq_len=args.max_seq_len)
    processor.get_label_embedding(args.label2id,
                                  config.pretrain_label_embedding_file,
                                  config.label_embedding_file)
    args.label_embedding = processor.label_embedding
    if args.do_train:
        train(args, NERModel, processor)

    if args.do_predict:
        predict(args, processor)
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--do_train", default=False, action='store_true')
    parser.add_argument("--do_eval", default=False, action='store_true')
    parser.add_argument('--do_predict', default=False, action='store_true')
    parser.add_argument('--markup', default='bmeso', type=str)
    parser.add_argument("--arch", default='transformer', type=str)
    parser.add_argument('--learning_rate', default=0.001, type=float)
    parser.add_argument('--seed', default=1234, type=int)
    parser.add_argument('--gpu', default='0', type=str)
    parser.add_argument('--epochs', default=100, type=int)
    parser.add_argument('--batch_size', default=16, type=int)
    # parser.add_argument('--hidden_size', default=512, type=int)
    parser.add_argument("--grad_norm",
                        default=5.0,
                        type=float,
                        help="Max gradient norm.")

    args = parser.parse_args()

    args.data_dir = config.data_dir
    args.output_dir = config.output_dir / '{}-on-lstm'.format(config.dataset)
    if not args.output_dir.exists():
        args.output_dir.mkdir()
    init_logger(log_file=str(args.output_dir / '{}.log'.format(args.arch)))
    seed_everything(args.seed)

    # select the visible GPU(s)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    if os.path.exists(config.save_data_name):
        print('Loading processed data')
        with open(config.save_data_name, 'rb') as fp:
            data = pickle.load(fp)
    else:
        data = Data()
        data_initialization(data, config.gaz_file, config.train_path,
                            config.dev_path, config.test_path)
        data.generate_instance_with_gaz(config.train_path, 'train')
        data.generate_instance_with_gaz(config.dev_path, 'dev')
        data.generate_instance_with_gaz(config.test_path, 'test')
        data.build_word_pretrain_emb(config.char_emb)
        data.build_biword_pretrain_emb(config.bichar_emb)
        data.build_gaz_pretrain_emb(config.gaz_file)
        print('Dumping data')
        with open(config.save_data_name, 'wb') as f:
            pickle.dump(data, f)
    if args.do_train:
        train(args, data, NERModel)

    if args.do_predict:
        predict(args, data, NERModel, 'test')
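
The load-or-build branch above is a common pickle caching idiom: deserialize the preprocessed data if the cache file exists, otherwise build and dump it. A generic sketch of the same pattern (build_data stands in for the Data construction steps):

import os
import pickle

def load_or_build(cache_path, build_data):
    """Return cached data, rebuilding and dumping it on a cache miss."""
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as fp:
            return pickle.load(fp)
    data = build_data()
    with open(cache_path, 'wb') as fp:
        pickle.dump(data, fp)
    return data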
Example #6
 def __init__(self, vocab_size, dim_emb=DIM_EMB, *args, **kwargs):
     """Initialize a network."""
     super(CBOW, self).__init__()
     self.verbose = kwargs.get('verbose', False)
     self.logger = init_logger('CBOW')
     self.embeddings_x = nn.Embedding(vocab_size, dim_emb, sparse=True)
     self.embeddings_y = nn.Embedding(vocab_size, dim_emb, sparse=True)
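
Because both embedding tables are created with sparse=True, their gradients come back as sparse tensors, which plain optim.Adam rejects; optim.SparseAdam (or SGD) handles them. A minimal sketch, assuming the CBOW class above (sizes are illustrative):

import torch.optim as optim

model = CBOW(vocab_size=10000, dim_emb=128)            # illustrative sizes
optimizer = optim.SparseAdam(model.parameters(), lr=1e-3)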
Example #7
def main():
    # register an exit callback
    # atexit.register(main_shutdown)
    logger = common.init_logger("server")
    proxy = Proxy(logger)
    proxy.start()
    proxy.join()
    logger.info('server end')
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--do_train", default=False, action='store_true')
    parser.add_argument("--do_eval", default=False, action='store_true')
    parser.add_argument('--do_predict', default=False, action='store_true')
    # bios scheme: 's' marks a single-character entity
    parser.add_argument('--markup',
                        default='bios',
                        type=str,
                        choices=['bio', 'bios'])
    parser.add_argument("--arch", default='bilstm_crf', type=str)
    parser.add_argument('--learning_rate', default=0.001, type=float)
    parser.add_argument('--seed', default=1234, type=int)
    parser.add_argument('--gpu', default='0', type=str)
    parser.add_argument('--epochs', default=80, type=int)
    parser.add_argument('--batch_size', default=32, type=int)
    parser.add_argument('--embedding_size', default=128, type=int)
    parser.add_argument('--hidden_size', default=384, type=int)
    parser.add_argument("--grad_norm",
                        default=5.0,
                        type=float,
                        help="Max gradient norm.")
    parser.add_argument("--task_name", type=str, default='ner')

    args = parser.parse_args()
    args.data_dir = config.data_dir
    if not config.output_dir.exists():
        config.output_dir.mkdir()
    args.output_dir = config.output_dir / '{}'.format(args.arch)
    if not args.output_dir.exists():
        args.output_dir.mkdir()
    init_logger(log_file=str(args.output_dir /
                             '{}-{}.log'.format(args.arch, args.task_name)))
    seed_everything(args.seed)
    # select the visible GPU(s)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    args.id2label = {i: label for i, label in enumerate(config.label2id)}
    args.label2id = config.label2id
    processor = CluenerProcessor(data_dir=config.data_dir)
    processor.get_vocab()
    if args.do_train:
        train(args, NERModel, processor)

    if args.do_predict:
        predict(args, NERModel, processor)
Example #9
 def __init__(self):
     self.logger = common.init_logger("client")
     self.logger.info("test")
     _addr, _port = config.proxy.split(':')
     self.proxy_port = int(_port)
     self.proxy_addr = _addr
     self.my_name = config.user
     self.registered = False
     self.url_parser = re.compile(r"((.*):)?(\d+)")
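
The url_parser pattern splits an optional host prefix from a required numeric port (the raw string avoids the invalid \d escape warning). How the capture groups fall out:

import re

url_parser = re.compile(r"((.*):)?(\d+)")
m = url_parser.match("10.0.0.1:8080")
print(m.group(2), m.group(3))    # 10.0.0.1 8080
m = url_parser.match("8080")
print(m.group(2), m.group(3))    # None 8080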
Example #10
 def __init__(self):
     self.logger = common.init_logger("client")
     self.logger.info("test")
     yaml_pathname = common.build_filename("client.yaml")
     with open(yaml_pathname) as f:
         cfg = yaml.safe_load(f)
     _addr, _port = cfg['proxy'].split(':')
     self.proxy_port = int(_port)
     self.proxy_addr = _addr
     self.my_name = cfg['user']
     self.registered = False
     self.url_parser = re.compile(r"((.*):)?(\d+)")
Example #11
    def __init__(self, window_size, *args, **kwargs):
        "Set variables."
        self.verbose = kwargs.get('verbose', False)
        self.logger = init_logger('Corpus')
        self.window_size = window_size  # length of window on each side

        # Vocabulary
        self.w2i = defaultdict(lambda: len(self.w2i))
        self.freq = defaultdict(int)

        self.UNK = self.w2i['<unk>']
        self.BOS = self.w2i['<s>']
        self.EOS = self.w2i['</s>']
        self.freq[self.UNK] = 0
        self.freq[self.BOS] = 0
        self.freq[self.EOS] = 0
        self.vocab_range = {}
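
The w2i table relies on defaultdict(lambda: len(self.w2i)): merely looking up an unseen word assigns it the next free id as a side effect, which is why the special tokens are "declared" by plain indexing. The trick in isolation:

from collections import defaultdict

w2i = defaultdict(lambda: len(w2i))
print(w2i['<unk>'], w2i['<s>'], w2i['</s>'])   # 0 1 2
print(w2i['hello'])                            # 3 (assigned on first lookup)
print(w2i['hello'])                            # 3 (stable afterwards)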
Example #12
        'docker rm -fv $(docker ps -aq --filter=label=com.avatao.typed_crp_id=docker)'
    ],
                     stderr=subprocess.DEVNULL,
                     stdout=subprocess.DEVNULL).wait()


if __name__ == '__main__':
    """
    Run avatao solvable and controller docker images. Simply add the challenge repository path as the first
    argument and the script does the rest.

    Python dependencies:
        - PyYAML (http://pyyaml.org/) or simply `pip3 install PyYAML`
          (on Ubuntu you additionally need `apt-get install python3-yaml`)
    """
    init_logger()
    repo_path, repo_name = get_sys_args()

    os.chdir(repo_path)
    atexit.register(remove_containers)

    proc_list = []
    first = None

    if 'crp_config' not in read_config(repo_path):
        logging.warning('There is no crp_config in the config.yml, if this is '
                        'a static challenge you don\'t need to run it.')
        sys.exit(0)

    for short_name, crp_config_item in get_crp_config(repo_path,
                                                      repo_name).items():
Example #13
    for sensor_obj in sdr.sensor_list:
        sensor_obj.set_mode("user")
        # set quit flag
        sensor_obj.set_quit(True)
        # acquire the lock before notifying
        sensor_obj.condition.acquire()
        sensor_obj.condition.notify()
        sensor_obj.condition.release()

    for thread in sensor_thread_list:
        thread.join()

if __name__ == '__main__':

    # initialize logging
    common.init_logger()
    # open telnet session to IPMI simulator
    # common.open_telnet_session()

    # parse the sdrs and build all sensors
    sdr.parse_sdrs()

    # run a thread for each threshold-based sensor
    spawn_sensor_thread()

    if SERVERTYPE == 'threaded':
        # Single threaded server - only one session at a time
        class TelnetServer(SocketServer.TCPServer):
            allow_reuse_address = True

        server = TestSSHHandler(port=SSH_PORT_BINDING)
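
The shutdown loop at the top is the standard threading.Condition handshake: set a quit flag, notify under the lock, then join the worker so it can wake and re-check its predicate. The same pattern, self-contained:

import threading

cond = threading.Condition()
quit_flag = False

def worker():
    with cond:
        while not quit_flag:
            cond.wait()          # lock released while waiting

t = threading.Thread(target=worker)
t.start()

with cond:                       # acquire the lock before notifying
    quit_flag = True
    cond.notify()
t.join()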
Example #14
#!/usr/bin/env python

import os
import re
import common
import regexs
from collections import defaultdict
from operator import itemgetter
import signal
import multiprocessing
import argparse

g_logger = common.init_logger()

parser = argparse.ArgumentParser()
parser.add_argument('-d',
                    '--dir',
                    type=str,
                    default='./',
                    help='the log file directory.')
parser.add_argument('-p',
                    '--prev',
                    action='store_true',
                    help='whether to parse previous logs.')
parser.add_argument('-f',
                    '--status_file',
                    action='store_true',
                    help='whether to read paths from the local status file.')
args = parser.parse_args()

g_shared_list = multiprocessing.Manager().list()
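
g_shared_list is a proxy object from multiprocessing.Manager(), so results appended inside worker processes are visible to the parent. A minimal sketch of that pattern (parse_file stands in for the real log parsing):

import multiprocessing

def parse_file(path, shared):
    shared.append((path, len(path)))   # stand-in for real parsing work

if __name__ == '__main__':
    shared = multiprocessing.Manager().list()
    with multiprocessing.Pool(4) as pool:
        pool.starmap(parse_file, [(p, shared) for p in ('a.log', 'b.log')])
    print(list(shared))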
Example #15
"""
import os
import sys
import datetime
import time
import traceback

home_dir = os.path.join(os.path.split(os.path.realpath(__file__))[0])
sys.path.append(home_dir + "/bin")
sys.path.append(home_dir + "/conf") 
import common
import etl
import make_train_data

log = common.init_logger()

def main():
    """主函数"""
    stocks = common.load_all_stocks()
    for i, stock in enumerate(stocks): 
        try:
            # if i < 2809:
            #     continue 
            stock_no = stock['stock_no']
            print(stock_no)

            start = stock['start']
            log.info("run %s,%s,%s" % (stock_no, start, i))
            
            # etl.download(stock_no, start)  # download
Example #16
        sensor_obj.set_mode("user")
        # set quit flag
        sensor_obj.set_quit(True)
        # acquire the lock before notifying
        sensor_obj.condition.acquire()
        sensor_obj.condition.notify()
        sensor_obj.condition.release()

    for thread in sensor_thread_list:
        thread.join()


if __name__ == '__main__':

    # initialize logging
    common.init_logger()
    # open telnet session to IPMI simulator
    # common.open_telnet_session()

    # parse the sdrs and build all sensors
    sdr.parse_sdrs()

    # run a thread for each threshold-based sensor
    spawn_sensor_thread()

    if SERVERTYPE == 'threaded':
        # Single threaded server - only one session at a time
        class TelnetServer(SocketServer.TCPServer):
            allow_reuse_address = True

        server = TestSSHHandler(port=SSH_PORT_BINDING)
Example #17
            close_and_swallow(pipefd[1])
            logger.debug("wait_on_haproxy_pipe done (False)")
            return False
    except OSError as e:
        logger.debug("wait_on_haproxy_pipe OSError: %s", e)
        if e.args[0] != errno.EINTR:
            close_and_swallow(pipefd[0])
            close_and_swallow(pipefd[1])
            logger.debug("wait_on_haproxy_pipe done (False)")
            return False
    logger.debug("wait_on_haproxy_pipe done (True)")
    return True


init_logger(
    "/dev/null",
    "%(asctime)s.%(msecs)03d%(timezoneiso8601)s %(levelname)s - 0 python %(name)s {\"@message\": \"%(message)s\"}",
    "DEBUG")
logger = common.marathon_lb_logger.getChild('haproxy_wrapper.py')

pipefd = create_haproxy_pipe()

pid = os.fork()

if not pid:
    os.environ["HAPROXY_WRAPPER_FD"] = str(pipefd[1])
    # Close the read side
    os.close(pipefd[0])
    os.execv(sys.argv[1], sys.argv[1:])

# Close the write side
os.close(pipefd[1])
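
The tail of this example is the classic fork-and-exec handshake over an inherited pipe: the child keeps the write end (advertised via HAPROXY_WRAPPER_FD) and the parent keeps the read end to learn when the child is ready. Stripped to its mechanics:

import os

r, w = os.pipe()
pid = os.fork()
if pid == 0:                  # child: write end only
    os.close(r)
    os.write(w, b"ready")     # an exec'd program would do the equivalent
    os._exit(0)
os.close(w)                   # parent: read end only
print(os.read(r, 5))          # blocks until the child signals
os.waitpid(pid, 0)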
Example #18
#!/usr/bin/env python

import time
import boto.ec2
import re
import sys
import os
import logging
import socket

from common import load_config, get_aws_public_hostname, init_logger
init_logger()

from fabric.api import *

# http://examples.oreilly.com/0636920020202/ec2_launch_instance.py

def connect():
    config = load_config()  # ConfigParser.ConfigParser()

    aws_key = config.get('aws', 'aws_key')
    aws_secret = config.get('aws', 'aws_secret')

    conn = boto.ec2.connect_to_region(
        'us-east-1',
        aws_access_key_id=aws_key,
        aws_secret_access_key=aws_secret,
    )

    return conn
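
A hedged usage sketch of the connect() helper, using the boto 2.x EC2 API (get_all_instances returns reservations, each holding the instances it launched):

conn = connect()
for reservation in conn.get_all_instances():
    for instance in reservation.instances:
        print(instance.id, instance.state)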
Example #19
        print('[{}] loss = {:.4f} ({:.4f}/{:.4f}), time = {:.2f}'.format(
            ITER + 1, train_loss_we + train_loss_d, train_loss_we,
            train_loss_d,
            time.time() - start))

        # Save vectors
        embs = model.get_embeddings()
        save_embeddings(args.path_output, embs, corpus.i2w)
        if args.dir_model:
            save_model(args.dir_model, model, corpus.w2i)

    return 0


if __name__ == '__main__':
    logger = init_logger('MAIN')
    parser = argparse.ArgumentParser()
    parser.add_argument('-s',
                        '--src',
                        dest='path_src',
                        required=True,
                        help='path to a source corpus file')
    parser.add_argument('-t',
                        '--trg',
                        dest='path_trg',
                        required=True,
                        help='path to a target corpus file')
    parser.add_argument('--window-size',
                        type=int,
                        default=2,
                        help='window size on each side')
Example #20
                        help="The proxy listen port.")

    tmpl_parser = subparsers.add_parser('tmpl', help="Use default template.")
    tmpl_parser.add_argument("title", type=str, help="Web site title.")
    tmpl_parser.set_defaults(func=run_tmpl)

    clone_parser = subparsers.add_parser('clone',
                                         help="Clone web site from url.")
    clone_parser.add_argument("url", type=str, help="Url to clone.")
    clone_parser.add_argument("--path",
                              type=str,
                              default="login",
                              help="Request path to be logged.")
    clone_parser.add_argument("--ext", type=str, help="Extends (.py).")
    clone_parser.set_defaults(func=run_clone)

    spec_parser = subparsers.add_parser('spec',
                                        help="Use specified web site file.")
    spec_parser.add_argument("wwwroot", type=str, help="Web site html file.")
    spec_parser.add_argument("--path",
                             type=str,
                             default="login",
                             help="Request path to be logged.")
    spec_parser.set_defaults(func=run_spec)

    args = parser.parse_args()
    init_logger("fakelogin.log", args.debug)
    args.func(args)

    IPython.embed()
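
Each subparser above registers its handler via set_defaults(func=...), so a single args.func(args) call after parsing dispatches to the chosen subcommand. The idiom in isolation (the hello command is illustrative):

import argparse

def run_hello(args):
    print("hello,", args.name)

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command', required=True)
hello_parser = subparsers.add_parser('hello', help="Say hello.")
hello_parser.add_argument("name", type=str)
hello_parser.set_defaults(func=run_hello)

args = parser.parse_args()
args.func(args)              # dispatch to the selected handler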
Example #21
    except OSError as e:
        logger.debug("wait_on_haproxy_pipe OSError: %s", e)
        if e.args[0] != errno.EINTR:
            close_and_swallow(pipefd[0])
            close_and_swallow(pipefd[1])
            logger.debug("wait_on_haproxy_pipe done (False)")
            return False
    logger.debug("wait_on_haproxy_pipe done (True)")
    return True


syslog_socket = os.getenv("WRAPPER_SYSLOG_SOCKET")
log_format = os.getenv("WRAPPER_LOG_FORMAT")
log_level = os.getenv('WRAPPER_LOG_LEVEL')

init_logger(syslog_socket, log_format, log_level)
logger = common.marathon_lb_logger.getChild('haproxy_wrapper.py')

pipefd = create_haproxy_pipe()

pid = os.fork()

if not pid:
    os.environ["HAPROXY_WRAPPER_FD"] = str(pipefd[1])
    # Close the read side
    os.close(pipefd[0])
    os.execv(sys.argv[1], sys.argv[1:])

# Close the write side
os.close(pipefd[1])
while wait_on_haproxy_pipe(pipefd):
Example #22
            tw_in = sum(tw[:, 1] < 0)
            features.extend([tw_out, tw_in])
    return features


def build_dict(features):
    vocab, inverse = np.unique(features, return_inverse=True)
    return vocab, inverse.reshape(features.shape[0], features.shape[1])


if __name__ == '__main__':
    global n, w
    # initialize logger
    logger = common.init_logger("extract")
    # read config file
    cf = common.read_conf(common.confdir)
    MON_SITE_NUM = int(cf['monitored_site_num'])
    MON_INST_NUM = int(cf['monitored_inst_num'])
    num_class = MON_SITE_NUM
    if cf['open_world'] == '1':
        UNMON_SITE_NUM = int(cf['unmonitored_site_num'])
        num_class += 1
    else:
        UNMON_SITE_NUM = 0
    # read in args
    parser = argparse.ArgumentParser(description='DF feature extraction')
    parser.add_argument(
        '--dir',
        metavar='<traces path>',