Example #1
def global_r_call(function_name, args=None, r_file=PATIENT_MODULE_FILE_NAME):
	"""
	Shared shortcut for all the previous functions that use this same calling pattern
	:param function_name: name of the ro.globalenv function to call
	:type function_name: str
	:param args: usually a selector like GroupID
	:type args:
	:param r_file: The file to be loaded for the call
	:type r_file: str
	:rtype:
	"""
	# Source & export R code
	source_file(r_file)

	# Export a function to call
	r_getter_func = ro.globalenv[function_name]

	# R call
	data = list()
	try:
		# Arguments magics
		if args:
			if type(args) is not tuple:
				args = (args,)
			data = r_getter_func(*args)
		else:
			data = r_getter_func()
	except RRuntimeError as e:
		get_logger().error(e)

	return data
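A brief usage sketch for the wrapper above; the R function names and the GroupID value are hypothetical, not part of the original module:

# Single argument: wrapped into a one-element tuple internally.
patients = global_r_call("getPatientsByGroup", args=42)

# Several arguments are passed as a tuple; omitting args calls the R function with none.
samples = global_r_call("getSamplesByGroup", args=(42, "tumor"))
groups = global_r_call("getAllGroups")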
Example #2
 def __init__(self, ml_name, data, split_dict, **kwargs):
     self.sample_feats, self.label_feat = self.preprocessing(data.sample_feats, data.label_feats, INDEX)
     assert np.all(np.isfinite(self.label_feat))
     self.ml_name = ml_name
     # training and test
     self.Xtrain, self.Xtest, self.ytrain, self.ytest = \
         train_test_split(self.sample_feats, self.label_feat, **split_dict)
     # model
     train_test_str = '-' + dict_to_str(split_dict)
     if kwargs:
         classifier_str = '-' + dict_to_str(kwargs)
         pickle_name = str(INDEX) + '-' + self.ml_name + train_test_str + classifier_str + '.pkl'
     else:
         pickle_name = str(INDEX) + '-' + self.ml_name + train_test_str + '.pkl'
     model_pickle = os.path.join(model_dir, pickle_name)
     if os.path.exists(model_pickle):
         utils.get_logger().warning('{} exists'.format(model_pickle))
         with open(model_pickle, 'rb') as model:
             self.clf = pickle.load(model)
     else:
         utils.get_logger().warning('{} does not exist'.format(model_pickle))
         self.clf = self.get_clf(kwargs)
         self.clf.fit(self.Xtrain, self.ytrain)
         with open(model_pickle, 'wb') as model:
             pickle.dump(self.clf, model)
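The snippet relies on helpers that are not shown here (dict_to_str, model_dir, INDEX, utils); a plausible dict_to_str, given how it is used to build pickle file names, might look like this (an assumption, not the project's actual helper):

def dict_to_str(d):
    # Sort keys so identical settings always yield the same, filename-safe string.
    return '-'.join('{}_{}'.format(k, d[k]) for k in sorted(d))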
Example #3
    def auto_register(mng_cls, cls, proxytype=None, init_args=(),
                      init_kwargs={}, **kwargs):
        '''
        Register shared object classes with a default proxytype.

        Parameters
        ----------
        cls : type
            class which is to be registered with the manager for use
            as a shared object
        proxytype : subclass of multiprocessing.managers.BaseProxy
            Proxy object used to communicate with a shared instance of cls.
            If None, then the following steps are attempted:
            1) an attempt is made to call the class' build_proxy method which
               is expected to provision and return a proxy object as well as
               register with the manager any sub-proxies which it expects to
               utilize.
            2) failing that, a default -> make_inst_proxy(cls) will be used.
        '''
        assert type(cls) == type
        typeid = cls.__name__
        if proxytype is None:
            try:  # to use cls defined proxy
                proxytype = cls.build_proxy(mng_cls)
            except AttributeError:
                proxytype = make_inst_proxy(cls)
                get_logger().debug("no proxy was provided for '{}' using "
                                   "default '{}'".format(cls, proxytype))

        cls = functools.partial(cls, *init_args, **init_kwargs)
        mng_cls.register(typeid, cls, proxytype=proxytype, **kwargs)
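A hedged usage sketch, assuming auto_register is reachable as shown and the shared class defines no build_proxy method, so the make_inst_proxy fallback is taken:

from multiprocessing.managers import BaseManager

class Counter:
    def __init__(self, start=0):
        self.value = start

    def increment(self):
        self.value += 1

class CounterManager(BaseManager):
    pass

# Registers typeid "Counter"; instances created through the manager start at 10.
auto_register(CounterManager, Counter, init_args=(10,))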
Example #4
    def _stop_service(self, container_id):
        ship = get_ship_name()
        service_dict = None
        service_list = kv_list('ships/{}/service/'.format(ship))
        if service_list:
            key = fnmatch.filter(service_list, '*/{}'.format(container_id))
            service_dict = kv_get(key[0]) if key else None
        if service_dict and service_dict['Status'] in ['crashed', 'not-recovered']:
            kv_remove(key[0])
        else:
            run_command_in_container('supervisorctl stop armada_agent', container_id)

            # TODO: Compatibility with old microservice images. Should be removed in future armada version.
            run_command_in_container('supervisorctl stop register_in_service_discovery', container_id)

            docker_api = docker_client.api()
            last_exception = None
            try:
                deregister_services(container_id)
            except:
                traceback.print_exc()
            for i in range(3):
                try:
                    docker_api.stop(container_id)
                    kv_remove(key[0])
                except Exception as e:
                    last_exception = e
                    traceback.print_exc()
                if not is_container_running(container_id):
                    break
            if is_container_running(container_id):
                get_logger().error('Could not stop container: {}'.format(container_id))
                raise last_exception
Example #5
def get_event_time(event, epoch=0.0):
    '''Return the event's microsecond timestamp converted to seconds, minus `epoch`.
    '''
    value = event.getHeader('Event-Date-Timestamp')
    if value is None:
        get_logger().warning("Event '{}' has no timestamp!?".format(
                             event.getHeader("Event-Name")))
        return None
    return float(value) / 1e6 - epoch
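A usage sketch, assuming FreeSWITCH-style event objects whose 'Event-Date-Timestamp' header is in microseconds (the events list is illustrative):

# Anchor on the first event, then report the rest relative to it.
epoch = get_event_time(events[0]) or 0.0
for ev in events[1:]:
    t = get_event_time(ev, epoch=epoch)
    if t is not None:
        print("{:+.3f}s {}".format(t, ev.getHeader("Event-Name")))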
Example #6
 def __init__(self):
     if os.path.isfile(data_pickle):
         utils.get_logger().warning('{} exists'.format(data_pickle))
         with open(data_pickle, 'rb') as raw_data:
             self.data = pickle.load(raw_data)
     else:
         utils.get_logger().warning('{} does not exist'.format(data_pickle))
         assert os.path.isfile(data_file)
         self.data = np.genfromtxt(data_file, delimiter=',')
         with open(data_pickle, 'wb') as raw_data:
             pickle.dump(self.data, raw_data)
Example #7
def connect_callback(timestamp):
    parser = argparse.ArgumentParser()

    parser.add_argument('--slave-host', help='Hostname or IP of the slave database')
    parser.add_argument('--slave-port', help='Port number for the slave database')
    parser.add_argument('--slave-user', help='Username for the slave database')
    parser.add_argument('--slave-password', help='Password for the slave database')
    parser.add_argument('--slave-database', help='Name of the slave database')

    parser.add_argument('--history-host', help='Hostname or IP of the history database')
    parser.add_argument('--history-port', help='Port number for the history database')
    parser.add_argument('--history-user', help='Username for the history database')
    parser.add_argument('--history-password', help='Password for the history database')
    parser.add_argument('--history-database', help='Name of the history database')

    args = parser.parse_args()

    slave = {
        'host': args.slave_host,
        'port': args.slave_port,
        'user': args.slave_user,
        'password': args.slave_password,
        'database': args.slave_database
    }

    history = {
        'host': args.history_host,
        'port': args.history_port,
        'user': args.history_user,
        'password': args.history_password,
        'database': args.history_database
    }


    slavecon, histcon = connect(slave, history)

    inspector_logger = get_logger('inspector')
    populator_logger = get_logger('populator')

    inspector = SlaveInspector(slavecon, logger=inspector_logger)
    populator = HistoryPopulator(histcon, logger=populator_logger)

    populator.create_tables()
    populator.update(timestamp)
    for schema in inspector.schemas():
        populator.add_schema(schema)
        for table in inspector.tables(schema):
            inspector.columns(table)
            populator.add_table(table)
            populator.create_table(table)
            populator.fill_table(table)

    return slavecon, inspector, populator
Example #8
def main():
    consul_mode, ship_ips, datacenter = _get_runtime_settings()
    ship_external_ip = os.environ.get('SHIP_EXTERNAL_IP', '')
    consul_config_content = consul_config.get_consul_config(consul_mode, ship_ips, datacenter, ship_external_ip)

    with open(consul_config.CONFIG_PATH, 'w') as config_file:
        config_file.write(consul_config_content)

    command = '/usr/local/bin/consul agent -config-file {config_path}'.format(config_path=consul_config.CONFIG_PATH)
    get_logger().info('RUNNING: {}'.format(command))

    args = command.split()
    os.execv(args[0], args)
Example #9
 def __init__(self, qid, event_queue):
     self.logger = get_logger()
     self.qid = qid
     self.aid = 1003903
     self.clientid = random.randrange(11111111, 99999999)
     self.msg_id = random.randrange(1111111, 99999999)
     self.group_map = {}      # group mapping
     self.group_m_map = {}    # mapping from group to its members
     self.uin_qid_map = {}    # mapping from uin to QQ number
     self.check_code = None
     self.skey = None
     self.ptwebqq = None
     self.require_check = False
     self.QUIT = False
     self.last_msg = {}
     self.event_queue = event_queue
     self.check_data = None           # data returned by CheckHanlder
     self.blogin_data = None          # data returned before login
     self.rc = 1
     self.start_time = time.time()
     self.hb_last_time = self.start_time
     self.poll_last_time = self.start_time
     self._helper = HttpHelper()
     self.connected = False
     self.polled = False
     self.heartbeated = False
     self.group_lst_updated = False
Example #10
def test_rora_connect():
	"""
	Test if RORA server is online and connection can be made successfully
	:return: True|False
	:rtype: bool
	"""
	r_code = 'source("%sconnection.R");' % settings.RORA_LIB
	r_code += 'link <- roraConnect();'
	r_code += 'dbDisconnect(link);'
	try:
		ro.r(r_code)
	except RRuntimeError as e:
		get_logger().error(e)
		return False

	return True
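A usage sketch: gate R-backed work behind the connectivity check above (the query function name is hypothetical):

if test_rora_connect():
    data = global_r_call("getPatientsByGroup", args=42)
else:
    get_logger().error("RORA server unreachable; skipping R-backed queries")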
Example #11
def extract_from_file(filename, num_process):
    import utils

    global LOGGER
    LOGGER = utils.get_logger()

    dataset_path = u'../datasets/wiki'
    # get processed words
    processed_words = get_target_words(dataset_path)

    jobs = dd(list)
    for line in codecs.open(filename, encoding='utf-8'):
        line = line.split()
        target_word, page_title, offset = line[:3]
        if target_word not in processed_words:
            jobs[target_word].append(dict(word=target_word, page_title=page_title, offset=offset, fetch_links=True))

    LOGGER.info("Total {} of jobs available. Num of consumer = {}".format(len(jobs), num_process))
    if num_process > 1:
        pool = Pool(num_process)
        pool.map(extract_instances_for_word, jobs.values())
    else:
        # for v in jobs.values():
        for v in [jobs['milk']]:
            extract_instances_for_word(v)

    LOGGER.info("Done.")
Example #12
def proc_luna_running(**kw):
    """ Fetch test statuses from Redis; if the test finished, notify the service
        via the API and call the reduce job.
    """
    ext = {'test_id': kw.get('redis_value', {}).get('id')}
    logger = get_logger(**ext)

    if not ('t_fqdn' in kw and 't_tank_id' in kw):
        logger.error('proc_luna_running call. Malformed params:{}'.format(kw))

    try:
    except TankClientError as e:
        logger.error('Tank API call failed: {}'.format(e))
        raise

    if tank_msg['status_code'] != 'FINISHED':
        if kw.get('status') and TestStateTr.tank_by_port(kw.get('status')) != tank_msg['status_code']:
            # test state changes since last check, need to notify
            port_state = TestStateTr.port_by_tank(tank_msg['status_code'])
            redis_value = kw['redis_value']
            redis_value.update({'status': port_state})
            redis.hset(r_adr['monitor_finish'], kw['id'],
                       msgpack.packb(redis_value))
            diff = {
                'status': port_state,
            }
Example #13
    def __init__(self):
        my_jid = JID(USER+'/Bot')
        self.my_jid = my_jid
        settings = XMPPSettings({
                            "software_name": "qxbot",
                            "software_version": __version__,
                            "software_os": "Linux",
                            "tls_verify_peer": False,
                            "starttls": True,
                            "ipv6":False,
                            "poll_interval": 10,
                            })

        settings["password"] = PASSWORD
        version_provider = VersionProvider(settings)
        event_queue = settings["event_queue"]
        self.webqq = WebQQ(QQ, event_queue)
        self.connected = False
        #self.mainloop = TornadoMainLoop(settings)
        self.mainloop = EpollMainLoop(settings)
        self.client = Client(my_jid, [self, version_provider],
                             settings, self.mainloop)
        self.logger = get_logger()
        self.msg_dispatch = MessageDispatch(self, self.webqq, BRIDGES)
        self.xmpp_msg_queue = Queue.Queue()
Example #14
def setup():
    global Options
    global Log
    global RootDir, VerifDir

    p = argparse.ArgumentParser(
        prog='untb',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        usage="untb <tb_name>",
        version="%(prog)s v"+str(__version__),
        description=__description__)

    p.add_argument('tb_name',           action='append',     nargs='+',   default=None)
    p.add_argument('-f', '--force',     action='store_true',              default=False,   help="Force creation of new testbench even if it already exists.")
    p.add_argument('-d', '--dbg',       action='store_true',              default=False,   help="Turns on debug lines.")

    try:
        Options = p.parse_args()
    except:
        print "Usage error: See untb -h for more information."
        sys.exit(1)

    verbosity = {False: logging.INFO, True: logging.DEBUG}[Options.dbg]
    Log = utils.get_logger('log', verbosity)
    utg.Log = Log

    try:
        RootDir = utils.calc_root_dir()
    except utils.AreaError:
        Log.critical("CWD is not in an Octeon Tree.")
        sys.exit(255)

    VerifDir = os.path.join(RootDir, "verif")
Example #15
def user_network():
    """
    Start iteration process for load data to user_network

    """
    log = get_logger()
    with get_connect() as con, get_connect_iptable() as con_ip:
        cur = con.cursor()
        cur_ip = con_ip.cursor()
        # get settings
        settings = get_settings(cur)
        load_date_start = datetime.strptime(settings['LOAD_DATE_START'],
                                            '%Y.%m.%d %H:%M:%S.%f')
        load_pause = int(settings['LOAD_PAUSE'])
        load_pause_empty = int(settings['LOAD_PAUSE_EMPTY'])
        cnt_rows = int(settings['LOAD_ROWS'])

        while True:
            date = user_network_load(cur_ip, cur, load_date_start, cnt_rows)
            log.info("Load data. load_date_start: %s", date)
            # save date between iterations
            if date > load_date_start:
                sql = "update load_settings set value=%s where param=%s"
                date_str = datetime.strftime(date, '%Y.%m.%d %H:%M:%S.%f')
                cur.execute(sql, [date_str, 'LOAD_DATE_START'])
            else:
                # sleep if no new data
                log.debug("No new data sleep( %s )", load_pause_empty)
                time.sleep(load_pause_empty)
            if load_pause:
                log.debug("sleep between loads sleep( %s )", load_pause)
                time.sleep(load_pause)
            load_date_start = date
Example #16
    def __init__(self, modules):
        """Called when module is loaded"""
        self._modules = modules

        # Set default name if none is set
        if not self.name:
            self.name = class_name(self)

        # Load logger
        self.logger = get_logger(self.name, [stdout])

        section = self.get_config(False)
        # Load enable
        self.enable = None
        for key, value in section:
            if key.lower() == "enable":
                self.enable = value.lower() == "true"
        if self.enable is None:
            raise Exception("Config for module '%s' doesn't have enable = true/false" % self.name)

        # Load interval
        self.interval = None
        for key, value in section:
            if key.lower() == "interval":
                self.interval = float(value)
        if self.interval is None:
            raise Exception("Config for module '%s' doesn't have interval = secs, use 0 for running once" % self.name)
Example #17
 def initialize(self):
     """
     Initializes the instance with a mongodb database instance
     """
     self.logger = get_logger('profile_handler')
     self._connection = get_mongodb_connection()
     self._db = self._connection[options.mongodb_name]
Example #18
 def __init__(self, qxbot, webqq, bridges):
     self.logger = get_logger()
     self.qxbot = qxbot
     self.webqq = webqq
     self.uin_qid_map = {}
     self.qid_uin_map = {}
     self.bridges = bridges
     self._maped = False
Example #19
    def __init__(self, encoder, decoder, optimizer=RMSprop(), logger=get_logger()):
        super(Seq2seq, self).__init__(logger)
        self.encoder = encoder
        self.decoder = decoder

        self.optimizer = optimizer

        self.params = self.encoder.params + self.decoder.params
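Note that optimizer=RMSprop() and logger=get_logger() above are evaluated once, when the def statement runs, and those same objects are then shared by every call that omits them. A hedged variant that defers creation to call time (a sketch, not the project's code):

    def __init__(self, encoder, decoder, optimizer=None, logger=None):
        # Build the defaults lazily so each instance can get its own objects.
        super(Seq2seq, self).__init__(logger if logger is not None else get_logger())
        self.encoder = encoder
        self.decoder = decoder
        self.optimizer = optimizer if optimizer is not None else RMSprop()
        self.params = self.encoder.params + self.decoder.params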
Example #20
 def __init__(self, db):
     self.db = db
     self.mutex = threading.Lock()
     self.job_queue = list()
     self.job_queue_sem = threading.Semaphore(0)
     self.processing = list()
     self.quiting = False
     self.logger = utils.get_logger('greedy')
Example #21
 def initialize(self):
     """
     Initializes the instance with a mongodb database instance
     :param db: an instance of a pymongo database object
     """
     self._logger = get_logger('lesson_handler')
     self._connection = get_mongodb_connection()
     self._db = self._connection[options.lesson_db]
Example #22
def try_group(label_feat):
    label_feat = label_feat.astype(np.int8)
    uniques = np.unique(label_feat)
    if uniques.shape[0] < GROUP_NUM:
        return label_feat
    utils.get_logger().info('dimension={}, needs fitting'.format(uniques.shape[0]))
    fitted_feat = np.zeros(label_feat.shape, dtype=int)
    separators = np.linspace(np.min(label_feat), np.max(label_feat), GROUP_NUM)
    for i in xrange(label_feat.shape[0]):
        for j in xrange(GROUP_NUM - 1):
            # print(separators[j], label_feat[i], separators[j+1])
            if separators[j] <= label_feat[i] < separators[j + 1]:
                fitted_feat[i] = j
                break
        else:
            fitted_feat[i] = GROUP_NUM - 1
    return fitted_feat
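A small worked example of the binning above, run in the snippet's own module (so np, utils and GROUP_NUM are available) and assuming GROUP_NUM = 4 under the Python 2 interpreter the xrange call implies:

# linspace(0, 9, 4) gives separators [0., 3., 6., 9.]; the maximum value
# falls through to the for/else branch and lands in bin GROUP_NUM - 1.
label_feat = np.array([0, 1, 3, 5, 8, 9], dtype=np.int8)
print(try_group(label_feat))  # -> [0 0 1 1 2 3]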
Example #23
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--history-host', help='Hostname or IP of the history database')
    parser.add_argument('--history-port', help='Port number for the history database')
    parser.add_argument('--history-user', help='Username for the history database')
    parser.add_argument('--history-password', help='Password for the history database')
    parser.add_argument('--history-database', help='Name of the history database')

    parser.add_argument('--clone-host', help='Hostname or IP of the clone database')
    parser.add_argument('--clone-port', help='Port number for the clone database')
    parser.add_argument('--clone-user', help='Username for the clone database')
    parser.add_argument('--clone-password', help='Password for the clone database')
    parser.add_argument('--clone-database', help='Name of the clone database')

    args = parser.parse_args()

    history = {
        'host': args.history_host,
        'port': args.history_port,
        'user': args.history_user,
        'password': args.history_password,
        'database': args.history_database
    }

    clone = {
        'host': args.clone_host,
        'port': args.clone_port,
        'user': args.clone_user,
        'password': args.clone_password,
        'database': args.clone_database
    }

    histcon, clonecon = connect(history, clone)

    inspector_logger = get_logger('inspector')
    populator_logger = get_logger('populator')

    inspector = HistoryInspector(histcon, logger=inspector_logger)
    populator = ClonePopulator(clonecon, inspector.update, history, logger=populator_logger)
    populator.initialize()
    for schema in inspector.schemas():
        populator.create_schema(schema)
        for table in inspector.tables(schema):
            inspector.columns(table)
            populator.create_table(table)
    clonecon.commit()
Example #24
def app_root():
    """Base url for steve_zissou service"""
    logger = get_logger(__name__)
    logger.debug('Request made against root url')
    if 'RUN_ENV' in os.environ:
        if os.environ['RUN_ENV'] == 'production':
            return render_template('base.html')
    path_to_js = url_for('static', filename='main.min.js')
    return render_template('base.html', path_to_js=path_to_js)
Example #25
def run():
    utils.configure_logger('debug')
    logger = utils.get_logger()
    input_directory = sys.argv[1]  # '../datasets/wiki-filtered'
    out_directory = sys.argv[2]  # '../datasets/wiki-senses'
    files = os.listdir(input_directory)
    files = [os.path.join(input_directory, f) for f in files]
    logger.info('total number of files: %d' % len(files))
    create_sense_dataset(files, out_directory)
    logger.info('done')
Example #26
def run():
    utils.configure_logger('debug')
    logger = utils.get_logger()
    input_directory = sys.argv[1]
    out_directory = sys.argv[2]
    num_of_fold = int(sys.argv[3])
    files = os.listdir(input_directory)
    files = [os.path.join(input_directory, f) for f in files]
    logger.info('total number of files: %d' % len(files))
    create_IMS_formatted_dataset(files, out_directory, k=num_of_fold, num_of_process=30)
    logger.info('done')
Example #27
def reduce_test(test_id, t_fqdn, t_tank_id, eval_only=False):
    """ Fetch test statuses from Redis, if test finished notify service via API
        and call reduce job.
    """
    ext = {'test_id': test_id}
    logger = get_logger(**ext)

    try:
                                    t_test_id=t_tank_id).get('phout.log')
        reduce_phout(test_id, phout_url, eval_only=eval_only)
    except TankClientError as e:
        logger.error('3rd party API call failed: {}'.format(e))
Example #28
 def _stop_service(self, container_id):
     docker_api = docker_client.api()
     last_exception = None
     try:
         exec_id = docker_api.exec_create(container_id, 'supervisorctl stop register_in_service_discovery')
         docker_api.exec_start(exec_id['Id'])
     except:
         traceback.print_exc()
     try:
         deregister_services(container_id)
     except:
         traceback.print_exc()
     for i in range(3):
         try:
             docker_api.stop(container_id)
         except Exception as e:
             last_exception = e
             traceback.print_exc()
         if not is_container_running(container_id):
             break
     if is_container_running(container_id):
         get_logger().error('Could not stop container: {}'.format(container_id))
         raise last_exception
Example #29
def reduce_arts(test_id, files):
    """ Reduce test results without any external API calls,
        used to process result manually uploaded by user
        without any Lunapark or Jenkins connection.
    """
    ext = {'test_id': test_id}
    logger = get_logger(**ext)

    reduce_phout(test_id, files['phout'])

    started_at, finished_at = phout_time_info(files['phout'])
    diff = {
        'started_at': started_at.isoformat(),
        'finished_at': finished_at.isoformat(),
Example #30
 def __init__(self, config, resume_training=True, model_name='dense_bi_lstm'):
     # set configurations
     self.cfg, self.model_name, self.resume_training, self.start_epoch = config, model_name, resume_training, 1
     self.logger = get_logger(os.path.join(self.cfg.ckpt_path, 'log.txt'))
     # build model
     self._add_placeholder()
     self._add_embedding_lookup()
     self._build_model()
     self._add_loss_op()
     self._add_accuracy_op()
     self._add_train_op()
     print('params number: {}'.format(np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
     # initialize model
     self.sess, self.saver = None, None
     self.initialize_session()
Example #31
args = utils.parse_args()

train_path = args.train
dev_path = args.dev
test_path = args.test
batch_size = args.batch_size
emb_to_use = args.emb
emb_path = args.emb_path
lstm_size = args.num_units
num_filters = args.num_filters
lr = args.lr
decay_rate = args.decay_rate
grad_clip = args.grad_clip
gamma = args.gamma

logger = utils.get_logger("BiLSTM-CNN-CRF")

data = data_utils.load_data(args)

# data["train_word"] = train_data
# data["dev_word"] = dev_data
# data["test_word"] = test_data
# data["word_emb_table"] = word_emb_table
# data["word_collection"] = word_collection
# data["char"] = char_data
# data["label_collection"] = label_collection
# data["train_orth"] = train_data_orth
# data["dev_orth"] = dev_data_orth
# data["test_orth"] = test_data_orth
# data["orth_word_emb_table"] = orth_word_emb_table
# data["orth_char"] = orth_char_data
Example #32
class WorldDaemon:
    """ The world daemon determines whether to update a given world asset (see
    assets/world.py) based on the default 'asset_max_age' in settings.cfg or
    based on the custom 'max_age' attribute of a given asset.

    Since the daemon does not always update all assets, it minimizes resource
    usage and can therefore be left running without whaling on CPU and/or
    physical memory.

    Finally, the world daemon DOES NOT actually refresh/update or otherwise
    gather any data or run any queries. Rather, it initializes a World object
    (see above) and then works with that object as necessary. """
    def __init__(self):

        self.logger = utils.get_logger(log_name="world_daemon",
                                       log_level=settings.get(
                                           "world", "log_level"))

        self.pid_dir = settings.get("application", "pid_root_dir")
        self.pid_file_path = os.path.join(self.pid_dir, "world_daemon.pid")
        self.set_pid()

    def check_pid_dir(self):
        """ Checks to see if the pid directory exists and is writable. Creates
        a new dir if it needs to do so. Also logs a WARN if the user requesting
        the check is not the owner of the pid dir. """

        if not os.path.isdir(self.pid_dir):
            self.logger.error("PID dir '%s' does not exist!" % self.pid_dir)
            try:
                shutil.os.mkdir(self.pid_dir)
                self.logger.critical("Created PID dir '%s'!" % self.pid_dir)
            except Exception as e:
                self.logger.error("Could not create PID dir '%s'!" %
                                  self.pid_dir)
                self.logger.exception(e)
                sys.exit(255)

        # warn if we're going to start off by trying to write a lock/pid file to
        # some other user's directory, b/c that would be bad
        pid_dir_owner = getpwuid(os.stat(self.pid_dir).st_uid).pw_name
        cur_user = os.getlogin()
        if pid_dir_owner != cur_user:
            self.logger.warn("PID dir owner is not the current user (%s)!" %
                             cur_user)

    def set_pid(self):
        """ Updates 'self.pid' with the int in the daemon pid file. Returns None
        if there is no file or the file cannot be parsed. """
        self.pid = None

        if os.path.isfile(self.pid_file_path):
            try:
                self.pid = int(file(self.pid_file_path, "rb").read().strip())
            except Exception as e:
                self.logger.exception(e)

            try:
                os.kill(self.pid, 0)
            except OSError as e:
                self.logger.exception(e)
                self.logger.error(
                    "PID %s does not exist! Removing PID file..." % self.pid)
                shutil.os.remove(self.pid_file_path)
                self.pid = None

    def command(self, command=None):
        """ Executes a daemon command. Think of this as the router for incoming
        daemon commands/operations. Register all commands here. """

        if command == "start":
            self.start()
        elif command == "stop":
            self.stop()
        elif command == "restart":
            self.stop()
            time.sleep(3)
            self.start()
        elif command == "status":
            pass
        else:
            self.logger.error("Unknown daemon command ('%s')!" % command)

        # sleep a second and dump a status, regardless of command
        time.sleep(1)
        self.dump_status()

    @retry(
        tries=6,
        delay=2,
        jitter=1,
        logger=utils.get_logger(settings.get("world", "log_level")),
    )
    def start(self):
        """ Starts the daemon. """

        self.set_pid()
        if self.pid is not None:
            try:
                os.kill(self.pid, 0)
                self.logger.warn(
                    "PID %s exists! Daemon is already running! Exiting..." %
                    self.pid)
                sys.exit(1)
            except OSError:
                self.logger.info("Starting World Daemon...")
        else:
            self.logger.info("Starting World Daemon...")

        # pre-flight sanity checks and initialization tasks
        self.check_pid_dir()

        context = daemon.DaemonContext(
            working_directory=(settings.get("api", "cwd")),
            detach_process=True,
            umask=0o002,
            pidfile=PIDLockFile(self.pid_file_path),
            files_preserve=[self.logger.handlers[0].stream],
        )

        with context:
            while True:
                try:
                    self.run()
                except Exception as e:
                    self.logger.error(
                        "An exception occurred during daemonization!")
                    self.logger.exception(e)
                    raise

    def run(self):
        """ A run involves checking all warehouse assets and, if they're older
        than their 'max_age' attrib (default to the world.asset_max_age value),
        it refreshes them.

        Once finished, it sleeps for world.refresh_interval, which is measured
        in minutes. """

        W = World()
        W.refresh_all_assets()
        self.logger.debug("World Daemon will sleep for %s minutes..." %
                          settings.get("world", "refresh_interval"))
        time.sleep(settings.get("world", "refresh_interval") * 60)

    def stop(self):
        """ Stops the daemon. """

        self.set_pid()
        self.logger.warn("Preparing to kill PID %s..." % self.pid)
        if self.pid is not None:
            os.kill(self.pid, 15)
            time.sleep(2)
            try:
                os.kill(self.pid, 0)
            except:
                self.logger.warn("PID %s has been killed." % self.pid)
        else:
            self.logger.debug(
                "Daemon is not running. Ignoring stop command...")

    def get_uptime(self, return_type=None):
        """ Uses the pid file to determine how long the daemon has been active.
        Returns None if the daemon isn't active. Otherwise, this returns a raw
        timedelta. """

        if os.path.isfile(self.pid_file_path):
            pid_file_age = time.time() - os.stat(
                self.pid_file_path)[stat.ST_MTIME]
            ut = timedelta(seconds=pid_file_age)
            uptime = "%sd %sh %sm" % (ut.days, ut.seconds // 3600,
                                      (ut.seconds // 60) % 60)
        else:
            return None

        if return_type == "date":
            return datetime.fromtimestamp(
                os.stat(self.pid_file_path)[stat.ST_MTIME])

        return uptime

    def dump_status(self, output_type="CLI"):
        """ Prints daemon status to stdout. """

        active = False
        d = {"active": active}
        if self.pid is not None and os.path.isfile(self.pid_file_path):
            active = True

        if active:
            owner_uid = os.stat(self.pid_file_path).st_uid
            owner_name = getpwuid(owner_uid).pw_name

            try:
                utils.mdb.world.find()
            except Exception as e:
                self.logger.error(
                    "Daemon is active, but MDB cannot be reached!")
                self.logger.exception(e)
                raise

            d = {}
            d["active"] = active
            d["up_since"] = self.get_uptime("date")
            d["uptime_hms"] = self.get_uptime()
            d["owner_uid"] = owner_uid
            d["owner_name"] = owner_name
            d["pid"] = self.pid
            d["pid_file"] = self.pid_file_path
            d["assets"] = utils.mdb.world.find().count()

        if output_type == dict:
            return d
        elif output_type == "CLI":
            spacer = 15
            print("\n\tWorld Daemon stats:\n")
            for k, v in sorted(d.iteritems()):
                utils.cli_dump(k, spacer, v)
            print("\n")
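A hedged command-line entry point for the daemon class above; the argument handling is an assumption, not part of the original module (it reuses the module's sys import):

if __name__ == "__main__":
    # e.g. `python world_daemon.py start|stop|restart|status`
    daemon = WorldDaemon()
    daemon.command(sys.argv[1] if len(sys.argv) > 1 else "status")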
Example #33
def _extract_axioms(graph, patterns) -> dict:
    """Run actual axiom extraction on CaLiGraph."""
    utils.get_logger().debug('CaLi2Ax: Extracting axioms..')
    axioms = defaultdict(set)

    front_pattern_dict = {}
    for (front_pattern,
         back_pattern), axiom_patterns in _get_confidence_pattern_set(
             patterns, True, False).items():
        cat_axioms._fill_dict(
            front_pattern_dict,
            list(front_pattern), lambda d: cat_axioms._fill_dict(
                d, list(reversed(back_pattern)), axiom_patterns))

    back_pattern_dict = {}
    for (front_pattern,
         back_pattern), axiom_patterns in _get_confidence_pattern_set(
             patterns, False, True).items():
        cat_axioms._fill_dict(
            back_pattern_dict,
            list(front_pattern), lambda d: cat_axioms._fill_dict(
                d, list(reversed(back_pattern)), axiom_patterns))

    enclosing_pattern_dict = {}
    for (front_pattern,
         back_pattern), axiom_patterns in _get_confidence_pattern_set(
             patterns, True, True).items():
        cat_axioms._fill_dict(
            enclosing_pattern_dict,
            list(front_pattern), lambda d: cat_axioms._fill_dict(
                d, list(reversed(back_pattern)), axiom_patterns))

    for node in graph.content_nodes:
        property_frequencies = graph.get_property_frequencies(node)

        node_labels = set()
        for part in graph.get_parts(node):
            if cat_util.is_category(part):
                node_labels.add(cat_util.category2name(part))
            elif list_util.is_listcategory(part) or list_util.is_listpage(
                    part):
                node_labels.add(list_util.list2name(part))

        labels_without_by_phrases = [
            nlp_util.remove_by_phrase(label, return_doc=True)
            for label in node_labels
        ]
        for node_doc in labels_without_by_phrases:
            node_axioms = []

            front_prop_axiom = _find_axioms(front_pattern_dict, node, node_doc,
                                            property_frequencies)
            if front_prop_axiom:
                node_axioms.append(front_prop_axiom)

            back_prop_axiom = _find_axioms(back_pattern_dict, node, node_doc,
                                           property_frequencies)
            if back_prop_axiom:
                node_axioms.append(back_prop_axiom)

            enclosing_prop_axiom = _find_axioms(enclosing_pattern_dict, node,
                                                node_doc, property_frequencies)
            if enclosing_prop_axiom:
                node_axioms.append(enclosing_prop_axiom)

            prop_axioms_by_pred = {
                a[1]: {x
                       for x in node_axioms if x[1] == a[1]}
                for a in node_axioms
            }
            for pred, similar_prop_axioms in prop_axioms_by_pred.items():
                if dbp_store.is_object_property(pred):
                    res_labels = {
                        a[2]: dbp_store.get_label(a[2])
                        for a in similar_prop_axioms
                    }
                    similar_prop_axioms = {
                        a
                        for a in similar_prop_axioms
                        if all(res_labels[a[2]] == val
                               or res_labels[a[2]] not in val
                               for val in res_labels.values())
                    }
                best_prop_axiom = max(similar_prop_axioms,
                                      key=operator.itemgetter(3))
                axioms[node].add(best_prop_axiom)

    utils.get_logger().debug(
        f'CaLi2Ax: Extracted {sum(len(axioms) for axioms in axioms.values())} axioms for {len(axioms)} categories.'
    )
    return axioms
Example #34
# -*- coding: utf-8 -*-
"""
 Created by Dr.W on 2018/5/16
"""

import os
import config
from utils import get_yam
from utils import set_time
from utils import get_logger
from utils.phone import Phone
from entity.tasks import Tasks
from multiprocessing import Pool
from utils.appium import AppiumServer

log = get_logger("runner")


def selector_devices(ds):
    res = []
    for d in ds.get("appium", []):
        if d.get("closed", False):
            log.info("this device is closed.")
            continue
        res.append(d)
    return res


def get_devices():
    ds = get_yam(config.DEVICES_PATH)
    return selector_devices(ds)
Example #35
    parser.add_argument(
        "--ocm-url",
        default=
        "https://api-integration.6943.hive-integration.openshiftapps.com")
    parser.add_argument("--ocp-versions")
    parser.add_argument("--installation-timeout", type=int)
    parser.add_argument("--public-registries", default="")
    parser.add_argument("--img-expr-time", default="")
    parser.add_argument("--img-expr-interval", default="")
    parser.add_argument("--check-cvo", default="False")

    return deployment_options.load_deployment_options(parser)


deploy_options = handle_arguments()
log = utils.get_logger('deploy-service-configmap')

SRC_FILE = os.path.join(os.getcwd(), 'deploy/assisted-service-configmap.yaml')
DST_FILE = os.path.join(os.getcwd(), 'build', deploy_options.namespace,
                        'assisted-service-configmap.yaml')
SERVICE = "assisted-service"


def get_deployment_tag(args):
    if args.deploy_manifest_tag:
        return args.deploy_manifest_tag
    if args.deploy_tag:
        return args.deploy_tag


def main():
Example #36
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import tensorflow as tf
from spatial_transformer3 import transformer
import numpy as np
from tf_utils import weight_variable, bias_variable, dense_to_one_hot
import cv2
from resnet import output_layer
import get_data
from config import *
import time
from tensorflow.contrib.slim.nets import resnet_v2
slim = tf.contrib.slim
import utils
logger = utils.get_logger()


def get_4_pts(theta, batch_size):
    with tf.name_scope('get_4_pts'):
        pts1_ = []
        pts2_ = []
        pts = []
        h = 2.0 / grid_h
        w = 2.0 / grid_w
        tot = 0
        one_ = tf.ones([batch_size, 1, 2]) / do_crop_rate
        temp_p = [
            0.5, 0.5, 0, 0.5, -0.5, 0.5, 0.5, 0, 0.3, 0.3, -0.5, 0, 0.5, -0.5,
            0, -0.5, -0.5, -0.5
        ]
Example #37
def main():
    config = Config()

    logger, log_dir = get_logger(os.path.join(config.model, "logs/"))
    logger.info("=======Model Configuration=======")
    logger.info(config.desc)
    logger.info("=================================")

    try:
        train_x, dev_x, test_x, train_y, dev_y, test_y, train_m, dev_m, test_m, test_dt = load_agg_selected_data_mem(data_path=config.data_path, \
            x_len=config.x_len, \
            y_len=config.y_len, \
            foresight=config.foresight, \
            cell_ids=config.train_cell_ids, \
            dev_ratio=config.dev_ratio, \
            test_len=config.test_len, \
            seed=config.seed)

        model = Model(config)
        if config.allow_gpu:
            model = model.cuda()

        optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)

        # get train data
        TrainDataSet = BatchDataset(train_x, train_m, train_y)
        TrainSampler = tud.RandomSampler(TrainDataSet)
        TrainDataLoader = tud.DataLoader(TrainDataSet,
                                         batch_size=config.batch_size,
                                         sampler=TrainSampler,
                                         num_workers=2)

        # get valid Data
        dev_x, dev_m, dev_y = torch.Tensor(dev_x), torch.Tensor(
            dev_m), torch.Tensor(dev_y)

        if config.allow_gpu:
            dev_x, dev_m, dev_y = dev_x.cuda(), dev_m.cuda(), dev_y.cuda()

        step = 0
        no_improv = 0
        best_loss = 100
        model_dir = make_date_dir(os.path.join(config.model, 'model_save/'))
        logger.info("Start training")

        start_time = time()
        for i in range(config.num_epochs):
            epoch = i + 1

            # train
            model.train()
            for batch_x, batch_m, batch_y in TrainDataLoader:
                step = step + 1

                if config.allow_gpu:
                    batch_x, batch_m, batch_y = batch_x.cuda(), batch_m.cuda(
                    ), batch_y.cuda()

                optimizer.zero_grad()
                prediction, loss, rse, smape, mae = model(
                    batch_x, batch_m, batch_y)

                loss.backward()
                grad_norm = torch.nn.utils.clip_grad_norm_(
                    model.parameters(), config.clip)
                optimizer.step()

                if step % 100 == 0:
                    logger.info(
                        "epoch: %d, step: %d, loss: %.4f, rse: %.4f, smape: %.4f, mae: %.4f"
                        % (epoch, step, loss, rse, smape, mae))

            # dev score for each epoch (no mini batch)
            with torch.no_grad():
                model.eval()
                prediction, dev_loss, dev_rse, dev_smape, dev_mae = model(
                    dev_x, dev_m, dev_y)

            if dev_loss < best_loss:
                best_loss = dev_loss
                no_improv = 0
                # logger.info("New score! : dev_loss: %.4f, dev_rse: %.4f, dev_smape: %.4f, dev_mae: %.4f" %
                #             (dev_loss, dev_rse, dev_smape, dev_mae))
                # logger.info("Saving model at {}".format(model_dir))
                torch.save(model, model_dir + "/" + config.model + ".pth")
            else:
                no_improv += 1
                if no_improv == config.nepoch_no_improv:
                    logger.info("No improvement for %d epochs" % no_improv)
                    break

        elapsed = time() - start_time
        # generating results (no mini batch)
        logger.info("Saving model at {}".format(model_dir))
        logger.info("Elapsed training time {0:0.2f} mins".format(elapsed / 60))
        logger.info("Training finished, exit program")

    except:
        logger.exception("ERROR")
Example #38
import utils

if __name__ == '__main__':
    print(id(utils.get_logger()))
    print(id(utils.get_logger()))
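The snippet above only shows that utils.get_logger() hands back the same object on repeated calls; the helper itself is not included. A minimal sketch of such a module-level singleton (an assumption, not the project's actual utils module):

import logging

_LOGGER = None

def get_logger(name="app", level=logging.INFO):
    """Create the shared logger on first use and return the same instance afterwards."""
    global _LOGGER
    if _LOGGER is None:
        _LOGGER = logging.getLogger(name)
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s"))
        _LOGGER.addHandler(handler)
        _LOGGER.setLevel(level)
    return _LOGGER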
Example #39
import pydicom as dicom
from os.path import join as pjoin
import os
import sys
import numpy as np
from dicom_utils import get_patient_data
from utils import get_logger

logger = get_logger(__name__)

class SaleReader:
    def __init__(self, path):
        self.path = path
        self.images = []
        self.num_images = 0
        self.dcm_paths = []
        self.patient_data = None
        self.broken = False
        self._read_images()
        
    def _read_images(self):
        # find .dcm files
        files = [f for f in os.listdir(self.path) if f.lower().endswith('.dcm')]
        if len(files) == 0:
            self.broken = True
            logger.warning('There are no frames. This folder should be deleted. Path: {}'.format(self.path))
            return
        
        slice_sequences_by_imagetype_by_seriesdesc = \
            self._read_slice_sequences(files)
        
Example #40
import numpy as np
import scipy.special
import time

from edge_selection_tree import evaluate_accept_reject_outcome
from kidney_ip import solve_picef_model
from utils import get_logger, query_node_hash, outcome_node_hash

logger = get_logger()

LARGE_UCB_CONST = 1000


def create_mcts_policy():
    def mcts_get_next_edge(graph, queried_edges, edge_rejections, tree):

        # prepare the tree by resetting the root and removing unneeded query nodes.
        accepted_edges = [
            e for e, r in zip(queried_edges, edge_rejections) if not r
        ]
        rejected_edges = [
            e for e, r in zip(queried_edges, edge_rejections) if r
        ]

        new_root = MultistageOutcomeNode(None, accepted_edges, rejected_edges,
                                         tree)
        tree.prepare_tree(new_root)

        best_child = tree.search_next_child()

        if best_child is None:
Example #41
def train():
    # load data sets
    train_sentences = load_sentences(FLAGS.train_file, FLAGS.lower, FLAGS.zeros)
    dev_sentences = load_sentences(FLAGS.dev_file, FLAGS.lower, FLAGS.zeros)
    test_sentences = load_sentences(FLAGS.test_file, FLAGS.lower, FLAGS.zeros)

    # Use selected tagging scheme (IOB / IOBES)
    update_tag_scheme(train_sentences, FLAGS.tag_schema)
    update_tag_scheme(test_sentences, FLAGS.tag_schema)

    # create maps if not exist
    if not os.path.isfile(FLAGS.map_file):
        # create dictionary for word
        if FLAGS.pre_emb:
            dico_chars_train = char_mapping(train_sentences, FLAGS.lower)[0]
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico_chars_train.copy(),
                FLAGS.emb_file,
                list(itertools.chain.from_iterable(
                    [[w[0] for w in s] for s in test_sentences])
                )
            )
        else:
            _c, char_to_id, id_to_char = char_mapping(train_sentences, FLAGS.lower)

        # Create a dictionary and a mapping for tags
        _t, tag_to_id, id_to_tag = tag_mapping(train_sentences)
        with open(FLAGS.map_file, "wb") as f:
            pickle.dump([char_to_id, id_to_char, tag_to_id, id_to_tag], f)
    else:
        with open(FLAGS.map_file, "rb") as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)

    # prepare data, get a collection of list containing index
    train_data = prepare_dataset(
        train_sentences, char_to_id, tag_to_id, FLAGS.lower
    )
    dev_data = prepare_dataset(
        dev_sentences, char_to_id, tag_to_id, FLAGS.lower
    )
    test_data = prepare_dataset(
        test_sentences, char_to_id, tag_to_id, FLAGS.lower
    )
    print("%i / %i / %i sentences in train / dev / test." % (
        len(train_data), len(dev_data), len(test_data)))

    train_manager = BatchManager(train_data, FLAGS.batch_size)
    dev_manager = BatchManager(dev_data, 100)
    test_manager = BatchManager(test_data, 100)
    # make path for store log and model if not exist
    make_path(FLAGS)
    if os.path.isfile(FLAGS.config_file):
        config = load_config(FLAGS.config_file)
    else:
        config = config_model(char_to_id, tag_to_id)
        save_config(config, FLAGS.config_file)
    make_path(FLAGS)

    log_path = os.path.join("log", FLAGS.log_file)
    logger = get_logger(log_path)
    print_config(config, logger)

    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    steps_per_epoch = train_manager.len_data
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec, config, id_to_char, logger)
        logger.info("start training")
        loss = []
        for i in range(FLAGS.max_epoch):
            for batch in train_manager.iter_batch(shuffle=True):
                step, batch_loss = model.run_step(sess, True, batch)
                loss.append(batch_loss)
                if step % FLAGS.steps_check == 0:
                    iteration = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{}, "
                                "NER loss:{:>9.6f}".format(
                        iteration, step % steps_per_epoch, steps_per_epoch, np.mean(loss)))
                    loss = []

            best = evaluate(sess, model, "dev", dev_manager, id_to_tag, logger)
            if best:
                save_model(sess, model, FLAGS.ckpt_path, logger)
            evaluate(sess, model, "test", test_manager, id_to_tag, logger)
Example #42
def train(args):
    utils.make_workspace_dirs(args.workspace)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    anchors = np.loadtxt(args.anchors) if args.anchors else None
    # Shared size between collate_fn and scale sampler.
    shared_size = torch.IntTensor(args.in_size).share_memory_()
    scale_sampler = utils.TrainScaleSampler(shared_size, args.scale_step,
                                            args.rescale_freq)
    logger = utils.get_logger(path=os.path.join(args.workspace, 'log.txt'))

    torch.backends.cudnn.benchmark = True

    dataset = ds.HotchpotchDataset(args.dataset_root,
                                   cfg='./data/train.txt',
                                   backbone=args.backbone,
                                   augment=True)
    collate_fn = partial(ds.collate_fn, in_size=shared_size)
    data_loader = torch.utils.data.DataLoader(dataset,
                                              args.batch_size,
                                              True,
                                              num_workers=args.workers,
                                              collate_fn=collate_fn,
                                              pin_memory=args.pin,
                                              drop_last=True)

    num_ids = int(dataset.max_id + 1)
    if args.backbone == 'darknet':
        model = darknet.DarkNet(anchors,
                                num_classes=args.num_classes,
                                num_ids=num_ids).to(device)
    elif args.backbone == 'shufflenetv2':
        model = shufflenetv2.ShuffleNetV2(anchors,
                                          num_classes=args.num_classes,
                                          num_ids=num_ids,
                                          model_size=args.thin,
                                          box_loss=args.box_loss,
                                          cls_loss=args.cls_loss).to(device)
    elif args.backbone == 'sosnet':
        model = sosmot.SOSMOT(anchors,
                              num_classes=args.num_classes,
                              num_ids=num_ids,
                              box_loss=args.box_loss,
                              cls_loss=args.cls_loss).to(device)
    else:
        print('unknown backbone architecture!')
        sys.exit(0)
    if args.checkpoint:
        model.load_state_dict(torch.load(args.checkpoint))

    params = [p for p in model.parameters() if p.requires_grad]
    backbone_neck_params, detection_params, identity_params = grouping_model_params(
        model)
    if args.optim == 'sgd':
        # optimizer = torch.optim.SGD(params, lr=args.lr,
        #     momentum=args.momentum, weight_decay=args.weight_decay)
        optimizer = torch.optim.SGD([{
            'params': backbone_neck_params
        }, {
            'params': detection_params,
            'lr': args.lr * args.lr_coeff[1]
        }, {
            'params': identity_params,
            'lr': args.lr * args.lr_coeff[2]
        }],
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.Adam(params,
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)

    if args.freeze_bn:
        for name, param in model.named_parameters():
            if 'norm' in name:
                param.requires_grad = False
                logger.info('freeze {}'.format(name))
            else:
                param.requires_grad = True

    trainer = f'{args.workspace}/checkpoint/trainer-ckpt.pth'
    if args.resume:
        trainer_state = torch.load(trainer)
        optimizer.load_state_dict(trainer_state['optimizer'])

    if -1 in args.milestones:
        args.milestones = [int(args.epochs * 0.5), int(args.epochs * 0.75)]
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=args.milestones, gamma=args.lr_gamma)

    start_epoch = 0
    if args.resume:
        start_epoch = trainer_state['epoch'] + 1
        lr_scheduler.load_state_dict(trainer_state['lr_scheduler'])

    logger.info(args)
    logger.info('Start training from epoch {}'.format(start_epoch))
    model_path = f'{args.workspace}/checkpoint/{args.savename}-ckpt-%03d.pth'

    for epoch in range(start_epoch, args.epochs):
        model.train()
        logger.info(('%8s%10s%10s' + '%10s' * 8) %
                    ('Epoch', 'Batch', 'SIZE', 'LBOX', 'LCLS', 'LIDE', 'LOSS',
                     'SBOX', 'SCLS', 'SIDE', 'LR'))

        rmetrics = defaultdict(float)
        optimizer.zero_grad()
        for batch, (images, targets) in enumerate(data_loader):
            warmup = min(args.warmup, len(data_loader))
            if epoch == 0 and batch <= warmup:
                lr = args.lr * (batch / warmup)**4
                for i, g in enumerate(optimizer.param_groups):
                    g['lr'] = lr * args.lr_coeff[i]

            loss, metrics = model(images.to(device), targets.to(device),
                                  images.shape[2:])
            loss.backward()

            if args.sparsity:
                model.correct_bn_grad(args.lamb)

            num_batches = epoch * len(data_loader) + batch + 1
            if ((batch + 1) % args.accumulated_batches
                    == 0) or (batch == len(data_loader) - 1):
                optimizer.step()
                optimizer.zero_grad()

            for k, v in metrics.items():
                rmetrics[k] = (rmetrics[k] * batch + metrics[k]) / (batch + 1)

            fmt = tuple([('%g/%g') % (epoch, args.epochs), ('%g/%g') % (batch,
                len(data_loader)), ('%gx%g') % (shared_size[0].item(), shared_size[1].item())] + \
                list(rmetrics.values()) + [optimizer.param_groups[0]['lr']])
            if batch % args.print_interval == 0:
                logger.info(('%8s%10s%10s' + '%10.3g' *
                             (len(rmetrics.values()) + 1)) % fmt)

            scale_sampler(num_batches)

        torch.save(model.state_dict(), f"{model_path}" % epoch)
        torch.save(
            {
                'epoch': epoch,
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict()
            }, trainer)

        if epoch >= args.eval_epoch:
            pass
        lr_scheduler.step()
Example #43
parser.add_argument("--exp", type=int, default=0, help="experiment id number")

args = parser.parse_args()
args.mem = args.recurrence > 1

# Define run dir

suffix = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")
default_model_name = "{}_{}_seed{}_{}".format(args.env, args.algo, args.seed,
                                              suffix)
model_name = args.model or default_model_name
model_dir = utils.get_model_dir(args.env, model_name, args.exp, args.seed)

# Define logger, CSV writer, json args, and Tensorboard writer

logger = utils.get_logger(model_dir)
csv_file, csv_writer = utils.get_csv_writer(model_dir)
if args.tb:
    from tensorboardX import SummaryWriter
    tb_writer = SummaryWriter(model_dir)
with open(model_dir + '/args.json', 'w') as outfile:
    json.dump(vars(args), outfile)

# Log command and all script arguments

logger.info("{}\n".format(" ".join(sys.argv)))
logger.info("{}\n".format(args))

# Set seed for all randomness sources

utils.seed(args.seed)
Example #44
import sys, string, os
import json, pickle
import numpy as np
from collections import defaultdict
from data_utils import *
import gensim, utils

MAX_LENGTH = 120
MAX_CHAR_PER_WORD = 45
root_symbol = "##ROOT##"
root_label = "<ROOT>"
word_end = "##WE##"
logger = utils.get_logger("LoadData")


def read_conll_sequence_labeling(path,
                                 word_alphabet,
                                 label_alphabet,
                                 train_abble=True,
                                 out_dir=None):
    """
    read data from file in conll format
    :param path: file path
    :return: sentences of words and labels, sentences of indexes of words and labels.
    """
    #word_alphabet = []
    #label_alphabet = ['O', "PER", "MISC", "ORG", "LOC"]
    word_sentences = []
    label_sentences = []

    word_index_sentences = []
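    # --- illustrative sketch of the parsing loop; NOT the original body ---
    # Assumes blank lines separate sentences, the word sits in the first
    # column and the label in the last, and that the alphabet objects expose
    # a get_index() method; the real implementation may differ.
    words, labels = [], []
    with open(path, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                if words:
                    word_sentences.append(words)
                    label_sentences.append(labels)
                    word_index_sentences.append(
                        [word_alphabet.get_index(w) for w in words])
                words, labels = [], []
                continue
            columns = line.split()
            words.append(columns[0])
            labels.append(columns[-1])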
Exemple #45
0
                    }
                    save_path = os.path.join(
                        cfg["training"]["checkpoint"],
                        "{}_{}_best_model.pkl".format(cfg["model"]["arch"],
                                                      cfg["data"]["dataset"]),
                    )
                    torch.save(state, save_path)
        epoch += 1


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="config")
    parser.add_argument(
        "--config",
        nargs="?",
        type=str,
        default="configs/fcn8s_pascal.yml",
        help="Configuration file to use",
    )

    args = parser.parse_args()

    with open(args.config) as fp:
        cfg = yaml.load(fp, Loader=yaml.Loader)

    logdir = cfg["training"]["checkpoint"]
    logger = utils.get_logger(logdir)
    shutil.copy(args.config, logdir)

    train(cfg, logger)
Exemple #46
0
import time
from watchdog.observers import Observer as WatchObserver
from watchdog.events import FileSystemEventHandler
from utils import RABBITMQ_HOST, get_logger
from kombu import Connection, Queue, uuid, Consumer, Producer
import difflib
import signal

log = get_logger()


class Observer():
    def __init__(self):
        self.worker = None
        signal.signal(signal.SIGTERM, self.exit)
        signal.signal(signal.SIGINT, self.exit)
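        # Ignore SIGCHLD so terminated child processes are reaped automatically (no zombies)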
        signal.signal(signal.SIGCHLD, signal.SIG_IGN)

    def run(self):
        observer = WatchObserver()

        dirname = '/etc/artemis'
        filename = 'config.yaml'

        try:
            with Connection(RABBITMQ_HOST) as connection:
                event_handler = self.Handler(dirname, filename, connection)
                observer.schedule(event_handler, dirname, recursive=False)
                observer.start()
                log.info('started')
                self.should_stop = False
Exemple #47
0
timestamp = str(int(time.time())) if args.mode == 'train' else args.demo_model
output_path = os.path.join('.', args.train_data + "_save", timestamp)
if not os.path.exists(output_path):
    os.makedirs(output_path)
summary_path = os.path.join(output_path, "summaries")
paths['summary_path'] = summary_path
if not os.path.exists(summary_path):
    os.makedirs(summary_path)
model_path = os.path.join(output_path, "checkpoints")
if not os.path.exists(model_path):
    os.makedirs(model_path)
ckpt_prefix = os.path.join(model_path, "model")
paths['model_path'] = ckpt_prefix
result_path = os.path.join(output_path, "results")
paths['result_path'] = result_path
if not os.path.exists(result_path):
    os.makedirs(result_path)
log_path = os.path.join(result_path, "log.txt")
paths['log_path'] = log_path
get_logger(log_path).info(str(args))

if __name__ == '__main__':

    build_word_index(args.word_embedding_file, args.src_vocab_file,
                     args.tgt_file, args.tgt_vocab_file)

    src_vocab = get_vocab(args.src_vocab_file)
    src_vocab_size = len(src_vocab)
    src_unknown = src_vocab_size
    src_padding = src_vocab_size + 1
    #print(len(src_vocab))
    #print(vocab_size)

    tgt_vocab = get_vocab(args.tgt_vocab_file)
    tgt_vocab_size = len(tgt_vocab)
Exemple #48
0
import argparse
import os

import yaml

import deployment_options
import utils

parser = argparse.ArgumentParser()
deploy_options = deployment_options.load_deployment_options(parser)
log = utils.get_logger('deploy-image-service')

SRC_FILE = os.path.join(os.getcwd(), 'deploy/assisted-image-service.yaml')
DST_FILE = os.path.join(os.getcwd(), 'build', deploy_options.namespace,
                        'assisted-image-service.yaml')


def main():
    utils.verify_build_directory(deploy_options.namespace)

    with open(SRC_FILE, "r") as src:
        log.info(
            f"Loading source template file for assisted-image-service: {SRC_FILE}"
        )
        raw_data = src.read()
        raw_data = raw_data.replace('REPLACE_NAMESPACE',
                                    f'"{deploy_options.namespace}"')
        raw_data = raw_data.replace('REPLACE_IMAGE_SERVICE_IMAGE',
                                    os.environ.get("IMAGE_SERVICE"))
        data = yaml.safe_load(raw_data)
        log.info(data)
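    # Illustrative continuation (an assumption, not the original code): the
    # rendered manifest is presumably written to the per-namespace build
    # directory defined by DST_FILE.
    with open(DST_FILE, "w") as dst:
        yaml.dump(data, dst, default_flow_style=False)
    log.info(f"Wrote assisted-image-service manifest to {DST_FILE}")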
Exemple #49
0
import utils
from models.augment_cnn import AugmentCNN
from torch.utils.data import Dataset, DataLoader
import scipy.io as io
from thop import profile
from thop import clever_format

config = AugmentConfig()

device = torch.device("cuda")

# tensorboard
writer = SummaryWriter(log_dir=os.path.join(config.path, "tb"))
writer.add_text('config', config.as_markdown(), 0)

logger = utils.get_logger(os.path.join(config.path, "{}.log".format(config.name)))
config.print_params(logger.info)


def main():
    logger.info("Logger is set - training start")
    fileRoot = r'/home/hlu/Data/VIPL'
    saveRoot = r'/home/hlu/Data/VIPL_STMap' + str(config.fold_num) + str(config.fold_index)
    n_classes = 1
    input_channels = 3
    input_size = np.array([64, 300])
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    toTensor = transforms.ToTensor()
    resize = transforms.Resize(size=(64, 300))
    # set default gpu device id
Exemple #50
0
import argparse
from utils import get_logger
import pandas as pd
from utils import read_file_contents_list
import numpy as np
import os
import matplotlib.pyplot as plt

logger = get_logger('Dice')


def get_dice_data_table(dice_data_root, item_list, num_scans, mask_flag):
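    # Build a (num_scans x num_items) matrix of Dice scores, one column per item read from <item>/<mask_flag>.csv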
    data_table = np.zeros((num_scans, len(item_list)))
    for item_idx in range(len(item_list)):
        item = item_list[item_idx]
        dice_csv_path = os.path.join(dice_data_root, item)
        dice_csv_path = os.path.join(dice_csv_path, mask_flag + '.csv')
        logger.info(f'Read dice csv: {dice_csv_path}')
        dice_df = pd.read_csv(dice_csv_path)
        np_list = dice_df['Dice'].to_numpy()
        data_table[:, item_idx] = np_list[:]
    return data_table


def main():
    parser = argparse.ArgumentParser('Box plot for dice statistics.')
    parser.add_argument('--dice-data-root', type=str)
    parser.add_argument('--item-list', type=str)
    parser.add_argument('--out-fig-folder', type=str)
    parser.add_argument('--num-scans', type=int, default=50)
    args = parser.parse_args()
Exemple #51
0
    --region=       Snapshot region (default: current region)
    --size=         Image rootfs size (default: snapshot size)
    --name=         Image name (default: snapshot name, ie. $turnkey_version.ebs)
    --desc=         Image website link (default: enumerated from name)
    --arch=         Image architecture (default: enumerated from name)

"""
import sys
import getopt

import utils

from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping

log = utils.get_logger('ebs-register')


class Error(Exception):
    pass


def usage(e=None):
    if e:
        print >> sys.stderr, "error: " + str(e)

    print >> sys.stderr, "Syntax: %s [ -options ] snapshot_id" % (sys.argv[0])
    print >> sys.stderr, __doc__.strip()

    sys.exit(1)
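# A rough sketch (an assumption, not the original code) of how the options
# documented in the usage text above could be parsed with getopt; the real
# main() may differ.
def parse_args():
    try:
        opts, args = getopt.gnu_getopt(
            sys.argv[1:], "h",
            ["help", "region=", "size=", "name=", "desc=", "arch="])
    except getopt.GetoptError as e:
        usage(e)

    kwargs = {}
    for opt, val in opts:
        if opt in ("-h", "--help"):
            usage()
        kwargs[opt.lstrip("-")] = val

    if len(args) != 1:
        usage("incorrect number of arguments")

    return args[0], kwargs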
Exemple #52
0
import pandas as pd
import os
import subprocess
from parse import parse
from fit_circle import fit_circle_algebraic
import matplotlib.pyplot as plt
from matplotlib import colors
from utils import mkdir_p, get_logger
import nibabel as nib
from scipy import ndimage as ndi
import skimage.morphology
import skimage.measure
import argparse
import SimpleITK as sitk

logger = get_logger('Inference the CT native padding mask')


def create_body_mask(in_img, out_mask):
    rBody = 2

    print(f'Get body mask of image {in_img}')

    image_itk = sitk.ReadImage(in_img)

    gaussian_filter = sitk.DiscreteGaussianImageFilter()
    gaussian_filter.SetVariance(2)
    print('Applying Gaussian filter')
    filtered_image = gaussian_filter.Execute(image_itk)
    image_np = sitk.GetArrayFromImage(filtered_image)
Exemple #53
0
from model import Model
import torch
from hyperparams import hps
from data_batcher import Vocab, restore_text
from utils import (get_logger, DEBUG, tonp, Timer)

logger = get_logger(__name__, DEBUG)


def train():
    logger.info('start building vocab data')

    vocab = Vocab(hps.vocab_file, hps.vocab_size)

    logger.info('end building vocab data')
    logger.info('vocab size: %s' % vocab.size())

    model = Model(vocab, hps)
    if hps.use_cuda:
        model = model.cuda()
    if hps.restore is not None:
        # raise ValueError('Noe data to restore')
        model.load_state_dict(torch.load(hps.restore))

    logger.info('----Start training----')
    timer = Timer()
    timer.start()
    for step in range(hps.start_step, hps.num_iters + 1):

        # Forward -------------------------------------------------------------
        outputs = model(None, None, 'infer')
Exemple #54
0
with open(FLAGS.map_file, "rb") as f:
    char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)
# Create the required directories
make_path(FLAGS)
# Read the configuration file
if os.path.isfile(FLAGS.config_file):
    config = load_config(FLAGS.config_file)
else:
    config = config_model(char_to_id, tag_to_id)
    save_config(config, FLAGS.config_file)
# Initialize Flask and serve the static pages
app = Flask(__name__, static_url_path="/static")

# Set up the log file and logger
log_path = os.path.join("log", FLAGS.log_file)
logger = get_logger(log_path)
# Configure GPU options and create the TensorFlow session
tf_config = tf.ConfigProto()
sess = tf.Session(config=tf_config)
# Load the trained model
model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec, config,
                     id_to_char, logger)


def heartbeat():
    print(
        time.strftime('%Y-%m-%d %H:%M:%S - heartbeat',
                      time.localtime(time.time())))
    timer = threading.Timer(60, heartbeat)
    timer.start()
Exemple #55
0
""" Module for handling data requests from the client. """

from flask_socketio import emit

from database import get_cities, get_objects_in_sector
from global_context import CITIES, RESOURCE_NODES
from models import City, ResourceNode, Ship, Tile
from utils import get_logger
from world.coordinates import Coordinate
from server import current_user, database_session, login_required, socketio

LOGGER = get_logger(__name__)


@socketio.on("get_cities")
@login_required
def send_cities(message):  # pylint: disable=unused-argument
    """ Trigger a 'load_city' event for each city in the game. """
    LOGGER.debug(f"City list requested by {current_user}")
    for city in get_cities(database_session,
                           current_user.ship.location.sector):
        resources = {}
        for resource_held in city.resources:
            resources[resource_held.resource.name] = resource_held.amount

        response = {
            "position": city.location.coordinate,
            "id": city.id,
            "name": city.name,
            "resources": resources,
            "population": city.population,
Exemple #56
0
from pyppeteer_spider.spider import PyppeteerSpider
from utils import short_sleep, get_logger
from datetime import datetime
from pprint import pformat
from pathlib import Path
import aioredis
import asyncio
import json
import csv
import sys


logger = get_logger(logger_name="pinterest",
                    log_save_path=Path("../logs/pinterest.log"))

redis_client = None
prof_data_key = 'pinterest_prof_data'
proc_dir_pages = 'pinterest_processed_dir_pages'

data_file_header = [
    'name', 'username', 'url', 'location', 'bio', 'posts', 'followerCount',
    'followingCount', 'avatar_url', 'domain', 'crawled', 'status_code'
]

file_stem = 'pinterest_user_data'
file_rows = 0

data_dir = Path(f"../data/{file_stem}")
data_dir.mkdir(exist_ok=True,parents=True)

# find current file for storing profile data.
data_file_numbers = [int(f.stem.split('.')[-1]) for f in data_dir.glob("*.tsv")]
if len(data_file_numbers) > 1:
    file_idx = max(data_file_numbers)
Exemple #57
0
#!/usr/bin/env python3.6
from os import environ, system
from sys import exit, argv
from utils import filebot, categories, get_logger
from config import config
from cleaner import clean

logger = get_logger('PlexBotDeluge')
logger.info("Starting PlexBotTorrent...")
base = config.torrents.strip('"')
torrent_id = argv[1]
torrent_name = argv[2]
torrent_dir = argv[3]
logger.info("Torrent ID: " + torrent_id)

if torrent_dir == base + "/Movies":
    category = "movies"
elif torrent_dir == base + "/Shows":
    category = "tv"
else:
    syslog("Invalid Torrent")
    exit(0)

cat = categories(category)
args = cat + " --torrent"
logger.info("FileBot Args: " + args)
filebot(args, None, False)
clean(torrent_id)
logger.info("Done :():")
exit(0)
Exemple #58
0
            next_state[2] = self.value_table[str(state)]

        if col != self.width - 1:
            next_state[3] = self.value_table[str([col + 1, row])]
        else:
            next_state[3] = self.value_table[str(state)]

        return next_state


# Main function
if __name__ == "__main__":
    # 0 means every-visit
    # 1 means first-visit

    logger = get_logger('mc.log')

    visit = [1]
    env = Env()

    EveryVisit0_FirstVisit1 = 1

    agent = MCAgent(actions=list(range(env.n_actions)))
    success_cnt = 0
    fail_cnt = 0
    total_step = 0

    MAX_EPISODES = 1000  # maximum number of episodes
    for episode in range(MAX_EPISODES):
        state = env.reset()  # start of episode: reset the environment and set the state to its initial value
        action = agent.get_action(state)
Exemple #59
0
    def __init__(self,
                 writer,
                 log_interval,
                 num_of_children,
                 input_channels,
                 input_shape,
                 num_classes,
                 learning_rate_child,
                 momentum_child,
                 num_branches,
                 num_of_layers,
                 out_filters,
                 controller_size,
                 controller_layers,
                 t0,
                 eta_min,
                 t_mult,
                 epoch_child,
                 isShared,
                 eps_clip=0.2,
                 K_epochs=10,
                 path=""):

        self.writer = writer
        self.log_interval = log_interval
        self.path = path

        self.num_of_children = num_of_children
        self.K_epochs = K_epochs

        self.input_shape = input_shape
        self.input_channels = input_channels
        self.num_classes = num_classes
        self.out_filters = out_filters

        self.isShared = isShared

        self.epoch_child = epoch_child

        self.num_branches = num_branches
        self.num_layers = num_of_layers

        self.learning_rate_child = learning_rate_child
        self.momentum = momentum_child

        self.t0 = t0
        self.eta_min = eta_min
        self.t_mult = t_mult

        self.eps_clip = eps_clip

        self.controller = PPOController(
            self.writer, self.num_layers, self.num_branches, controller_size,
            controller_layers
        )  # self,num_layers=2,num_branches=4,lstm_size=5,lstm_num_layers=2,tanh_constant=1.5,temperature=None

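        # A second controller serves as the frozen 'old' policy for PPO's clipped updates; both start from identical weights (synced below)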
        self.controller_old = PPOController(
            self.writer, self.num_layers, self.num_branches, controller_size,
            controller_layers
        )  # self,num_layers=2,num_branches=4,lstm_size=5,lstm_num_layers=2,tanh_constant=1.5,temperature=None

        self.controller.load_state_dict(self.controller_old.state_dict())

        self.children = list()

        self.bestchilds = Kindergarden(best_of=3)

        self.globaliter = 0
        self.logger = get_logger()

        if self.isShared:
            self.child = SharedEnasChild(self.num_layers,
                                         self.learning_rate_child,
                                         self.momentum,
                                         num_classes=self.num_classes,
                                         out_filters=self.out_filters,
                                         input_shape=self.input_shape,
                                         input_channels=self.input_channels,
                                         t0=t0,
                                         eta_min=eta_min)
Exemple #60
0
import aiohttp
import json
import asyncio
import re
from bs4 import BeautifulSoup

import sys
import os
sys.path.append(os.pardir)

from utils import get_proxy, get_ua, start_request, get_logger

logger = get_logger('jingxi_goods_analysis')


async def make_headers(session):
    ua = await get_ua(session)
    headers = {'user-agent': ua}
    return headers


class GoodsAnalysis:
    def __init__(self):
        self.data = []

    async def get_page_data(self, session, sku_id):
        url = f'https://m.jingxi.com/item/view?sku={sku_id}'
        for i in range(3):
            headers = await make_headers(session)
            proxy = await get_proxy(session)
            res = await start_request(session,