def main():
    """CLI entry point: register every target's subparser, then dispatch to the
    first target named on the command line (or print help if none matches)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', action='store_true', dest='verbose', help='be more verbose')
    parser.add_argument('--engine', type=str, default='podman', dest='engine',
                        help='choose engine between "docker" and "podman"')
    subparsers = parser.add_subparsers()

    # Instantiate every known target so each can register its own subparser.
    target_instances = {name: cls(None, subparsers) for name, cls in targets.items()}

    # Scan argv for the first token naming a target and hand control to it.
    for position, token in enumerate(sys.argv, 1):
        if token not in targets:
            continue
        instance = target_instances[token]
        if hasattr(instance, 'main'):
            # Give the target the remainder of argv (everything after its name).
            instance.argv = sys.argv[position:]
            instance.set_args()
            args = parser.parse_args()
            Config.add_args(vars(args))
            instance.main()
            sys.exit(0)

    parser.print_help()
def post_data(self, data):
    """POST one formatted sensor reading to the configured webserver.

    Returns the HTTPResponse so callers can inspect the status.

    BUGFIX: HTTPConnection.request() always returns None, so the old
    ``resp = http.request(...)`` never held a response and the exchange was
    never completed; getresponse() must be called to read the server's reply.
    """
    post_data = POST_DATA_FORMAT.format(self.sensor.photon_id,
                                        self.sensor.sensor_name,
                                        data, time.time())
    http = httplib.HTTPConnection(Config.get("WEBSERVER_POST_URL"))
    http.request("POST", Config.get("WEBSERVER_POST_URI"), post_data)
    # Complete the HTTP exchange and hand the response back to the caller
    # (backward compatible: previous version implicitly returned None).
    return http.getresponse()
def test_auditActivities(self):
    """_auditActivities should purge activities older than ~7 days, but only
    when the audit interval has elapsed since the previous audit run."""
    config = Config()
    config.merge(self.APP_INI)
    config["source"] = dict(type = "graphite")
    cherrypy.app = config
    mr = MainRoot()
    a = mr._activityStorage
    # Start from an empty activity store.
    for d in a.find():
        a.delete(d['_id'])
    now = datetime.datetime.utcnow()
    then = now - datetime.timedelta(days = 7.1)  # just past the 7-day cutoff
    a.save({ '_id': '10', 'ts': mr.tsFormat(then) })
    a.save({ '_id': '11', 'ts': mr.tsFormat(now) })
    mr._auditActivities()
    remaining = list(a.find())
    # The stale record ('10') is purged, the fresh one ('11') survives.
    self.assertEqual(1, len(remaining))
    self.assertEqual('11', remaining[0]['_id'])
    # Add another and make sure audit doesn't happen because delay hasn't
    # occurred yet.
    a.save({ '_id': '12', 'ts': mr.tsFormat(then) })
    mr._auditActivities()
    remaining = list(a.find())
    self.assertEqual(2, len(remaining))
    # Force the interval to zero; the stale record should now be purged.
    mr.AUDIT_INTERVAL = 0.0
    mr._auditActivities()
    remaining = list(a.find())
    self.assertEqual(1, len(remaining))
def ok_pressed(self):
    """Persist the entered SIDS token, refresh the parent window, then close this dialog."""
    token = self.ui.text_sids.text()
    Config.save_config(token)
    self.main_window.refresh_theme_list()
    self.main_window.show()
    self.close()
def main(training_from_scratch, args):
    """Train a character model: build it from a raw corpus, or resume from a
    checkpoint, then run training."""
    if training_from_scratch:
        raw = open(args.filename, 'rb').read().decode(encoding='utf-8')
        # preprocessing() replaces the raw text with its tokenized form.
        text, char2idx, idx2char = preprocessing(raw, args.checkpoint_dir, args.minocc)
        config = Config(len(idx2char), args.epochs)
        model = build_model(config)
    else:
        model = tf.keras.models.load_model(args.checkpoint)
        char2idx = unpickle(args.checkpoint_dir, 'char2idx')
        idx2char = unpickle(args.checkpoint_dir, 'idx2char')
        text = unpickle(args.checkpoint_dir, 'dataset')
        config = Config(len(idx2char), args.epochs, args.initepochs)
    # Direct per-token lookup works because text is a list of words.
    text_as_int = np.array([char2idx[token] for token in text])
    train_model(args.checkpoint_dir, text_as_int, model, config)
def sh(self):
    """Open an interactive bash shell inside the seed container."""
    # Verbose must be enabled, otherwise the shell prompt is swallowed.
    Config.set('verbose', True)
    print('Seed bash')
    yaml_path = Config.get("docker_yaml")
    run_shell_command(f'{engine_compose()} -f {yaml_path} exec seed bash')
def __init__(self, corpid=None, corpsecret=None):
    """Load API settings from configuration and restore a cached access token if any."""
    self.corp_params = {'corpid': corpid, 'corpsecret': corpsecret}
    settings = Config()
    self.api = settings.get('api')
    self.method = settings.get('method')
    self.cache = Cache.factory(settings.get('cache').adapter)
    # May be None when no token has been cached yet.
    self.access_token = self.cache.get('access_token')
def add_hosts(self):
    """Register host IP/hostname pairs, falling back to container info when unset."""
    ips = Config.get('ips') or get_boxes_container_info()['ips']
    hostnames = Config.get('hostnames') or get_boxes_container_info()['hostnames']
    _add_hosts(ips, hostnames)
def post_data(self, data):
    """POST one sensor reading to the webserver and log any non-200 reply."""
    timestamp = datetime.now().isoformat()
    payload = POST_DATA_FORMAT.format(self.sensor.sensor_id, data, timestamp)
    conn = httplib.HTTPConnection(Config.get("WEBSERVER_HOST_NAME"))
    conn.request("POST", Config.get("WEBSERVER_POST_DATA_URI"), payload)
    response = conn.getresponse()
    if response.status != 200:
        logger.warning("Error posting images to webserver. " + response.reason)
def toggleRefDisplay(wd):
    """Show or hide the reference-area widget *wd* based on the 'ref' config flag.

    :param wd: the reference-area widget whose layout is toggled
    """
    config = Config(GLOBAL_SECTION)
    show = config.readConf(GLOBAL_SECTION, 'ref', 'True') == str(True)
    wd.layout = Layout() if show else Layout(display = 'none')
def deploy(self):
    """Deploy OSDs: every LV of a volume group when 'vg' is set, else one device."""
    data = Config.get('data')
    vg = Config.get('vg')
    hostname = Config.get('hostname')
    if not hostname:
        # assume this host
        hostname = run_shell_command('hostname')
    if vg:
        deploy_osds_in_vg(vg)
    else:
        deploy_osd(data, hostname)
def load_median_experiments(ini_file_path, date_exp):
    """Collect per-repeat residuals for every experiment combination, compute the
    elementwise median across repeats, and pickle/return the combined DataFrame.

    ini_file_path -- experiment .ini consumed via Config/ConfigSectionMap
    date_exp      -- results subdirectory name under results/
    """
    Config.read(ini_file_path)
    Config.sections()
    models = dict(Config.items('inference'))
    number_data_points = ConfigSectionMap("test-data")['data-points'].split(',')
    list_noise_variance = ConfigSectionMap("test-data")['observation_noise'].split(',')
    repeat = ConfigSectionMap("others")['repeat']
    number_test_points = ConfigSectionMap("others")['number-test-points']
    test_problems = ConfigSectionMap("test-data")['test-problems'].split(',')
    total_steps_outer = ConfigSectionMap("MCMC")["total-steps-outer"]
    directory = "results/" + date_exp
    residual_list = []
    df_list = []
    index_it = 0
    for key in models:
        all_inf_string = ConfigSectionMap("inference")[key]
        list_of_infer = all_inf_string.split(";")
        for infer in list_of_infer:
            for test_problem in test_problems:
                for noise in list_noise_variance:
                    for n in number_data_points:
                        df = pd.DataFrame()
                        print(index_it)
                        index_it += 1
                        for index in range(int(repeat)):
                            experiment_name = key+'_'+infer+'_'+total_steps_outer+'_'\
                                + test_problem +'_'+noise+'_'+n+'_'+number_test_points+'_'+str(index)
                            output_file_name = directory + "/exp_" + experiment_name
                            try:
                                df = pd.read_pickle(output_file_name)
                                residual_list.append(np.abs(df['residuals'].values))
                            except (FileNotFoundError, ValueError):
                                # BUGFIX: the message was a bare discarded expression
                                # and never printed; also catch the missing-file case.
                                print("could not open " + output_file_name)
                        # Median across repeats, per MCMC step index.
                        res_matrix = np.matrix(residual_list)
                        residual_list = []
                        df['median-residual'] = pd.Series(
                            [np.median(res_matrix[:, i].tolist()) for i in df.index])
                        df['model'] = pd.Series([key for _ in range(len(df.index))], index=df.index)
                        df['test_problem'] = pd.Series([test_problem for _ in range(len(df.index))], index=df.index)
                        df['noise'] = pd.Series([noise for _ in range(len(df.index))], index=df.index)
                        df['n'] = pd.Series([n for _ in range(len(df.index))], index=df.index)
                        df['repeat'] = pd.Series([index for _ in range(len(df.index))], index=df.index)
                        df_list.append(df)
    df_experiment = pd.concat(df_list)
    df_experiment.to_pickle("results/median_residual_" + date_exp)
    return df_experiment
def deploy(self):
    """Deploy OSDs; with 'vg' set, create one OSD per logical volume in that group."""
    data = Config.get('data')
    hostname = Config.get('hostname')
    vg = Config.get('vg')
    if not hostname:
        # assume this host
        hostname = run_shell_command('hostname')
    if not vg:
        deploy_osd(data, hostname)
        return
    # deploy with vg: one OSD per LV belonging to the requested volume group
    report = json.loads(run_shell_command('lvs --reportformat json'))
    for lv in report['report'][0]['lv']:
        if lv['vg_name'] == vg:
            deploy_osd(f'{vg}/{lv["lv_name"]}', hostname)
def deploy_osds_in_vg(vg: str):
    """
    rotate host will deploy each osd in a different host

    deploying osds will not succeed with starting services so this
    makes another process to run on the background
    """
    if not inside_container():
        # Re-run this command inside the seed container.
        verbose = '-v' if Config.get('verbose') else ''
        print('Redirecting deploy osd in vg to inside container')
        run_dc_shell_command(
            f'/cephadm/box/box.py {verbose} osd deploy --vg {vg}', 1, 'seed'
        )
        return

    lvs = json.loads(run_shell_command('lvs --reportformat json'))
    # distribute osds per host, retrying the next host until one accepts the OSD
    hosts = get_orch_hosts()
    host_index = 0
    for lv in lvs['report'][0]['lv']:
        if lv['vg_name'] != vg:
            continue
        deployed = False
        while not deployed:
            deployed = deploy_osd(
                f'{vg}/{lv["lv_name"]}', hosts[host_index]['hostname']
            )
            host_index = (host_index + 1) % len(hosts)
def initializeComponent(self, config):
    """Instantiate every component described under *config*.

    Each child element must carry a <Class> tag of the form 'module.ClassName';
    the module is imported dynamically and the class constructed with the args
    pulled from the element.  Failures are logged and the component skipped.
    Returns the list of successfully constructed components.
    """
    components = []
    if config != None:
        for configItem in config.getchildren():
            try:
                [module,className] = configItem.find('Class').text.split('.')
            except:
                main_log.error('Module must have Class element')
                continue
            try:
                # SECURITY NOTE(review): exec/eval on config-supplied names runs
                # arbitrary code; acceptable only for trusted config files.
                exec('from ' + module+'.'+className + ' import *')
                main_log.debug(module +'.' +className + 'imported')
            except Exception as inst:
                main_log.error('Error importing ' + module+'.'+className+ '. Component not\
                initialized.')
                main_log.error(str(inst))
                continue
            args = configGetter.pullArgsFromItem(configItem)
            args['parentScope'] = self  # lets components call back into this scope
            try:
                new_component = eval(className+'(args)')
                new_component.addDieListener(self)
                components.append(new_component)
                main_log.info(className + 'initialized with args ' + str(args))
            except Exception as inst:
                main_log.error('Failure while initializing ' + className + ' with ' + str(args))
                main_log.error(str(inst))
    return components
def _read_config(data_path):
    '''Read the configuration file and return the configured log path.

    Returns the value of [log_path] path, '.' when the section exists but no
    matching section is found, or None when the file is missing or unreadable.
    (Python 2 code — note the `except Exception, e` syntax.)
    '''
    try:
        if(not os.path.exists(data_path)):
            return None
        conf_parse = Config(data_path)
        sections = conf_parse.getsections()
        path = '.'
        for section in sections:
            # only the [log_path] section is of interest
            if (section != "log_path"):
                continue
            path = conf_parse.get("log_path","path")
        return path
    except Exception,e:
        print e
        return None
def __init__(self):
    """Token-entry dialog: loads the cookie explanation text and applies theming."""
    super().__init__()
    self.ui = Ui_ConfigCreator()
    self.ui.setupUi(self)
    Styling.set_style(self)
    self.setWindowTitle("Enter your Token")
    explanation_path = os.path.join(os.path.realpath(__file__),
                                    "../../files/cookie_explanation.txt")
    with open(explanation_path, 'r', encoding='utf8') as f:
        # Re-join with '\n', normalising platform line endings (keeps a trailing newline).
        cookie_explanation = "".join(line + "\n" for line in f.read().splitlines())
    self.ui.label_sids.setText("SIDS Token: ")
    self.ui.label_explanation.setText(cookie_explanation)
    self.ui.btn_ok.setText("CONFIRM")
    self.ui.btn_ok.pressed.connect(self.ok_pressed)
    # dark mode options
    dark_mode_enabled = Config.load_config_key(Config.ConfigKeys.DARK_MODE.value)
    if dark_mode_enabled is None:
        dark_mode_enabled = True  # default to dark mode when unset
    if dark_mode_enabled:
        # style items
        palette = QPalette()
        palette.setColor(QPalette.Text, Qt.white)
        self.ui.text_sids.setPalette(palette)
def join(u, v, v_name, key, type_):
    """Join table *v* onto *u* along *key*.

    For many-to-many relations v is first aggregated per key
    (Config.aggregate_op chooses the op per column) and columns renamed with a
    type prefix; for other relations v is simply indexed by key and its columns
    namespaced as '<prefix>_<v_name>.<col>'.
    """
    def func_rename(x):
        # x is a (column, agg-op-name) pair from the aggregated MultiIndex;
        # the prefix is chosen from the op name first, then the column's prefix.
        if 'mv' in x[1]:
            return f'{CONSTANT.MULTI_CAT_PREFIX}{x[1]}({x[0]})'
        elif 'cat_union' in x[1]:
            return f'{CONSTANT.MULTI_CAT_PREFIX}{x[1]}({x[0]})'
        elif 'cat_last' in x[1]:
            return f'{CONSTANT.CATEGORY_PREFIX}{x[1]}({x[0]})'
        elif CONSTANT.NUMERICAL_PREFIX in x[0]:
            return f"{CONSTANT.NUMERICAL_PREFIX}{x[1]}({x[0]})"
        elif CONSTANT.CATEGORY_PREFIX in x[0]:
            return f"{CONSTANT.CATEGORY_PREFIX}{x[1]}({x[0]})"

    # type_ looks like '<x>_to_many' — only the third token matters here.
    if type_.split("_")[2] == 'many':
        agg_funcs = {
            col: Config.aggregate_op(col)
            for col in v
            if col != key[0]
            and not col.startswith(CONSTANT.TIME_PREFIX)
            # and not col.startswith(CONSTANT.MULTI_CAT_PREFIX)
        }
        v = v.groupby(key).agg(agg_funcs)
        v.columns = v.columns.map(func_rename)
        # v.columns = v.columns.map(lambda a:
        #     f"{CONSTANT.NUMERICAL_PREFIX}{a[1].upper()}({a[0]})")
    else:
        v = v.set_index(key)
        v.columns = v.columns.map(lambda a: f"{a.split('_', 1)[0]}_{v_name}.{a}")

    return u.join(v, on=key)
def initializeComponent(self, config):
    """Instantiate each component listed under *config*.

    Every child must provide a <Class> element of the form 'module.ClassName';
    the module is imported dynamically and the class built with the args taken
    from the element.  Failures are logged and skipped.  Returns the list of
    components that initialized successfully.
    """
    components = []
    if config != None:
        for configItem in config.getchildren():
            try:
                [module, className] = configItem.find("Class").text.split(".")
            except:
                main_log.error("Module must have Class element")
                continue
            try:
                # SECURITY NOTE(review): exec/eval with config-derived names runs
                # arbitrary code; only safe with trusted configuration files.
                exec("from " + module + "." + className + " import *")
                main_log.debug(module + "." + className + "imported")
            except Exception as inst:
                main_log.error(
                    "Error importing "
                    + module
                    + "."
                    + className
                    + ". Component not\
                initialized."
                )
                main_log.error(str(inst))
                continue
            args = configGetter.pullArgsFromItem(configItem)
            args["parentScope"] = self  # lets components call back into this scope
            try:
                new_component = eval(className + "(args)")
                new_component.addDieListener(self)
                components.append(new_component)
                main_log.info(className + "initialized with args " + str(args))
            except Exception as inst:
                main_log.error("Failure while initializing " + className + " with " + str(args))
                main_log.error(str(inst))
    return components
def __init__(self, info):
    """Model state holder; feature containers start empty and are filled during fit."""
    self.config = Config(info)
    # Populated later during preprocessing/fit.
    self.tables = None
    self.y = None
    self.one_hot_features = []
    self.one_hot_models = []
    self.mlbs = []
def loadFile(args): fileName = args[1] parentTags = [] if '-b' in args: parentTags.append('BehaviorConfiguration') if '-i' in args: parentTags.append('InputConfiguration') if not parentTags: parentTags = ['InputConfiguration', 'BehaviorConfiguration','PixelConfiguration', 'RendererConfiguration'] confRoot = config.loadConfigFile(fileName).getroot() for tag in parentTags: subTree = confRoot.find(tag) print tag + ':' nodesWithArgs = search.parental_tree_search(subTree,'.getchildren()', ".tag=='Args'") nodesWithDocs = search.parental_tree_search(subTree,'.getchildren()', ".tag=='Doc'") for obj in nodesWithArgs: args = obj.find('Args') cidEl = args.find('Id') docEl = args.find('Doc') or obj.find('Doc') classEl = obj.find('Class') cid = None doc = None className = None if cidEl != None: cid = cidEl.text if docEl != None: doc = docEl.text if classEl != None: className = classEl.text print '\tComponent %(id)s - Doc: %(doc)s - Class: %(class)s' % {'id':cid, 'doc':doc, 'class':className}
def temporal_join(u, v, v_name, key, time_col):
    """Time-aware join: stack v under u's key/time columns, sort by *time_col*,
    apply a size-capped rolling aggregation, and merge the rolled features back
    onto u using a '..._ROLLING5(v_name.col)' naming scheme.
    """
    timer = Timer()

    # Cap the rolling window (and hash bin size) relative to the size of u.
    window_size = CONSTANT.WINDOW_SIZE if len(u) * CONSTANT.WINDOW_RATIO < CONSTANT.WINDOW_SIZE \
        else int(len(u) * CONSTANT.WINDOW_RATIO)
    hash_max = CONSTANT.HASH_MAX if len(u) / CONSTANT.HASH_MAX > CONSTANT.HASH_BIN \
        else int(len(u) / CONSTANT.HASH_BIN)  # kept for the disabled rehash path

    if isinstance(key, list):
        assert len(key) == 1
        key = key[0]

    tmp_u = u[[time_col, key]]
    timer.check("select")

    # Stack u's key/time columns on top of v; the 'u'/'v' keys let us split later.
    tmp_u = pd.concat([tmp_u, v], keys=['u', 'v'], sort=False)
    timer.check("concat")

    tmp_u.sort_values(time_col, inplace=True)
    timer.check("sort")

    agg_funcs = {col: Config.aggregate_op(col) for col in v
                 if col != key
                 and not col.startswith(CONSTANT.TIME_PREFIX)
                 and not col.startswith(CONSTANT.MULTI_CAT_PREFIX)}

    tmp_u = tmp_u.rolling(window=window_size).agg(agg_funcs)

    tmp_u.columns = tmp_u.columns.map(
        lambda a: f"{CONSTANT.NUMERICAL_PREFIX}{a[1].upper()}_ROLLING5({v_name}.{a[0]})")

    if tmp_u.empty:
        log("empty tmp_u, return u")
        return u

    ret = u.merge(tmp_u.loc['u'], right_index=True, left_index=True, how="outer")
    timer.check("final concat")

    # BUGFIX: the original `del tmp_u, tmp2_u` raised NameError — tmp2_u was
    # never defined (leftover from a removed intermediate frame).
    del tmp_u
    return ret
def configureInstallation(self, installationConfig):
    """Register each component named in the <Defaults> section under a
    'Default<name>' alias in the component registry."""
    defaults = configGetter.generateArgDict(installationConfig.find('Defaults'))
    for defaultSelection in defaults:
        # BUGFIX: the looked-up component was computed then ignored while the
        # registry was queried a second time; fetch once and reuse it.
        componentToMap = compReg.getComponent(defaults[defaultSelection])
        compReg.registerComponent(componentToMap, 'Default'+defaultSelection)
        # BUGFIX: missing spaces made the log read 'Xset to Y'.
        main_log.debug('Default Set: ' + defaultSelection + ' set to ' +
                       defaults[defaultSelection])
def __init__(self, info):
    """Training-state holder; records construction time for time budgeting."""
    self.config = Config(info)
    # Filled in later during training.
    self.tables = None
    self.diff_info = None
    self.model = None
    self.time = {}
    self.training_data = None
    # Wall-clock reference taken last, after the (possibly slow) Config setup.
    self.start_time = time.time()
def __init__(self, secretKey=None, algorithm='HS256', expireHour=10):
    """JWT helper: resolve the signing key (config fallback) and expiry settings."""
    if secretKey is not None:
        self.secretKey = secretKey
    else:
        # Fall back to the key configured in properties.
        config = Config.Config()
        self.secretKey = str(config.properties.get("JWTSecretKey"))
    self.algorithm = algorithm
    # NOTE(review): hours * 3600 * 1000 yields milliseconds — confirm consumers expect ms.
    self.expireTime = expireHour * 3600 * 1000
def install():
    """One-shot installer endpoint: run Install.install() the first time and
    stamp config.properties with an INSTALL timestamp to prevent re-runs.
    Returns an HTML snippet describing the outcome.  (Python 2 code.)
    """
    conf = Config.Config()
    if not conf.properties.has_key("INSTALL"):
        Install.install()
        conf.properties.put("INSTALL", str(time.time()))
        # BUGFIX: corrected the 'suuccessfully' typo in the success message.
        return "<h1>:)</h1></br>MarkdownManager2 has been successfully installed :)"
    else:
        return "<h1>:(</h1></br>You have already installed.<br>To reinstall,remove <strong>INSTALL</strong> from config.properties "
def __init__(self, info):
    """Wrap the raw info dict in a Config and reset per-run state."""
    self.config = Config(info)
    # Filled in later by the pipeline.
    self.tables = None
    self.y = None
def loadFile(args): fileName = args[1] parentTags = [] if '-b' in args: parentTags.append('BehaviorConfiguration') if '-i' in args: parentTags.append('InputConfiguration') if not parentTags: parentTags = [ 'InputConfiguration', 'BehaviorConfiguration', 'PixelConfiguration', 'RendererConfiguration' ] confRoot = config.loadConfigFile(fileName).getroot() for tag in parentTags: subTree = confRoot.find(tag) print tag + ':\n' nodesWithArgs = search.parental_tree_search(subTree, '.getchildren()', ".tag=='Args'") nodesWithDocs = search.parental_tree_search(subTree, '.getchildren()', ".tag=='Doc'") for obj in nodesWithArgs: args = obj.find('Args') cidEl = args.find('Id') docEl = args.find('Doc') or obj.find('Doc') classEl = obj.find('Class') cid = None doc = None className = None if cidEl != None: cid = cidEl.text if docEl != None and args != None: argDict = config.pullArgsFromItem(obj) doc = docEl.text try: doc = doc % argDict except: doc = docEl.text if classEl != None: className = classEl.text print '\t%(id)s : %(class)s\n\t\t%(doc)s\n' % { 'id': cid, 'doc': doc, 'class': className }
class CameraCapture(object): def __init__(self): self.n_cameras = int(Config.get("N_CAMERAS")) # Initialize the cameras self.cameras = [] for i in xrange(self.n_cameras): try: self.cameras.append(Camera(camera_index = i)) except: logger.warning("Error opening camera #"+str(i)) self.n_cameras = len(self.cameras) def start(self): while True: try: # 1. Capture images from all cameras logger.debug("Capturing Images") images = self.get_images() # 2. Send them to the remote server logger.debug("Submitting Images") self.post_images(images) except: logger.warning("Unable to retrieve and send images") # Wait time.sleep(PERIOD) def get_images(self): images = [] for cam in self.cameras: # Get Image from camera img = cam.getImage() images.append(img) return images def post_images(self, images): #Todo: Saving the images to disk until webserver is up and running for i in xrange(self.n_cameras): img = images[i] img.show() img.save("images/{}-{}.jpg".format(i, time.time())) if __name__ == "__main__": if len(sys.argv) != 2: print "usage: ./camera_process.py <sensor_config> <network config file>" # Initialize configuration Config.init(sys.argv[1], sys.argv[2]) # Start logger.debug("Starting Cameras Process") cc = CameraCapture() cc.start()
def load_experiments(ini_file_path, date_exp):
    """Load every experiment pickle for all model/inference/problem/noise/n/repeat
    combinations, tag each frame with its metadata, average over repeats, and
    pickle/return the combined DataFrame."""
    Config.read(ini_file_path)
    Config.sections()
    models = dict(Config.items('inference'))
    number_data_points = ConfigSectionMap("test-data")['data-points'].split(',')
    list_noise_variance = ConfigSectionMap("test-data")['observation_noise'].split(',')
    repeat = ConfigSectionMap("others")['repeat']
    number_test_points = ConfigSectionMap("others")['number-test-points']
    test_problems = ConfigSectionMap("test-data")['test-problems'].split(',')
    total_steps_outer = ConfigSectionMap("MCMC")["total-steps-outer"]
    directory = "results/" + date_exp
    un_averaged_frames = []
    for index in range(int(repeat)):
        frames = []
        for key in models:
            all_inf_string = ConfigSectionMap("inference")[key]
            list_of_infer = all_inf_string.split(";")
            for infer in list_of_infer:
                for test_problem in test_problems:
                    for noise in list_noise_variance:
                        for n in number_data_points:
                            experiment_name = key+'_'+infer+'_'+total_steps_outer+'_'\
                                + test_problem +'_'+noise+'_'+n+'_'+number_test_points+'_'+str(index)
                            output_file_name = directory + "/exp_" + experiment_name
                            try:
                                df = pd.read_pickle(output_file_name)
                                df['model'] = pd.Series([key for _ in range(len(df.index))], index=df.index)
                                df['test_problem'] = pd.Series([test_problem for _ in range(len(df.index))], index=df.index)
                                df['noise'] = pd.Series([noise for _ in range(len(df.index))], index=df.index)
                                df['n'] = pd.Series([n for _ in range(len(df.index))], index=df.index)
                                df['repeat'] = pd.Series([index for _ in range(len(df.index))], index=df.index)
                                frames.append(df)
                            except (FileNotFoundError, ValueError):
                                # BUGFIX: the message was a bare discarded expression
                                # and never printed; also catch the missing-file case.
                                print("could not open " + output_file_name)
        un_averaged_frames.append(pd.concat(frames))
    df_experiment = average_frames(un_averaged_frames)
    df_experiment.to_pickle("results/experiment_" + date_exp)
    return df_experiment
def readConfig(self, config):
    """Load credentials and session-file locations for self.user from *config*."""
    user_options = Config(config).options[self.user]
    self.email = user_options['email']
    self.password = user_options['password']
    # Session files live next to this module, regardless of the CWD.
    base_dir = os.path.dirname(os.path.abspath(__file__))
    self.cookiefile = os.path.join(base_dir, user_options['cookie'])
    self.tokenfile = os.path.join(base_dir, user_options['token'])
def start(self):
    """Bring the boxed cluster up: images, loop devices, containers, host ssh,
    then bootstrap the cluster from inside the seed container."""
    osds = Config.get('osds')
    hosts = Config.get('hosts')

    # ensure boxes don't exist
    run_shell_command('docker-compose down')

    print('Checking docker images')
    if not image_exists(CEPH_IMAGE):
        get_ceph_image()
    if not image_exists(BOX_IMAGE):
        get_box_image()

    if not Config.get('skip_create_loop'):
        print('Adding logical volumes (block devices) in loopback device...')
        osd.create_loopback_devices(osds)
        print(f'Added {osds} logical volumes in a loopback device')

    print('Starting containers')
    dcflags = '-f docker-compose.yml'
    # cgroup v1 hosts need the compatibility compose overlay.
    if not os.path.exists('/sys/fs/cgroup/cgroup.controllers'):
        dcflags += ' -f docker-compose.cgroup1.yml'
    run_shell_command(f'docker-compose {dcflags} up --scale hosts={hosts} -d')

    run_shell_command('sudo sysctl net.ipv4.conf.all.forwarding=1')
    run_shell_command('sudo iptables -P FORWARD ACCEPT')

    print('Setting up host ssh servers')
    ips = get_host_ips()
    print(ips)
    for h in range(hosts):
        host._setup_ssh(h + 1)

    verbose = '-v' if Config.get('verbose') else ''
    skip_deploy = '--skip_deploy_osds' if Config.get('skip_deploy_osds') else ''
    skip_monitoring_stack = ('--skip_monitoring_stack'
                             if Config.get('skip_monitoring_stack') else '')
    skip_dashboard = '--skip_dashboard' if Config.get('skip_dashboard') else ''

    # BUGFIX: the bootstrap command was built with literal '{osds}'/'{hosts}'
    # (missing f-prefix on the inner strings) and then never used — the actual
    # call dropped the dashboard/monitoring skip flags.  Build it once, use it.
    box_bootstrap_command = (
        f'/cephadm/box/box.py {verbose} cluster bootstrap '
        f'--osds {osds} '
        f'--hosts {hosts} '
        f'{skip_deploy} '
        f'{skip_dashboard} '
        f'{skip_monitoring_stack} '
    )
    run_dc_shell_command(box_bootstrap_command, 1, 'seed')

    host._copy_cluster_ssh_key(ips)
    print('Bootstrap finished successfully')
def get(self):
    """Return the latest humidity/pressure/temperature readings from the DB."""
    database_conf = Config("config.ini")["database"]
    db = DB(database_conf["server"], database_conf["user"], database_conf["password"])
    sensor = BME280InformationProvider(db)
    return {
        measurement: sensor[measurement]["data"]
        for measurement in ("humidity", "pressure", "temperature")
    }
def test_composite(self):
    """Compositing override.xml over parent.xml must reproduce the known-good truth file."""
    parent = ElementTree()
    overrider = ElementTree()
    parent.parse('tests/testdata/parent.xml')
    overrider.parse('tests/testdata/override.xml')
    merged = ElementTree(Config.compositeXMLTrees(parent, overrider))
    merged.write('tests/testdata/compositeTESTout.xml')
    assert filecmp.cmp('tests/testdata/compositeTESTout.xml',
                       'tests/testdata/compositeTRUTH.xml')
def __init__(self, info):
    """Feature-engineering state holder; collections start empty and are filled during fit."""
    self.config = Config(info)
    # Populated later by the pipeline.
    self.tables = None
    self.time_window_config = {}
    self.feature_selection_models = []
    self.feature_selection_window = 40  # default look-back window
    self.Xs = None
    self.y = None
    self.cat_dict_counts = []
def extract_element(html: str) -> str:
    """Extract the configured elements (by path, class or id) from *html* and
    return their concatenated string form.

    Exits the process when configuration is missing or HTML cannot be parsed;
    raises RuntimeError on an unknown element-spec key.

    BUGFIX: the return annotation said ``int`` although a string is returned.
    """
    logger.debug("# extract_element()")
    result_content: str = ""

    # configuration
    config = Config()
    if not config:
        logger.error("can't read configuration")
        sys.exit(-1)
    collection_conf = config.get_collection_configs()
    if not collection_conf:
        logger.error("can't get collection configuration")
        sys.exit(-1)
    element_list = collection_conf["element_list"][0]
    encoding = collection_conf["encoding"]
    logger.debug("# encoding: %r" % encoding)

    # sanitize
    html = re.sub(r'alt="(.*)<br>(.*)"', r'alt="\1 \2"', html)
    html = re.sub(r'<br>', r'<br/>', html)
    # BUGFIX: re.LOCALE was passed as the positional *count* argument of
    # re.sub, silently capping the number of substitutions; strip all
    # control characters instead (the LOCALE flag is meaningless here).
    html = re.sub(r'[\x01\x08]', '', html)
    html = re.sub(r'<\?xml[^>]+>', r'', html)

    for parser in ["html.parser"]:
        soup = BeautifulSoup(html, parser)
        if not soup:
            logger.error("can't parse HTML")
            sys.exit(-1)

        for element_spec in element_list:
            if element_spec == "element_path":
                path_str = element_list[element_spec]
                divs = HTMLExtractor.get_node_with_path(soup, path_str)
            elif element_spec == "element_class":
                class_str = element_list[element_spec]
                divs = soup.find_all(class_=class_str)
            elif element_spec == "element_id":
                id_str = element_list[element_spec]
                divs = soup.find_all(attrs={"id": id_str})
            else:
                raise RuntimeError("unknown configuration '%s'" % element_spec)
            if divs:
                for div in divs:
                    result_content = result_content + str(div)
    return result_content
def __init__(self):
    """Open every configured camera, skipping any that fail to initialize.
    (Python 2 code: xrange.)"""
    self.n_cameras = int(Config.get("N_CAMERAS"))
    # Initialize the cameras
    self.cameras = []
    for index in xrange(self.n_cameras):
        try:
            self.cameras.append(Camera(camera_index = index))
        except:
            logger.warning("Error opening camera #"+str(index))
    # Keep only the count of cameras that actually opened.
    self.n_cameras = len(self.cameras)
def getResult(self):
    """Assemble the measurement record for this command as a plain dict."""
    return {
        "ExID": self.script.exid,
        "ScriptID": self.script.id,
        "Priority": self.Priority,
        "ScriptLineNum": self.ScriptLineNum,
        "StartTime": self.StartTime,
        "LocalStartTime": self.LocalStartTime,
        "CommandType": self.CommandType,
        "Protocol": self.Protocol,
        # Source fields come from the host we run on, not from the script.
        "SourceName": Config.getHostname(),
        "SourceIP": Config.getHostIP(),
        "DestName": self.DestName,
        "DestIP": self.DestIP,
        "DestAddress": self.DestAddress,
        "NumOfTrials": self.NumOfTrials,
        "Success": self.Success,
        "reachedDest": self.reachedDest,
        "Exceptions": self.Exceptions,
        "RawDetails": self.RawDetails,
    }
def __init__(self, configFileName):
    """Boot the whole system from an XML config file, wiring renderers, screen
    pixels, inputs, behaviors and the pixel mapper in dependency order, then
    enter the main loop (this call does not return until shutdown)."""
    main_log.info("System Initialization began based on: " + str(configFileName))
    self.timer = clock.Stopwatch()
    self.timer.start()
    self.inputs = {}  # dict of inputs and their bound behaviors, keyed by InputId
    self.behaviors = {}
    self.lock = thread.allocate_lock()
    self.behaviorOutputs = {}  # key: [list of output destinations]
    self.behaviorInputs = {}
    self.componentDict = {}
    self.inputBehaviorRegistry = {}  # inputid -> behaviors listening to that input
    self.dieNow = False
    self.screen = Screen()
    compReg.initRegistry()
    compReg.registerComponent(self.screen, "Screen")  # TODO: move to constants file
    bqs.initBQS()  # initialize the behavior query system
    # read configs from xml — section order matters: renderers before the
    # screen's pixels, inputs before the behaviors that bind to them.
    config = configGetter.loadConfigFile(configFileName)
    rendererConfig = config.find("RendererConfiguration")
    self.initializeRenderers(rendererConfig)
    pixelConfig = config.find("PixelConfiguration")
    self.initializeScreen(pixelConfig)
    inputConfig = config.find("InputConfiguration")
    self.initializeInputs(inputConfig)
    behaviorConfig = config.find("BehaviorConfiguration")
    self.initializeBehaviors(behaviorConfig)
    mapperConfig = config.find("PixelMapperConfiguration")
    self.initializeMapper(mapperConfig)
    # inits
    main_log.info("All components initialized")
    # self.registerAllComponents()
    installationConfig = config.find("InstallationConfiguration")
    self.configureInstallation(installationConfig)
    # Done initializing.  Lets start this thing!
    self.timer.stop()
    # main_log.info('Initialization done.  Time: ', self.timer.elapsed(), 'ms')
    self.mainLoop()
def test_eval(self):
    """attemptEval: plain literals evaluate directly; ${Var}$ templates produce
    lambdas; quoted '${Var}$' templates defer binding one level further."""
    assert Config.attemptEval('5') == 5
    assert Config.attemptEval('{5:10, 12:15}') == {5: 10, 12: 15}

    single_layer = Config.attemptEval('${Val}$*5')
    assert single_layer({'Val': 2}) == 10

    double_layer = Config.attemptEval("${Val1}$*'${Val2}$'")
    assert double_layer({'Val1': 3})({'Val2': 7}) == 21

    conditional = Config.attemptEval("${Val1}$*5=='${Val2}$'")
    assert conditional({'Val1': 5})({'Val2': 25}) == True
    assert conditional({'Val1': 5})({'Val2': 26}) == False

    only_double = Config.attemptEval("'${Val1}$'*'${Val2}$'")
    assert only_double({})({'Val1': 3, 'Val2': 7}) == 21
def test_inheritance(self):
    """Loading a config that uses inheritance must match the stored truth file."""
    loaded = Config.loadConfigFile('tests/testdata/inheritanceTEST.xml')
    loaded.write('tests/testdata/inheritanceTESTout.xml')
    assert filecmp.cmp('tests/testdata/inheritanceTESTout.xml',
                       'tests/testdata/inheritanceTRUTH.xml')
def validateArgs(self, argFileName):
    """Validate this component's args against the requirement file (see util)."""
    requirements = configGetter.loadParamRequirementDict(argFileName)
    self.validateArgDict(requirements)
def get_last_n_residuals(ini_file_path, date_exp, n_last_residuals):
    """Collect the residuals from the last *n_last_residuals* MCMC steps of every
    experiment, flattened with matching metadata columns.

    Returns a tuple: (DataFrame with residuals/model/test_problem/noise/n
    columns, mean of the recorded base-line values).
    """
    Config.read(ini_file_path)
    Config.sections()
    models = dict(Config.items('inference'))
    number_data_points = ConfigSectionMap("test-data")['data-points'].split(',')
    list_noise_variance = ConfigSectionMap("test-data")['observation_noise'].split(',')
    repeat = ConfigSectionMap("others")['repeat']
    number_test_points = ConfigSectionMap("others")['number-test-points']
    test_problems = ConfigSectionMap("test-data")['test-problems'].split(',')
    total_steps_outer = ConfigSectionMap("MCMC")["total-steps-outer"]
    directory = "results/" + date_exp
    base_line = []
    residuals = []
    model = []
    problem = []
    noise_level = []
    n_training_data = []
    for index in range(int(repeat)):
        for key in models:
            all_inf_string = ConfigSectionMap("inference")[key]
            list_of_infer = all_inf_string.split(";")
            for infer in list_of_infer:
                for test_problem in test_problems:
                    for noise in list_noise_variance:
                        for n in number_data_points:
                            experiment_name = key+'_'+infer+'_'+total_steps_outer+'_'\
                                + test_problem +'_'+noise+'_'+n+'_'+number_test_points+'_'+str(index)
                            output_file_name = directory + "/exp_" + experiment_name
                            try:
                                df_all = pd.read_pickle(output_file_name)
                                df = df_all.tail(n_last_residuals)
                                base_line.extend(df['base-line'].iloc[0])
                                for j in range(len(df.index)):
                                    # One residual vector per retained MCMC step;
                                    # metadata repeated once per test point.
                                    residuals.extend(df['residuals'].iloc[j])
                                    model.extend([key for _ in range(int(number_test_points))])
                                    problem.extend([test_problem for _ in range(int(number_test_points))])
                                    noise_level.extend([noise for _ in range(int(number_test_points))])
                                    n_training_data.extend([n for _ in range(int(number_test_points))])
                            except (FileNotFoundError, ValueError):
                                # BUGFIX: the message was a bare discarded expression
                                # and never printed; also catch the missing-file case.
                                print("could not open " + output_file_name)
    return (pd.DataFrame({'residuals': residuals, 'model': model,
                          'test_problem': problem, 'noise': noise_level,
                          'n': n_training_data}),
            np.mean(base_line))
def get_socket(self, sensor):
    """Return a reusable UDP socket bound to the broadcast-receiving address
    and the sensor's port."""
    sock = socket(AF_INET, SOCK_DGRAM)
    sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    address = (Config.get("BROADCAST_RECEIVING_IP"), sensor.udp_port)
    sock.bind(address)
    return sock
def log(self, msg): self.logger.write(msg) ################################ #### INSTANCE METHODS #### ################################ def __str__(self): msg = 'Expense Report printer:\n' for key in self.__dict__.keys(): msg += ' %s: %s\n' % (key, self.__dict__[key]) msg += '-------------------------' return msg if __name__ == "__main__": Config.instance().DEBUG = True api = API() msg1 = {'user':'******', 'date':'2015-05-14', 'amount': 0.55, 'category':'food & beverage', 'reimburseable': False} msg = json.dumps(msg1) print msg resp = api.add_expense(msg) print resp
parser.add_argument("-d", metavar="DATE", type=str, default=datetime.date.today(), help="Date") parser.add_argument("--local", action="store_true", help="Do not use parallelism") parser.add_argument("--cores", type=int, default=60, help="Number of cores") parser.add_argument("-m", metavar="MESSAGE", type=str, default=" no message ", help="Message") ns = parser.parse_args() ini_file_path = ns.ini_file_path date_exp = ns.d run_locally = ns.local cores = ns.cores message = ns.m Config.read(ini_file_path) Config.sections() models = dict(Config.items('inference')) number_data_points = ConfigSectionMap("test-data")['data-points'].split(',') list_noise_variance = ConfigSectionMap("test-data")['observation_noise'].split(',') repeat = ConfigSectionMap("others")['repeat'] number_test_points = ConfigSectionMap("others")['number-test-points'] test_problems = ConfigSectionMap("test-data")['test-problems'].split(',') total_steps_outer = ConfigSectionMap("MCMC")["total-steps-outer"] condition=[] for key in models: all_inf_string = ConfigSectionMap("inference")[key]
def get_photon_socket():
    """Return a UDP socket listening on all interfaces at the configured photon-in port."""
    sock = socket(AF_INET, SOCK_DGRAM)
    port = int(Config.get("PHOTON_UDP_IN_PORT"))
    sock.bind(('', port))
    return sock
def send_value(self, sensor, value):
    """Broadcast *value* (stringified) to the sensor's UDP port."""
    destination = (Config.get("BROADCAST_IP"), sensor.udp_port)
    self.bsocket.sendto(str(value), destination)
values = [] data = self.psocket.recv(512) if len(data) != 6*4: logger.warning("Invalid packet format") raise Exception for i in xrange(6): value = struct.unpack("I", data[(i*4):(i+1)*4]) values.append(value[0]) return values """ Sends a value to the Configure port for this sensor via UDP broadcast. """ def send_value(self, sensor, value): self.bsocket.sendto(str(value), (Config.get("BROADCAST_IP"), sensor.udp_port)) if __name__ == "__main__": if len(sys.argv) != 3: print "usage: ./photon_communicator.py <sensor config file> <network config file>" # Initialize configuration Config.init(sys.argv[1], sys.argv[2]) # Start logger.debug("Starting Photon Communicator") pc = PhotonCommunicator() pc.start()
def configureInstallation(self, installationConfig):
    """Register each component named in the <Defaults> section under a
    'Default<name>' alias in the component registry."""
    defaults = configGetter.generateArgDict(installationConfig.find("Defaults"))
    for defaultSelection in defaults:
        # BUGFIX: the looked-up component was computed then ignored while the
        # registry was queried a second time; fetch once and reuse it.
        componentToMap = compReg.getComponent(defaults[defaultSelection])
        compReg.registerComponent(componentToMap, "Default" + defaultSelection)
        # BUGFIX: missing spaces made the log read 'Xset to Y'.
        main_log.debug("Default Set: " + defaultSelection + " set to " +
                       defaults[defaultSelection])